Merge branch 'sigp:stable' into stable

David Boreham, 2024-02-03 15:38:26 -07:00, committed by GitHub
commit acffbed0a0
529 changed files with 38051 additions and 9752 deletions

.config/nextest.toml (new file, 113 lines)

@ -0,0 +1,113 @@
# This is the default config used by nextest. It is embedded in the binary at
# build time. It may be used as a template for .config/nextest.toml.
[store]
# The directory under the workspace root at which nextest-related files are
# written. Profile-specific storage is currently written to dir/<profile-name>.
dir = "target/nextest"
# This section defines the default nextest profile. Custom profiles are layered
# on top of the default profile.
[profile.default]
# "retries" defines the number of times a test should be retried. If set to a
# non-zero value, tests that succeed on a subsequent attempt will be marked as
# non-flaky. Can be overridden through the `--retries` option.
# Examples
# * retries = 3
# * retries = { backoff = "fixed", count = 2, delay = "1s" }
# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" }
retries = 0
# The number of threads to run tests with. Supported values are either an integer or
# the string "num-cpus". Can be overridden through the `--test-threads` option.
test-threads = 8
# The number of threads required for each test. This is generally used in overrides to
# mark certain tests as heavier than others. However, it can also be set as a global parameter.
threads-required = 1
# Show these test statuses in the output.
#
# The possible values this can take are:
# * none: no output
# * fail: show failed (including exec-failed) tests
# * retry: show flaky and retried tests
# * slow: show slow tests
# * pass: show passed tests
# * skip: show skipped tests (most useful for CI)
# * all: all of the above
#
# Each value includes all the values above it; for example, "slow" includes
# failed and retried tests.
#
# Can be overridden through the `--status-level` flag.
status-level = "pass"
# Similar to status-level, show these test statuses at the end of the run.
final-status-level = "flaky"
# "failure-output" defines when standard output and standard error for failing tests are produced.
# Accepted values are
# * "immediate": output failures as soon as they happen
# * "final": output failures at the end of the test run
# * "immediate-final": output failures as soon as they happen and at the end of
# the test run; combination of "immediate" and "final"
# * "never": don't output failures at all
#
# For large test suites and CI it is generally useful to use "immediate-final".
#
# Can be overridden through the `--failure-output` option.
failure-output = "immediate"
# "success-output" controls production of standard output and standard error on success. This should
# generally be set to "never".
success-output = "never"
# Cancel the test run on the first failure. For CI runs, consider setting this
# to false.
fail-fast = true
# Treat a test that takes longer than the configured 'period' as slow, and print a message.
# See <https://nexte.st/book/slow-tests> for more information.
#
# Optional: specify the parameter 'terminate-after' with a non-zero integer,
# which will cause slow tests to be terminated after the specified number of
# periods have passed.
# Example: slow-timeout = { period = "60s", terminate-after = 2 }
slow-timeout = { period = "120s" }
# Treat a test as leaky if after the process is shut down, standard output and standard error
# aren't closed within this duration.
#
# This usually happens in case of a test that creates a child process and lets it inherit those
# handles, but doesn't clean the child process up (especially when it fails).
#
# See <https://nexte.st/book/leaky-tests> for more information.
leak-timeout = "100ms"
[profile.default.junit]
# Output a JUnit report into the given file inside 'store.dir/<profile-name>'.
# If unspecified, JUnit is not written out.
# path = "junit.xml"
# The name of the top-level "report" element in JUnit report. If aggregating
# reports across different test runs, it may be useful to provide separate names
# for each report.
report-name = "lighthouse-run"
# Whether standard output and standard error for passing tests should be stored in the JUnit report.
# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
store-success-output = false
# Whether standard output and standard error for failing tests should be stored in the JUnit report.
# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
#
# Note that if a description can be extracted from the output, it is always stored in the
# <description> element.
store-failure-output = true
# This profile is activated if MIRI_SYSROOT is set.
[profile.default-miri]
# Miri tests take up a lot of memory, so run only a few tests at a time by default.
test-threads = 4
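
Since custom profiles are layered on top of [profile.default] (see the note at the top of this file), a CI-oriented profile only needs to override the keys that differ. A minimal sketch, assuming a hypothetical profile named "ci" that would be selected with `cargo nextest run --profile ci`:

# Hypothetical profile for CI, layered on top of [profile.default].
[profile.ci]
# Keep going after the first failure so one broken test doesn't hide others.
fail-fast = false
# Print failures as they happen and again in the final summary.
failure-output = "immediate-final"
# Retry potentially flaky tests before marking them failed.
retries = 2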

.github/mergify.yml (new file, vendored, 19 lines)

@ -0,0 +1,19 @@
queue_rules:
- name: default
batch_size: 8
batch_max_wait_time: 60 s
checks_timeout: 10800 s
merge_method: squash
commit_message_template: |
{{ title }} (#{{ number }})
{% for commit in commits %}
* {{ commit.commit_message }}
{% endfor %}
queue_conditions:
- "#approved-reviews-by >= 1"
- "check-success=license/cla"
- "check-success=target-branch-check"
merge_conditions:
- "check-success=test-suite-success"
- "check-success=local-testnet-success"


@ -22,14 +22,15 @@ jobs:
- name: Checkout code
uses: actions/checkout@v3
- name: Create docker network
run: docker network create book
- name: Run mdbook server
run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:v0.4.20-rust serve --hostname 0.0.0.0
run: |
docker run -v ${{ github.workspace }}/book:/book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0
sleep 5
- name: Print logs
run: docker logs book
- name: Run linkcheck
run: docker run --network book tennox/linkcheck:latest book:3000
run: |
curl -sL https://github.com/filiph/linkcheck/releases/download/3.0.0/linkcheck-3.0.0-linux-x64.tar.gz | tar xvzf - linkcheck/linkcheck --strip 1
./linkcheck localhost:3000 -d

.github/workflows/local-testnet.yml

@ -20,6 +20,9 @@ jobs:
- ubuntu-22.04
- macos-12
runs-on: ${{ matrix.os }}
env:
# Enable portable to prevent issues with caching `blst` for the wrong CPU type
FEATURES: portable,jemalloc
steps:
- uses: actions/checkout@v3
@ -83,4 +86,15 @@ jobs:
- name: Stop local testnet with blinded block production
run: ./stop_local_testnet.sh
working-directory: scripts/local_testnet
# This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether
# a PR is safe to merge. New jobs should be added here.
local-testnet-success:
name: local-testnet-success
runs-on: ubuntu-latest
needs: ["run-local-testnet"]
steps:
- uses: actions/checkout@v3
- name: Check that success job is dependent on all others
run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success


@ -282,9 +282,6 @@ jobs:
| <img src="https://simpleicons.org/icons/docker.svg" style="width: 32px;"/> | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) |
ENDBODY
)
assets=()
for asset in ./lighthouse-*.tar.gz*; do
assets+=("-a" "$asset/$asset")
done
assets=(./lighthouse-*.tar.gz*/lighthouse-*.tar.gz*)
tag_name="${{ env.VERSION }}"
echo "$body" | hub release create --draft "${assets[@]}" -F "-" "$tag_name"
echo "$body" | gh release create --draft -F "-" "$tag_name" "${assets[@]}"

.github/workflows/test-suite.yml

@ -18,14 +18,16 @@ env:
# Deny warnings in CI
# Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
RUSTFLAGS: "-D warnings -C debuginfo=0"
# The Nightly version used for cargo-udeps, might need updating from time to time.
PINNED_NIGHTLY: nightly-2023-04-16
# Prevent Github API rate limiting.
LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Enable self-hosted runners for the sigp repo only.
SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
# Self-hosted runners need to reference a different host for `./watch` tests.
WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }}
# Disable incremental compilation
CARGO_INCREMENTAL: 0
# Enable portable to prevent issues with caching `blst` for the wrong CPU type
TEST_FEATURES: portable
jobs:
target-branch-check:
name: target-branch-check
@ -34,145 +36,191 @@ jobs:
steps:
- name: Check that the pull request is not targeting the stable branch
run: test ${{ github.base_ref }} != "stable"
extract-msrv:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Extract Minimum Supported Rust Version (MSRV)
run: |
metadata=$(cargo metadata --no-deps --format-version 1)
msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version')
echo "MSRV=$msrv" >> $GITHUB_OUTPUT
id: extract_msrv
outputs:
MSRV: ${{ steps.extract_msrv.outputs.MSRV }}
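# For illustration (field values assumed, not from this diff): the jq filter
# above reads the `cargo metadata` JSON, which is shaped roughly like
#   {"packages": [{"name": "lighthouse", "rust_version": "1.x", ...}, ...]}
# and exposes the lighthouse crate's `rust_version` as the job's MSRV output.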
cargo-fmt:
name: cargo-fmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Check formatting with cargo fmt
run: make cargo-fmt
release-tests-ubuntu:
name: release-tests-ubuntu
# Use self-hosted runners only on the sigp repo.
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }}
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
bins: cargo-nextest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install Foundry (anvil)
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
- name: Run tests in release
run: make test-release
run: make nextest-release
- name: Show cache stats
if: env.SELF_HOSTED_RUNNERS == 'true'
run: sccache --show-stats
release-tests-windows:
name: release-tests-windows
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }}
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
bins: cargo-nextest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install Foundry (anvil)
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
- name: Install make
if: env.SELF_HOSTED_RUNNERS == 'false'
run: choco install -y make
- uses: KyleMayes/install-llvm-action@v1
if: env.SELF_HOSTED_RUNNERS == false
with:
version: "15.0"
directory: ${{ runner.temp }}/llvm
# - uses: KyleMayes/install-llvm-action@v1
# if: env.SELF_HOSTED_RUNNERS == 'false'
# with:
# version: "15.0"
# directory: ${{ runner.temp }}/llvm
- name: Set LIBCLANG_PATH
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
- name: Run tests in release
run: make test-release
run: make nextest-release
- name: Show cache stats
if: env.SELF_HOSTED_RUNNERS == 'true'
run: sccache --show-stats
beacon-chain-tests:
name: beacon-chain-tests
# Use self-hosted runners only on the sigp repo.
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }}
needs: cargo-fmt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
bins: cargo-nextest
- name: Run beacon_chain tests for all known forks
run: make test-beacon-chain
- name: Show cache stats
if: env.SELF_HOSTED_RUNNERS == 'true'
run: sccache --show-stats
op-pool-tests:
name: op-pool-tests
runs-on: ubuntu-latest
needs: cargo-fmt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
bins: cargo-nextest
- name: Run operation_pool tests for all known forks
run: make test-op-pool
network-tests:
name: network-tests
runs-on: ubuntu-latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
bins: cargo-nextest
- name: Run network tests for all known forks
run: make test-network
slasher-tests:
name: slasher-tests
runs-on: ubuntu-latest
needs: cargo-fmt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
bins: cargo-nextest
- name: Run slasher tests for all supported backends
run: make test-slasher
debug-tests-ubuntu:
name: debug-tests-ubuntu
# Use self-hosted runners only on the sigp repo.
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }}
needs: cargo-fmt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
with:
channel: stable
bins: cargo-nextest
- name: Install Foundry (anvil)
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
- name: Run tests in debug
run: make test-debug
run: make nextest-debug
- name: Show cache stats
if: env.SELF_HOSTED_RUNNERS == 'true'
run: sccache --show-stats
state-transition-vectors-ubuntu:
name: state-transition-vectors-ubuntu
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
- name: Run state_transition_vectors in release.
run: make run-state-transition-tests
ef-tests-ubuntu:
name: ef-tests-ubuntu
# Use self-hosted runners only on the sigp repo.
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }}
needs: cargo-fmt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
bins: cargo-nextest
- name: Run consensus-spec-tests with blst, milagro and fake_crypto
run: make test-ef
run: make nextest-ef
- name: Show cache stats
if: env.SELF_HOSTED_RUNNERS == 'true'
run: sccache --show-stats
dockerfile-ubuntu:
name: dockerfile-ubuntu
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Build the root Dockerfile
run: docker build --build-arg FEATURES=portable -t lighthouse:local .
- name: Test the built image
@ -180,11 +228,13 @@ jobs:
eth1-simulator-ubuntu:
name: eth1-simulator-ubuntu
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
with:
@ -194,11 +244,13 @@ jobs:
merge-transition-ubuntu:
name: merge-transition-ubuntu
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
with:
@ -208,21 +260,25 @@ jobs:
no-eth1-simulator-ubuntu:
name: no-eth1-simulator-ubuntu
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
- name: Run the beacon chain sim without an eth1 connection
run: cargo run --release --bin simulator no-eth1-sim
syncing-simulator-ubuntu:
name: syncing-simulator-ubuntu
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
with:
@ -231,21 +287,30 @@ jobs:
run: cargo run --release --bin simulator syncing-sim
doppelganger-protection-test:
name: doppelganger-protection-test
runs-on: ubuntu-latest
needs: cargo-fmt
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }}
env:
# Enable portable to prevent issues with caching `blst` for the wrong CPU type
FEATURES: jemalloc,portable
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
- name: Install geth
if: env.SELF_HOSTED_RUNNERS == 'false'
run: |
sudo add-apt-repository -y ppa:ethereum/ethereum
sudo apt-get update
sudo apt-get install ethereum
- name: Install lighthouse and lcli
- name: Install lighthouse
run: |
make
make install-lcli
- name: Install lcli
if: env.SELF_HOSTED_RUNNERS == 'false'
run: make install-lcli
- name: Run the doppelganger protection failure test script
run: |
cd scripts/tests
@ -256,90 +321,76 @@ jobs:
./doppelganger_protection.sh success genesis.json
execution-engine-integration-ubuntu:
name: execution-engine-integration-ubuntu
runs-on: ubuntu-latest
needs: cargo-fmt
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '1.20'
- uses: actions/setup-dotnet@v3
with:
dotnet-version: '6.0.201'
- name: Get latest version of stable Rust
run: rustup update stable
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
cache: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Add go compiler to $PATH
if: env.SELF_HOSTED_RUNNERS == 'true'
run: echo "/usr/local/go/bin" >> $GITHUB_PATH
- name: Run exec engine integration tests in release
run: make test-exec-engine
check-benchmarks:
name: check-benchmarks
check-code:
name: check-code
runs-on: ubuntu-latest
needs: cargo-fmt
env:
CARGO_INCREMENTAL: 1
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Typecheck benchmark code without running it
run: make check-benches
clippy:
name: clippy
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
components: rustfmt,clippy
bins: cargo-audit
- name: Check formatting with cargo fmt
run: make cargo-fmt
- name: Lint code for quality and style with Clippy
run: make lint
- name: Certify Cargo.lock freshness
run: git diff --exit-code Cargo.lock
- name: Typecheck benchmark code without running it
run: make check-benches
- name: Validate state_processing feature arbitrary-fuzz
run: make arbitrary-fuzz
- name: Run cargo audit
run: make audit-CI
- name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose
run: CARGO_HOME=$(readlink -f $HOME) make vendor
check-msrv:
name: check-msrv
runs-on: ubuntu-latest
needs: [cargo-fmt, extract-msrv]
steps:
- uses: actions/checkout@v3
- name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
- name: Install Rust at Minimum Supported Rust Version (MSRV)
run: |
metadata=$(cargo metadata --no-deps --format-version 1)
msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version')
rustup override set $msrv
- name: Run cargo check
run: cargo check --workspace
arbitrary-check:
name: arbitrary-check
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Validate state_processing feature arbitrary-fuzz
run: make arbitrary-fuzz
cargo-audit:
name: cargo-audit
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database
run: make audit
cargo-vendor:
name: cargo-vendor
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose
run: CARGO_HOME=$(readlink -f $HOME) make vendor
cargo-udeps:
name: cargo-udeps
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Install Rust (${{ env.PINNED_NIGHTLY }})
run: rustup toolchain install $PINNED_NIGHTLY
- name: Install cargo-udeps
run: cargo install cargo-udeps --locked --force
- name: Get latest version of nightly Rust
uses: moonrepo/setup-rust@v1
with:
channel: nightly
bins: cargo-udeps
cache: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Create Cargo config dir
run: mkdir -p .cargo
- name: Install custom Cargo config
@ -360,3 +411,48 @@ jobs:
run: rustup override set beta
- name: Run make
run: make
cli-check:
name: cli-check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
- name: Run Makefile to trigger the bash script
run: make cli
# This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether
# a PR is safe to merge. New jobs should be added here.
test-suite-success:
name: test-suite-success
runs-on: ubuntu-latest
needs: [
'target-branch-check',
'release-tests-ubuntu',
'release-tests-windows',
'beacon-chain-tests',
'op-pool-tests',
'network-tests',
'slasher-tests',
'debug-tests-ubuntu',
'state-transition-vectors-ubuntu',
'ef-tests-ubuntu',
'dockerfile-ubuntu',
'eth1-simulator-ubuntu',
'merge-transition-ubuntu',
'no-eth1-simulator-ubuntu',
'syncing-simulator-ubuntu',
'doppelganger-protection-test',
'execution-engine-integration-ubuntu',
'check-code',
'check-msrv',
'cargo-udeps',
'compile-with-beta-compiler',
'cli-check',
]
steps:
- uses: actions/checkout@v3
- name: Check that success job is dependent on all others
run: ./scripts/ci/check-success-job.sh ./.github/workflows/test-suite.yml test-suite-success
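
check-success-job.sh itself is not part of this diff. A plausible sketch of such a guard, assuming the workflow is parsed with yq v4 (script name aside, everything here is an assumption rather than the real implementation):

#!/usr/bin/env bash
# Hypothetical check: every job defined in the workflow (other than the
# success job itself) should appear in the success job's `needs` list, so
# that newly added jobs can't be forgotten.
set -euo pipefail
workflow="$1"
success_job="$2"
all_jobs=$(yq '.jobs | keys | .[]' "$workflow" | grep -vx "$success_job" | sort)
needed=$(yq ".jobs.\"$success_job\".needs[]" "$workflow" | sort)
diff <(echo "$all_jobs") <(echo "$needed") && echo "OK: $success_job depends on all jobs"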

.gitignore (vendored, 7 lines changed)

@ -1,4 +1,5 @@
target/
vendor/
**/*.rs.bk
*.pk
*.sk
@ -9,7 +10,11 @@ perf.data*
/bin
genesis.ssz
/clippy.toml
/.cargo
# IntelliJ
/*.iml
.idea
# VSCode
/.vscode

Cargo.lock (generated, 2813 lines changed)

File diff suppressed because it is too large.

Cargo.toml

@ -59,6 +59,7 @@ members = [
"consensus/swap_or_not_shuffle",
"crypto/bls",
"crypto/kzg",
"crypto/eth2_key_derivation",
"crypto/eth2_keystore",
"crypto/eth2_wallet",
@ -104,7 +105,7 @@ criterion = "0.3"
delay_map = "0.3"
derivative = "2"
dirs = "3"
discv5 = { version = "0.3", features = ["libp2p"] }
discv5 = { git="https://github.com/sigp/discv5", rev="e30a2c31b7ac0c57876458b971164654dfa4513b", features = ["libp2p"] }
env_logger = "0.9"
error-chain = "0.12"
ethereum-types = "0.14"
@ -119,12 +120,12 @@ fnv = "1"
fs2 = "0.4"
futures = "0.3"
hex = "0.4"
hyper = "0.14"
hyper = "1"
itertools = "0.10"
lazy_static = "1"
libsecp256k1 = "0.7"
log = "0.4"
lru = "0.7"
lru = "0.12"
maplit = "1"
num_cpus = "1"
parking_lot = "0.12"
@ -136,19 +137,19 @@ r2d2 = "0.8"
rand = "0.8"
rayon = "1.7"
regex = "1"
reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls"] }
reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] }
ring = "0.16"
rusqlite = { version = "0.28", features = ["bundled"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_repr = "0.1"
serde_yaml = "0.8"
serde_yaml = "0.9"
sha2 = "0.9"
slog = { version = "2", features = ["max_level_trace", "release_max_level_trace"] }
slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] }
slog-async = "2"
slog-term = "2"
sloggers = { version = "2", features = ["json"] }
smallvec = "1"
smallvec = "1.11.2"
snap = "1"
ssz_types = "0.5"
strum = { version = "0.24", features = ["derive"] }
@ -156,9 +157,13 @@ superstruct = "0.6"
syn = "1"
sysinfo = "0.26"
tempfile = "3"
tokio = { version = "1", features = ["rt-multi-thread", "sync"] }
tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] }
tokio-stream = { version = "0.1", features = ["sync"] }
tokio-util = { version = "0.6", features = ["codec", "compat", "time"] }
tracing-appender = "0.2"
tracing-core = "0.1"
tracing-log = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tree_hash = "0.5"
tree_hash_derive = "0.5"
url = "2"
@ -194,6 +199,7 @@ fork_choice = { path = "consensus/fork_choice" }
genesis = { path = "beacon_node/genesis" }
http_api = { path = "beacon_node/http_api" }
int_to_bytes = { path = "consensus/int_to_bytes" }
kzg = { path = "crypto/kzg" }
lighthouse_metrics = { path = "common/lighthouse_metrics" }
lighthouse_network = { path = "beacon_node/lighthouse_network" }
lighthouse_version = { path = "common/lighthouse_version" }
@ -218,7 +224,7 @@ swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" }
task_executor = { path = "common/task_executor" }
types = { path = "consensus/types" }
unused_port = { path = "common/unused_port" }
validator_client = { path = "validator_client/" }
validator_client = { path = "validator_client" }
validator_dir = { path = "common/validator_dir" }
warp_utils = { path = "common/warp_utils" }

Cross.toml

@ -1,5 +1,5 @@
[target.x86_64-unknown-linux-gnu]
pre-build = ["apt-get install -y cmake clang-3.9"]
pre-build = ["apt-get install -y cmake clang-5.0"]
[target.aarch64-unknown-linux-gnu]
pre-build = ["apt-get install -y cmake clang-3.9"]
pre-build = ["apt-get install -y cmake clang-5.0"]

Dockerfile

@ -1,10 +1,12 @@
FROM rust:1.69.0-bullseye AS builder
FROM rust:1.73.0-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
COPY . lighthouse
ARG FEATURES
ARG PROFILE=release
ARG CARGO_USE_GIT_CLI=true
ENV FEATURES $FEATURES
ENV PROFILE $PROFILE
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_USE_GIT_CLI
RUN cd lighthouse && make
FROM ubuntu:22.04
@ -13,4 +15,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco
ca-certificates \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse
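
The build arguments above make the image configurable without editing the Dockerfile. Mirroring the `--build-arg FEATURES=portable` invocation used by the dockerfile-ubuntu CI job earlier in this diff, a local build might look like:

docker build --build-arg FEATURES=portable --build-arg PROFILE=release -t lighthouse:local .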

Makefile

@ -14,7 +14,7 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release"
PINNED_NIGHTLY ?= nightly
CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19
# List of features to use when building natively. Can be overriden via the environment.
# List of features to use when building natively. Can be overridden via the environment.
# No jemalloc on Windows
ifeq ($(OS),Windows_NT)
FEATURES?=
@ -31,12 +31,15 @@ CROSS_PROFILE ?= release
# List of features to use when running EF tests.
EF_TEST_FEATURES ?=
# List of features to use when running CI tests.
TEST_FEATURES ?=
# Cargo profile for regular builds.
PROFILE ?= release
# List of all hard forks. This list is used to set env variables for several tests so that
# they run for different forks.
FORKS=phase0 altair merge capella
FORKS=phase0 altair merge capella deneb
# Extra flags for Cargo
CARGO_INSTALL_EXTRA_FLAGS?=
@ -106,12 +109,26 @@ build-release-tarballs:
# Runs the full workspace tests in **release**, without downloading any additional
# test vectors.
test-release:
cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher
cargo test --workspace --release --features "$(TEST_FEATURES)" \
--exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network
# Runs the full workspace tests in **release**, without downloading any additional
# test vectors, using nextest.
nextest-release:
cargo nextest run --workspace --release --features "$(TEST_FEATURES)" \
--exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network
# Runs the full workspace tests in **debug**, without downloading any additional test
# vectors.
test-debug:
cargo test --workspace --exclude ef_tests --exclude beacon_chain
cargo test --workspace --features "$(TEST_FEATURES)" \
--exclude ef_tests --exclude beacon_chain --exclude network
# Runs the full workspace tests in **debug**, without downloading any additional test
# vectors, using nextest.
nextest-debug:
cargo nextest run --workspace --features "$(TEST_FEATURES)" \
--exclude ef_tests --exclude beacon_chain --exclude network
# Runs cargo-fmt (linter).
cargo-fmt:
@ -119,7 +136,7 @@ cargo-fmt:
# Typechecks benchmark code
check-benches:
cargo check --workspace --benches
cargo check --workspace --benches --features "$(TEST_FEATURES)"
# Runs only the ef-test vectors.
run-ef-tests:
@ -129,25 +146,41 @@ run-ef-tests:
cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro"
./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests
# Runs EF test vectors with nextest
nextest-run-ef-tests:
rm -rf $(EF_TESTS)/.accessed_file_log.txt
cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)"
cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto"
cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro"
./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests
# Run the tests in the `beacon_chain` crate for all known forks.
test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS))
test-beacon-chain-%:
env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain
env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain
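# For illustration: with FORKS defined above as "phase0 altair merge capella
# deneb", the patsubst call expands `test-beacon-chain` into the prerequisites
# test-beacon-chain-phase0 ... test-beacon-chain-deneb, and the pattern rule
# runs each one with FORK_NAME set to the matching stem ($*), e.g.
# `env FORK_NAME=deneb cargo nextest run ... -p beacon_chain`.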
# Run the tests in the `operation_pool` crate for all known forks.
test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS))
test-op-pool-%:
env FORK_NAME=$* cargo test --release \
--features 'beacon_chain/fork_from_env'\
env FORK_NAME=$* cargo nextest run --release \
--features "beacon_chain/fork_from_env,$(TEST_FEATURES)"\
-p operation_pool
# Run the tests in the `network` crate for all known forks.
test-network: $(patsubst %,test-network-%,$(FORKS))
test-network-%:
env FORK_NAME=$* cargo nextest run --release \
--features "fork_from_env,$(TEST_FEATURES)" \
-p network
# Run the tests in the `slasher` crate for all supported database backends.
test-slasher:
cargo test --release -p slasher --features lmdb
cargo test --release -p slasher --no-default-features --features mdbx
cargo test --release -p slasher --features lmdb,mdbx # both backends enabled
cargo nextest run --release -p slasher --features "lmdb,$(TEST_FEATURES)"
cargo nextest run --release -p slasher --no-default-features --features "mdbx,$(TEST_FEATURES)"
cargo nextest run --release -p slasher --features "lmdb,mdbx,$(TEST_FEATURES)" # both backends enabled
# Runs only the tests/state_transition_vectors tests.
run-state-transition-tests:
@ -156,6 +189,9 @@ run-state-transition-tests:
# Downloads and runs the EF test vectors.
test-ef: make-ef-tests run-ef-tests
# Downloads and runs the EF test vectors with nextest.
nextest-ef: make-ef-tests nextest-run-ef-tests
# Runs tests checking interop between Lighthouse and execution clients.
test-exec-engine:
make -C $(EXECUTION_ENGINE_INTEGRATION) test
@ -164,21 +200,34 @@ test-exec-engine:
# test vectors.
test: test-release
# Updates the CLI help text pages in the Lighthouse book, building with Docker.
cli:
docker run --rm --user=root \
-v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \
bash -c 'cd lighthouse && make && ./scripts/cli.sh'
# Updates the CLI help text pages in the Lighthouse book, building using local
# `cargo`.
cli-local:
make && ./scripts/cli.sh
# Runs the entire test suite, downloading test vectors if required.
test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
# Lints the code for bad style and potentially unsafe arithmetic using Clippy.
# Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
lint:
cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \
cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \
-D clippy::fn_to_numeric_cast_any \
-D clippy::manual_let_else \
-D warnings \
-A clippy::derive_partial_eq_without_eq \
-A clippy::from-over-into \
-A clippy::upper-case-acronyms \
-A clippy::vec-init-then-push \
-A clippy::question-mark \
-A clippy::uninlined-format-args
-A clippy::uninlined-format-args \
-A clippy::enum_variant_names
# Lints the code using Clippy and automatically fix some simple compiler warnings.
lint-fix:
@ -201,12 +250,16 @@ make-ef-tests:
# Verifies that crates compile with fuzzing features enabled
arbitrary-fuzz:
cargo check -p state_processing --features arbitrary-fuzz
cargo check -p slashing_protection --features arbitrary-fuzz
cargo check -p state_processing --features arbitrary-fuzz,$(TEST_FEATURES)
cargo check -p slashing_protection --features arbitrary-fuzz,$(TEST_FEATURES)
# Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
audit:
audit: install-audit audit-CI
install-audit:
cargo install --force cargo-audit
audit-CI:
cargo audit
# Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
@ -215,7 +268,7 @@ vendor:
# Runs `cargo udeps` to check for unused dependencies
udeps:
cargo +$(PINNED_NIGHTLY) udeps --tests --all-targets --release
cargo +$(PINNED_NIGHTLY) udeps --tests --all-targets --release --features "$(TEST_FEATURES)"
# Performs a `cargo` clean and cleans the `ef_tests` directory.
clean:


@ -14,7 +14,7 @@ use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::path::{Path, PathBuf};
use std::time::Duration;
use tokio::time::sleep;
use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit};
use types::{ChainSpec, Epoch, EthSpec, VoluntaryExit};
pub const CMD: &str = "exit";
pub const KEYSTORE_FLAG: &str = "keystore";
@ -146,7 +146,6 @@ async fn publish_voluntary_exit<E: EthSpec>(
.ok_or("Failed to get current epoch. Please check your system time")?;
let validator_index = get_validator_index_for_exit(client, &keypair.pk, epoch, spec).await?;
let fork = get_beacon_state_fork(client).await?;
let voluntary_exit = VoluntaryExit {
epoch,
validator_index,
@ -173,12 +172,8 @@ async fn publish_voluntary_exit<E: EthSpec>(
if confirmation == CONFIRMATION_PHRASE {
// Sign and publish the voluntary exit to network
let signed_voluntary_exit = voluntary_exit.sign(
&keypair.sk,
&fork,
genesis_data.genesis_validators_root,
spec,
);
let signed_voluntary_exit =
voluntary_exit.sign(&keypair.sk, genesis_data.genesis_validators_root, spec);
client
.post_beacon_pool_voluntary_exits(&signed_voluntary_exit)
.await
@ -316,16 +311,6 @@ async fn is_syncing(client: &BeaconNodeHttpClient) -> Result<bool, String> {
.is_syncing)
}
/// Get fork object for the current state by querying the beacon node client.
async fn get_beacon_state_fork(client: &BeaconNodeHttpClient) -> Result<Fork, String> {
Ok(client
.get_beacon_states_fork(StateId::Head)
.await
.map_err(|e| format!("Failed to get get fork: {:?}", e))?
.ok_or("Failed to get fork, state not found")?
.data)
}
/// Calculates the current epoch from the genesis time and current time.
fn get_current_epoch<E: EthSpec>(genesis_time: u64, spec: &ChainSpec) -> Option<Epoch> {
let slot_clock = SystemTimeSlotClock::new(


@ -284,6 +284,8 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin
suggested_fee_recipient,
None,
None,
None,
None,
)
.map_err(|e| format!("Unable to create new validator definition: {:?}", e))?;


@ -16,7 +16,6 @@ pub const EXPORT_CMD: &str = "export";
pub const IMPORT_FILE_ARG: &str = "IMPORT-FILE";
pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE";
pub const MINIFY_FLAG: &str = "minify";
pub const PUBKEYS_FLAG: &str = "pubkeys";
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
@ -31,16 +30,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.value_name("FILE")
.help("The slashing protection interchange file to import (.json)"),
)
.arg(
Arg::with_name(MINIFY_FLAG)
.long(MINIFY_FLAG)
.takes_value(true)
.possible_values(&["false", "true"])
.help(
"Deprecated: Lighthouse no longer requires minification on import \
because it always minifies",
),
),
)
.subcommand(
App::new(EXPORT_CMD)
@ -61,17 +50,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
comma-separated. All known keys will be exported if omitted",
),
)
.arg(
Arg::with_name(MINIFY_FLAG)
.long(MINIFY_FLAG)
.takes_value(true)
.default_value("false")
.possible_values(&["false", "true"])
.help(
"Minify the output file. This will make it smaller and faster to \
import, but not faster to generate.",
),
),
)
}
@ -92,7 +70,6 @@ pub fn cli_run<T: EthSpec>(
match matches.subcommand() {
(IMPORT_CMD, Some(matches)) => {
let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?;
let minify: Option<bool> = clap_utils::parse_optional(matches, MINIFY_FLAG)?;
let import_file = File::open(&import_filename).map_err(|e| {
format!(
"Unable to open import file at {}: {:?}",
@ -102,23 +79,10 @@ pub fn cli_run<T: EthSpec>(
})?;
eprint!("Loading JSON file into memory & deserializing");
let mut interchange = Interchange::from_json_reader(&import_file)
let interchange = Interchange::from_json_reader(&import_file)
.map_err(|e| format!("Error parsing file for import: {:?}", e))?;
eprintln!(" [done].");
if let Some(minify) = minify {
eprintln!(
"WARNING: --minify flag is deprecated and will be removed in a future release"
);
if minify {
eprint!("Minifying input file for faster loading");
interchange = interchange
.minify()
.map_err(|e| format!("Minification failed: {:?}", e))?;
eprintln!(" [done].");
}
}
let slashing_protection_database =
SlashingDatabase::open_or_create(&slashing_protection_db_path).map_err(|e| {
format!(
@ -206,7 +170,6 @@ pub fn cli_run<T: EthSpec>(
}
(EXPORT_CMD, Some(matches)) => {
let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?;
let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?;
let selected_pubkeys = if let Some(pubkeys) =
clap_utils::parse_optional::<String>(matches, PUBKEYS_FLAG)?
@ -237,17 +200,10 @@ pub fn cli_run<T: EthSpec>(
)
})?;
let mut interchange = slashing_protection_database
let interchange = slashing_protection_database
.export_interchange_info(genesis_validators_root, selected_pubkeys.as_deref())
.map_err(|e| format!("Error during export: {:?}", e))?;
if minify {
eprintln!("Minifying output file");
interchange = interchange
.minify()
.map_err(|e| format!("Unable to minify output: {:?}", e))?;
}
let output_file = File::create(export_filename)
.map_err(|e| format!("Error creating output file: {:?}", e))?;


@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "4.5.0"
version = "4.6.0"
authors = [
"Paul Hauner <paul@paulhauner.com>",
"Age Manning <Age@AgeManning.com",
@ -37,6 +37,7 @@ eth2_network_config = { workspace = true }
execution_layer = { workspace = true }
lighthouse_network = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
clap_utils = { workspace = true }
hyper = { workspace = true }
lighthouse_version = { workspace = true }


@ -10,6 +10,7 @@ default = ["participation_metrics"]
write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing.
participation_metrics = [] # Exposes validator participation metrics to Prometheus.
fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable
portable = ["bls/supranational-portable"]
[dev-dependencies]
maplit = { workspace = true }
@ -17,6 +18,8 @@ environment = { workspace = true }
serde_json = { workspace = true }
[dependencies]
serde_json = { workspace = true }
eth2_network_config = { workspace = true }
merkle_proof = { workspace = true }
store = { workspace = true }
parking_lot = { workspace = true }
@ -50,6 +53,7 @@ lru = { workspace = true }
tempfile = { workspace = true }
bitvec = { workspace = true }
bls = { workspace = true }
kzg = { workspace = true }
safe_arith = { workspace = true }
fork_choice = { workspace = true }
task_executor = { workspace = true }
@ -65,6 +69,8 @@ superstruct = { workspace = true }
hex = { workspace = true }
exit-future = { workspace = true }
oneshot_broadcast = { path = "../../common/oneshot_broadcast/" }
slog-term = { workspace = true }
slog-async = { workspace = true }
[[test]]
name = "beacon_chain_tests"


@ -5,6 +5,9 @@ use participation_cache::ParticipationCache;
use safe_arith::SafeArith;
use serde_utils::quoted_u64::Quoted;
use slog::debug;
use state_processing::per_epoch_processing::altair::{
process_inactivity_updates, process_justification_and_finalization,
};
use state_processing::{
common::altair::BaseRewardPerIncrement,
per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight},
@ -26,6 +29,7 @@ use state_processing::per_epoch_processing::base::rewards_and_penalties::{
};
use state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo;
use state_processing::per_epoch_processing::base::{
process_justification_and_finalization as process_justification_and_finalization_base,
TotalBalances, ValidatorStatus, ValidatorStatuses,
};
@ -50,9 +54,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
match state {
BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators),
BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
self.compute_attestation_rewards_altair(state, validators)
}
BeaconState::Altair(_)
| BeaconState::Merge(_)
| BeaconState::Capella(_)
| BeaconState::Deneb(_) => self.compute_attestation_rewards_altair(state, validators),
}
}
@ -65,6 +70,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut validator_statuses = ValidatorStatuses::new(&state, spec)?;
validator_statuses.process_attestations(&state)?;
process_justification_and_finalization_base(
&state,
&validator_statuses.total_balances,
spec,
)?
.apply_changes_to_state(&mut state);
let ideal_rewards =
self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?;
@ -123,6 +135,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Calculate ideal_rewards
let participation_cache = ParticipationCache::new(&state, spec)?;
process_justification_and_finalization(&state, &participation_cache)?
.apply_changes_to_state(&mut state);
process_inactivity_updates(&mut state, &participation_cache, spec)?;
let previous_epoch = state.previous_epoch();
@ -189,6 +204,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut head_reward = 0i64;
let mut target_reward = 0i64;
let mut source_reward = 0i64;
let mut inactivity_penalty = 0i64;
if eligible {
let effective_balance = state.get_effective_balance(*validator_index)?;
@ -214,6 +230,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
head_reward = 0;
} else if flag_index == TIMELY_TARGET_FLAG_INDEX {
target_reward = *penalty;
let penalty_numerator = effective_balance
.safe_mul(state.get_inactivity_score(*validator_index)?)?;
let penalty_denominator = spec
.inactivity_score_bias
.safe_mul(spec.inactivity_penalty_quotient_for_state(&state))?;
inactivity_penalty =
-(penalty_numerator.safe_div(penalty_denominator)? as i64);
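// Illustrative numbers (assumed, not from this change): with a 32 ETH
// effective balance (32_000_000_000 Gwei), an inactivity score of 4,
// mainnet's INACTIVITY_SCORE_BIAS = 4 and a post-Bellatrix inactivity
// penalty quotient of 2^24, this evaluates to
// -(32_000_000_000 * 4) / (4 * 16_777_216) ≈ -1907 Gwei per epoch.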
} else if flag_index == TIMELY_SOURCE_FLAG_INDEX {
source_reward = *penalty;
}
@ -225,8 +249,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
target: target_reward,
source: source_reward,
inclusion_delay: None,
// TODO: altair calculation logic needs to be updated to include inactivity penalty
inactivity: 0,
inactivity: inactivity_penalty,
});
}
@ -249,7 +272,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
target: 0,
source: 0,
inclusion_delay: None,
// TODO: altair calculation logic needs to be updated to include inactivity penalty
inactivity: 0,
});
match *flag_index {


@ -0,0 +1,107 @@
use crate::{BeaconChain, BeaconChainTypes};
use slog::{debug, error};
use slot_clock::SlotClock;
use std::sync::Arc;
use task_executor::TaskExecutor;
use tokio::time::sleep;
use types::{EthSpec, Slot};
/// Don't run the attestation simulator if the head slot is this many epochs
/// behind the wall-clock slot.
const SYNCING_TOLERANCE_EPOCHS: u64 = 2;
/// Spawns a routine which produces an unaggregated attestation at every slot.
///
/// This routine will run once per slot
pub fn start_attestation_simulator_service<T: BeaconChainTypes>(
executor: TaskExecutor,
chain: Arc<BeaconChain<T>>,
) {
executor.clone().spawn(
async move { attestation_simulator_service(executor, chain).await },
"attestation_simulator_service",
);
}
/// Loop indefinitely, calling `BeaconChain::produce_unaggregated_attestation` every 4s into each slot.
async fn attestation_simulator_service<T: BeaconChainTypes>(
executor: TaskExecutor,
chain: Arc<BeaconChain<T>>,
) {
let slot_duration = chain.slot_clock.slot_duration();
let additional_delay = slot_duration / 3;
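// With mainnet's 12 second slots this is a 4 second delay, i.e. the
// attestation deadline one third of the way into the slot.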
loop {
match chain.slot_clock.duration_to_next_slot() {
Some(duration) => {
sleep(duration + additional_delay).await;
debug!(
chain.log,
"Simulating unagg. attestation production";
);
// Run the task in the executor
let inner_chain = chain.clone();
executor.spawn(
async move {
if let Ok(current_slot) = inner_chain.slot() {
produce_unaggregated_attestation(inner_chain, current_slot);
}
},
"attestation_simulator_service",
);
}
None => {
error!(chain.log, "Failed to read slot clock");
// If we can't read the slot clock, just wait another slot.
sleep(slot_duration).await;
}
};
}
}
pub fn produce_unaggregated_attestation<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
current_slot: Slot,
) {
// Don't run the attestation simulator when the head slot is far behind the
// wall-clock slot.
//
// This helps prevent the simulator from becoming a burden by computing
// committees from old states.
let syncing_tolerance_slots = SYNCING_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch();
if chain.best_slot() + syncing_tolerance_slots < current_slot {
return;
}
// Since attestations for different committees are practically identical, apart from the
// committee index field, we attest with committee 0, which is guaranteed to exist.
// That means there's no need to load the committee.
let beacon_committee_index = 0;
// Store the unaggregated attestation in the validator monitor for later processing
match chain.produce_unaggregated_attestation(current_slot, beacon_committee_index) {
Ok(unaggregated_attestation) => {
let data = &unaggregated_attestation.data;
debug!(
chain.log,
"Produce unagg. attestation";
"attestation_source" => data.source.root.to_string(),
"attestation_target" => data.target.root.to_string(),
);
chain
.validator_monitor
.write()
.set_unaggregated_attestation(unaggregated_attestation);
}
Err(e) => {
debug!(
chain.log,
"Failed to simulate attestation";
"error" => ?e
);
}
}
}


@ -55,7 +55,7 @@ use std::borrow::Cow;
use strum::AsRefStr;
use tree_hash::TreeHash;
use types::{
Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256,
Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, ForkName, Hash256,
IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
};
@ -1049,10 +1049,21 @@ pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>(
}
// Taking advantage of saturating subtraction on `Slot`.
let earliest_permissible_slot = slot_clock
let one_epoch_prior = slot_clock
.now_with_past_tolerance(spec.maximum_gossip_clock_disparity())
.ok_or(BeaconChainError::UnableToReadSlot)?
- E::slots_per_epoch();
let current_fork =
spec.fork_name_at_slot::<E>(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?);
let earliest_permissible_slot = match current_fork {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => one_epoch_prior,
// EIP-7045
ForkName::Deneb => one_epoch_prior
.epoch(E::slots_per_epoch())
.start_slot(E::slots_per_epoch()),
};
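// Worked example (assuming 32-slot epochs and ignoring the clock-disparity
// tolerance): at wall-clock slot 70, `one_epoch_prior` is slot 38. Before
// Deneb the earliest permissible slot is therefore 38, while from Deneb
// onwards it is rounded down to slot 32, the start of epoch 1, so an
// attestation from anywhere in the previous epoch may still propagate
// (EIP-7045).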
if attestation_slot < earliest_permissible_slot {
return Err(Error::PastSlot {
attestation_slot,


@ -33,6 +33,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?;
state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
self.compute_beacon_block_reward_with_cache(block, block_root, state)
}
// This should only be called after a committee cache has been built
// for both the previous and current epoch
fn compute_beacon_block_reward_with_cache<Payload: AbstractExecPayload<T::EthSpec>>(
&self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>,
block_root: Hash256,
state: &BeaconState<T::EthSpec>,
) -> Result<StandardBlockReward, BeaconChainError> {
let proposer_index = block.proposer_index();
let sync_aggregate_reward =
@ -64,19 +75,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.compute_beacon_block_attestation_reward_base(block, block_root, state)
.map_err(|e| {
error!(
self.log,
"Error calculating base block attestation reward";
"error" => ?e
);
BeaconChainError::BlockRewardAttestationError
})?
} else {
self.compute_beacon_block_attestation_reward_altair(block, state)
self.compute_beacon_block_attestation_reward_altair_deneb(block, state)
.map_err(|e| {
error!(
self.log,
"Error calculating altair block attestation reward";
"error" => ?e
);
BeaconChainError::BlockRewardAttestationError
})?
@ -173,10 +184,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(block_attestation_reward)
}
fn compute_beacon_block_attestation_reward_altair<Payload: AbstractExecPayload<T::EthSpec>>(
fn compute_beacon_block_attestation_reward_altair_deneb<
Payload: AbstractExecPayload<T::EthSpec>,
>(
&self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>,
state: &mut BeaconState<T::EthSpec>,
state: &BeaconState<T::EthSpec>,
) -> Result<BeaconBlockSubRewardValue, BeaconChainError> {
let total_active_balance = state.get_total_active_balance()?;
let base_reward_per_increment =
@ -189,9 +202,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.safe_mul(WEIGHT_DENOMINATOR)?
.safe_div(PROPOSER_WEIGHT)?;
let mut current_epoch_participation = state.current_epoch_participation()?.clone();
let mut previous_epoch_participation = state.previous_epoch_participation()?.clone();
for attestation in block.body().attestations() {
let data = &attestation.data;
let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64();
// [Modified in Deneb:EIP7045]
let participation_flag_indices = get_attestation_participation_flag_indices(
state,
data,
@ -200,13 +217,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
)?;
let attesting_indices = get_attesting_indices_from_state(state, attestation)?;
let mut proposer_reward_numerator = 0;
for index in attesting_indices {
let index = index as usize;
for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() {
let epoch_participation =
state.get_epoch_participation_mut(data.target.epoch)?;
let epoch_participation = if data.target.epoch == state.current_epoch() {
&mut current_epoch_participation
} else {
&mut previous_epoch_participation
};
let validator_participation = epoch_participation
.get_mut(index)
.ok_or(BeaconStateError::ParticipationOutOfBounds(index))?;


@ -3,7 +3,7 @@ use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1};
use slog::{crit, debug, Logger};
use std::collections::HashMap;
use std::sync::Arc;
use store::DatabaseBlock;
use store::{DatabaseBlock, ExecutionPayloadDeneb};
use task_executor::TaskExecutor;
use tokio::sync::{
mpsc::{self, UnboundedSender},
@ -97,6 +97,7 @@ fn reconstruct_default_header_block<E: EthSpec>(
let payload: ExecutionPayload<E> = match fork {
ForkName::Merge => ExecutionPayloadMerge::default().into(),
ForkName::Capella => ExecutionPayloadCapella::default().into(),
ForkName::Deneb => ExecutionPayloadDeneb::default().into(),
ForkName::Base | ForkName::Altair => {
return Err(Error::PayloadReconstruction(format!(
"Block with fork variant {} has execution payload",
@ -714,19 +715,21 @@ mod tests {
}
#[tokio::test]
async fn check_all_blocks_from_altair_to_capella() {
async fn check_all_blocks_from_altair_to_deneb() {
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 8;
let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let num_blocks_produced = num_epochs * slots_per_epoch;
let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
let harness = get_harness(VALIDATOR_COUNT, spec);
let harness = get_harness(VALIDATOR_COUNT, spec.clone());
// go to bellatrix fork
harness
.extend_slots(bellatrix_fork_epoch * slots_per_epoch)
@ -833,17 +836,19 @@ mod tests {
}
#[tokio::test]
async fn check_fallback_altair_to_capella() {
async fn check_fallback_altair_to_deneb() {
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 8;
let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let num_blocks_produced = num_epochs * slots_per_epoch;
let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
let harness = get_harness(VALIDATOR_COUNT, spec);

File diff suppressed because it is too large.


@ -14,18 +14,20 @@ use lru::LruCache;
use smallvec::SmallVec;
use state_processing::state_advance::partial_state_advance;
use std::cmp::Ordering;
use std::num::NonZeroUsize;
use types::non_zero_usize::new_non_zero_usize;
use types::{
BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot,
Unsigned,
};
/// The number of sets of proposer indices that should be cached.
const CACHE_SIZE: usize = 16;
const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16);
/// This value is fairly unimportant, it's used to avoid heap allocations. The result of it being
/// incorrect is non-substantial from a consensus perspective (and probably also from a
/// performance perspective).
const TYPICAL_SLOTS_PER_EPOCH: usize = 32;
pub const TYPICAL_SLOTS_PER_EPOCH: usize = 32;
/// For some given slot, this contains the proposer index (`index`) and the `fork` that should be
/// used to verify their signature.


@ -0,0 +1,661 @@
use derivative::Derivative;
use slot_clock::SlotClock;
use std::sync::Arc;
use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT};
use crate::block_verification::{
cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info,
BlockSlashInfo,
};
use crate::kzg_utils::{validate_blob, validate_blobs};
use crate::{metrics, BeaconChainError};
use kzg::{Error as KzgError, Kzg, KzgCommitment};
use merkle_proof::MerkleTreeError;
use slog::{debug, warn};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use tree_hash::TreeHash;
use types::blob_sidecar::BlobIdentifier;
use types::{
BeaconStateError, BlobSidecar, CloneConfig, EthSpec, Hash256, SignedBeaconBlockHeader, Slot,
};
/// An error occurred while validating a gossip blob.
#[derive(Debug)]
pub enum GossipBlobError<T: EthSpec> {
/// The blob sidecar is from a slot that is later than the current slot (with respect to the
/// gossip clock disparity).
///
/// ## Peer scoring
///
/// Assuming the local clock is correct, the peer has sent an invalid message.
FutureSlot {
message_slot: Slot,
latest_permissible_slot: Slot,
},
/// There was an error whilst processing the blob. It is not known if it is
/// valid or invalid.
///
/// ## Peer scoring
///
/// We were unable to process this blob due to an internal error. It's
/// unclear if the blob is valid.
BeaconChainError(BeaconChainError),
/// The `BlobSidecar` was gossiped over an incorrect subnet.
///
/// ## Peer scoring
///
/// The blob is invalid or the peer is faulty.
InvalidSubnet { expected: u64, received: u64 },
/// The sidecar corresponds to a slot older than the finalized head slot.
///
/// ## Peer scoring
///
/// It's unclear if this blob is valid, but this blob is for a finalized slot and is
/// therefore useless to us.
PastFinalizedSlot {
blob_slot: Slot,
finalized_slot: Slot,
},
/// The proposer index specified in the sidecar does not match the locally computed
/// proposer index.
///
/// ## Peer scoring
///
/// The blob is invalid and the peer is faulty.
ProposerIndexMismatch { sidecar: usize, local: usize },
/// The proposal signature is invalid.
///
/// ## Peer scoring
///
/// The blob is invalid and the peer is faulty.
ProposalSignatureInvalid,
/// The proposer index corresponding to `blob.beacon_block_root` is not known.
///
/// ## Peer scoring
///
/// The blob is invalid and the peer is faulty.
UnknownValidator(u64),
/// The provided blob is not from a later slot than its parent.
///
/// ## Peer scoring
///
/// The blob is invalid and the peer is faulty.
BlobIsNotLaterThanParent { blob_slot: Slot, parent_slot: Slot },
/// The provided blob's parent block is unknown.
///
/// ## Peer scoring
///
/// We cannot process the blob without validating its parent, the peer isn't necessarily faulty.
BlobParentUnknown(Arc<BlobSidecar<T>>),
/// Invalid kzg commitment inclusion proof.
///
/// ## Peer scoring
///
/// The blob sidecar is invalid and the peer is faulty.
InvalidInclusionProof,
/// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple
/// over gossip or non-gossip sources.
///
/// ## Peer scoring
///
/// The peer isn't faulty, but we do not forward it over gossip.
RepeatBlob {
proposer: u64,
slot: Slot,
index: u64,
},
/// `Kzg` struct hasn't been initialized. This is an internal error.
///
/// ## Peer scoring
///
/// The peer isn't faulty; this is an internal error.
KzgNotInitialized,
/// The kzg verification failed.
///
/// ## Peer scoring
///
/// The blob sidecar is invalid and the peer is faulty.
KzgError(kzg::Error),
/// The kzg commitment inclusion proof failed.
///
/// ## Peer scoring
///
/// The blob sidecar is invalid.
InclusionProof(MerkleTreeError),
/// The pubkey cache timed out.
///
/// ## Peer scoring
///
/// The blob sidecar may be valid, this is an internal error.
PubkeyCacheTimeout,
/// The block conflicts with finalization, no need to propagate.
///
/// ## Peer scoring
///
/// It's unclear if this block is valid, but it conflicts with finality and shouldn't be
/// imported.
NotFinalizedDescendant { block_parent_root: Hash256 },
}
impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
GossipBlobError::BlobParentUnknown(blob_sidecar) => {
write!(
f,
"BlobParentUnknown(parent_root:{})",
blob_sidecar.block_parent_root()
)
}
other => write!(f, "{:?}", other),
}
}
}
impl<T: EthSpec> From<BeaconChainError> for GossipBlobError<T> {
fn from(e: BeaconChainError) -> Self {
GossipBlobError::BeaconChainError(e)
}
}
impl<T: EthSpec> From<BeaconStateError> for GossipBlobError<T> {
fn from(e: BeaconStateError) -> Self {
GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
}
}
pub type GossipVerifiedBlobList<T> = VariableList<
GossipVerifiedBlob<T>,
<<T as BeaconChainTypes>::EthSpec as EthSpec>::MaxBlobsPerBlock,
>;
/// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on
/// the p2p network.
#[derive(Debug)]
pub struct GossipVerifiedBlob<T: BeaconChainTypes> {
block_root: Hash256,
blob: KzgVerifiedBlob<T::EthSpec>,
}
impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
pub fn new(
blob: Arc<BlobSidecar<T::EthSpec>>,
subnet_id: u64,
chain: &BeaconChain<T>,
) -> Result<Self, GossipBlobError<T::EthSpec>> {
let header = blob.signed_block_header.clone();
// We only process slashing info if the gossip verification failed
// since we do not process the blob any further in that case.
validate_blob_sidecar_for_gossip(blob, subnet_id, chain).map_err(|e| {
process_block_slash_info::<_, GossipBlobError<T::EthSpec>>(
chain,
BlockSlashInfo::from_early_error_blob(header, e),
)
})
}
/// Construct a `GossipVerifiedBlob` that is assumed to be valid.
///
/// This should ONLY be used for testing.
pub fn __assumed_valid(blob: Arc<BlobSidecar<T::EthSpec>>) -> Self {
Self {
block_root: blob.block_root(),
blob: KzgVerifiedBlob { blob },
}
}
pub fn id(&self) -> BlobIdentifier {
BlobIdentifier {
block_root: self.block_root,
index: self.blob.blob_index(),
}
}
pub fn block_root(&self) -> Hash256 {
self.block_root
}
pub fn slot(&self) -> Slot {
self.blob.blob.slot()
}
pub fn index(&self) -> u64 {
self.blob.blob.index
}
pub fn kzg_commitment(&self) -> KzgCommitment {
self.blob.blob.kzg_commitment
}
pub fn signed_block_header(&self) -> SignedBeaconBlockHeader {
self.blob.blob.signed_block_header.clone()
}
pub fn block_proposer_index(&self) -> u64 {
self.blob.blob.block_proposer_index()
}
pub fn into_inner(self) -> KzgVerifiedBlob<T::EthSpec> {
self.blob
}
pub fn as_blob(&self) -> &BlobSidecar<T::EthSpec> {
self.blob.as_blob()
}
/// This is cheap as we're calling clone on an Arc
pub fn clone_blob(&self) -> Arc<BlobSidecar<T::EthSpec>> {
self.blob.clone_blob()
}
}
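// A minimal usage sketch (not part of this diff; assumes `chain: &BeaconChain<T>`, an
// incoming `blob: Arc<BlobSidecar<T::EthSpec>>` and its `subnet_id: u64` are in scope):
// gossip verification either yields a re-gossipable wrapper or a `GossipBlobError`
// describing how to score the peer.
//
// match GossipVerifiedBlob::new(blob, subnet_id, chain) {
//     Ok(verified) => {
//         // Safe to propagate; `verified.id()` is the `(block_root, index)` pair.
//         let _id = verified.id();
//     }
//     Err(e) => {
//         // The variant's "Peer scoring" docs above describe the penalty to apply.
//         let _msg = format!("rejected blob: {e}");
//     }
// }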
/// Wrapper over a `BlobSidecar` for which we have completed kzg verification.
/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`.
#[derive(Debug, Derivative, Clone, Encode, Decode)]
#[derivative(PartialEq, Eq)]
#[ssz(struct_behaviour = "transparent")]
pub struct KzgVerifiedBlob<T: EthSpec> {
blob: Arc<BlobSidecar<T>>,
}
impl<T: EthSpec> PartialOrd for KzgVerifiedBlob<T> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<T: EthSpec> Ord for KzgVerifiedBlob<T> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.blob.cmp(&other.blob)
}
}
impl<T: EthSpec> KzgVerifiedBlob<T> {
pub fn new(blob: Arc<BlobSidecar<T>>, kzg: &Kzg) -> Result<Self, KzgError> {
verify_kzg_for_blob(blob, kzg)
}
pub fn to_blob(self) -> Arc<BlobSidecar<T>> {
self.blob
}
pub fn as_blob(&self) -> &BlobSidecar<T> {
&self.blob
}
/// This is cheap as we're calling clone on an Arc
pub fn clone_blob(&self) -> Arc<BlobSidecar<T>> {
self.blob.clone()
}
pub fn blob_index(&self) -> u64 {
self.blob.index
}
/// Construct a `KzgVerifiedBlob` that is assumed to be valid.
///
/// This should ONLY be used for testing.
#[cfg(test)]
pub fn __assumed_valid(blob: Arc<BlobSidecar<T>>) -> Self {
Self { blob }
}
}
/// Complete kzg verification for a `BlobSidecar`.
///
/// Returns an error if the kzg verification check fails.
pub fn verify_kzg_for_blob<T: EthSpec>(
blob: Arc<BlobSidecar<T>>,
kzg: &Kzg,
) -> Result<KzgVerifiedBlob<T>, KzgError> {
validate_blob::<T>(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)?;
Ok(KzgVerifiedBlob { blob })
}
pub struct KzgVerifiedBlobList<E: EthSpec> {
verified_blobs: Vec<KzgVerifiedBlob<E>>,
}
impl<E: EthSpec> KzgVerifiedBlobList<E> {
pub fn new<I: IntoIterator<Item = Arc<BlobSidecar<E>>>>(
blob_list: I,
kzg: &Kzg,
) -> Result<Self, KzgError> {
let blobs = blob_list.into_iter().collect::<Vec<_>>();
verify_kzg_for_blob_list(blobs.iter(), kzg)?;
Ok(Self {
verified_blobs: blobs
.into_iter()
.map(|blob| KzgVerifiedBlob { blob })
.collect(),
})
}
}
impl<E: EthSpec> IntoIterator for KzgVerifiedBlobList<E> {
type Item = KzgVerifiedBlob<E>;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.verified_blobs.into_iter()
}
}
/// Complete kzg verification for a list of `BlobSidecar`s.
/// Returns an error if any of the `BlobSidecar`s fails kzg verification.
///
/// Note: This function should be preferred over calling `verify_kzg_for_blob`
/// in a loop since this function kzg verifies a list of blobs more efficiently.
pub fn verify_kzg_for_blob_list<'a, T: EthSpec, I>(
blob_iter: I,
kzg: &'a Kzg,
) -> Result<(), KzgError>
where
I: Iterator<Item = &'a Arc<BlobSidecar<T>>>,
{
let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_iter
.map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof)))
.unzip();
validate_blobs::<T>(kzg, commitments.as_slice(), blobs, proofs.as_slice())
}
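// A minimal sketch of the batching advantage (assumes `kzg: &Kzg` and
// `sidecars: Vec<Arc<BlobSidecar<E>>>` are in scope):
//
// // Preferred: one batched verification for the whole list.
// verify_kzg_for_blob_list(sidecars.iter(), kzg)?;
//
// // Equivalent result, but one pairing check per blob.
// for sidecar in &sidecars {
//     verify_kzg_for_blob(sidecar.clone(), kzg)?;
// }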
pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
blob_sidecar: Arc<BlobSidecar<T::EthSpec>>,
subnet: u64,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
let blob_slot = blob_sidecar.slot();
let blob_index = blob_sidecar.index;
let block_parent_root = blob_sidecar.block_parent_root();
let blob_proposer_index = blob_sidecar.block_proposer_index();
let block_root = blob_sidecar.block_root();
let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch());
let signed_block_header = &blob_sidecar.signed_block_header;
// This condition is not possible if we have received the blob from the network
// since we only subscribe to `MaxBlobsPerBlock` subnets over the gossip network.
// We include this check only for completeness.
// Getting this error would imply something very wrong with our network decoding logic.
if blob_index >= T::EthSpec::max_blobs_per_block() as u64 {
return Err(GossipBlobError::InvalidSubnet {
expected: subnet,
received: blob_index,
});
}
// Verify that the blob_sidecar was received on the correct subnet.
if blob_index != subnet {
return Err(GossipBlobError::InvalidSubnet {
expected: blob_index,
received: subnet,
});
}
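// For example (illustrative only): a sidecar with `index == 2` must arrive on blob
// subnet 2, so receiving it on subnet 0 yields
// `InvalidSubnet { expected: 2, received: 0 }`.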
// Verify that the sidecar is not from a future slot.
let latest_permissible_slot = chain
.slot_clock
.now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > latest_permissible_slot {
return Err(GossipBlobError::FutureSlot {
message_slot: blob_slot,
latest_permissible_slot,
});
}
// Verify that the sidecar slot is greater than the latest finalized slot
let latest_finalized_slot = chain
.head()
.finalized_checkpoint()
.epoch
.start_slot(T::EthSpec::slots_per_epoch());
if blob_slot <= latest_finalized_slot {
return Err(GossipBlobError::PastFinalizedSlot {
blob_slot,
finalized_slot: latest_finalized_slot,
});
}
// Verify that this is the first blob sidecar received for the tuple:
// (block_header.slot, block_header.proposer_index, blob_sidecar.index)
if chain
.observed_blob_sidecars
.read()
.proposer_is_known(&blob_sidecar)
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{
return Err(GossipBlobError::RepeatBlob {
proposer: blob_proposer_index,
slot: blob_slot,
index: blob_index,
});
}
// Verify the inclusion proof in the sidecar
let _timer = metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION);
if !blob_sidecar
.verify_blob_sidecar_inclusion_proof()
.map_err(GossipBlobError::InclusionProof)?
{
return Err(GossipBlobError::InvalidInclusionProof);
}
drop(_timer);
let fork_choice = chain.canonical_head.fork_choice_read_lock();
// We have already verified that the blob slot is newer than the finalized slot, so we
// can just check fork choice for the block's parent.
let Some(parent_block) = fork_choice.get_block(&block_parent_root) else {
return Err(GossipBlobError::BlobParentUnknown(blob_sidecar));
};
// Do not process a blob that does not descend from the finalized root.
// We just loaded the parent_block, so we can be sure that it exists in fork choice.
if !fork_choice.is_finalized_checkpoint_or_descendant(block_parent_root) {
return Err(GossipBlobError::NotFinalizedDescendant { block_parent_root });
}
drop(fork_choice);
if parent_block.slot >= blob_slot {
return Err(GossipBlobError::BlobIsNotLaterThanParent {
blob_slot,
parent_slot: parent_block.slot,
});
}
let proposer_shuffling_root =
if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == blob_epoch {
parent_block
.next_epoch_shuffling_id
.shuffling_decision_block
} else {
parent_block.root
};
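// Why this works (illustrative note, 32-slot epochs): the proposer shuffling for epoch N
// is decided by the last block of epoch N-1, which is the same block that decides the
// attester shuffling for epoch N+1. So a blob at slot 70 (epoch 2) with a parent at
// slot 66 (also epoch 2) reuses the parent's `next_epoch_shuffling_id` decision block,
// i.e. the last block of epoch 1. If instead the parent were at slot 63 (epoch 1), the
// parent itself is the latest block before epoch 2 and is therefore the decision block.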
let proposer_opt = chain
.beacon_proposer_cache
.lock()
.get_slot::<T::EthSpec>(proposer_shuffling_root, blob_slot);
let (proposer_index, fork) = if let Some(proposer) = proposer_opt {
(proposer.index, proposer.fork)
} else {
debug!(
chain.log,
"Proposer shuffling cache miss for blob verification";
"block_root" => %block_root,
"index" => %blob_index,
);
if let Some(mut snapshot) = chain
.snapshot_cache
.try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
.and_then(|snapshot_cache| {
snapshot_cache.get_cloned(block_parent_root, CloneConfig::committee_caches_only())
})
{
if snapshot.beacon_state.slot() == blob_slot {
debug!(
chain.log,
"Cloning snapshot cache state for blob verification";
"block_root" => %block_root,
"index" => %blob_index,
);
(
snapshot
.beacon_state
.get_beacon_proposer_index(blob_slot, &chain.spec)?,
snapshot.beacon_state.fork(),
)
} else {
debug!(
chain.log,
"Cloning and advancing snapshot cache state for blob verification";
"block_root" => %block_root,
"index" => %blob_index,
);
let state =
cheap_state_advance_to_obtain_committees::<_, GossipBlobError<T::EthSpec>>(
&mut snapshot.beacon_state,
Some(snapshot.beacon_block_root),
blob_slot,
&chain.spec,
)?;
(
state.get_beacon_proposer_index(blob_slot, &chain.spec)?,
state.fork(),
)
}
}
// Need to advance the state to get the proposer index
else {
warn!(
chain.log,
"Snapshot cache miss for blob verification";
"block_root" => %block_root,
"index" => %blob_index,
);
let parent_block = chain
.get_blinded_block(&block_parent_root)
.map_err(GossipBlobError::BeaconChainError)?
.ok_or_else(|| {
GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root))
})?;
let mut parent_state = chain
.get_state(&parent_block.state_root(), Some(parent_block.slot()))?
.ok_or_else(|| {
BeaconChainError::DBInconsistent(format!(
"Missing state {:?}",
parent_block.state_root()
))
})?;
let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError<T::EthSpec>>(
&mut parent_state,
Some(parent_block.state_root()),
blob_slot,
&chain.spec,
)?;
let proposers = state.get_beacon_proposer_indices(&chain.spec)?;
let proposer_index = *proposers
.get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize)
.ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?;
let fork = state.fork();
// Prime the proposer shuffling cache with the newly-learned value.
chain.beacon_proposer_cache.lock().insert(
blob_epoch,
proposer_shuffling_root,
proposers,
fork,
)?;
(proposer_index, fork)
}
};
// Signature verify the signed block header.
let signature_is_valid = {
let pubkey_cache =
get_validator_pubkey_cache(chain).map_err(|_| GossipBlobError::PubkeyCacheTimeout)?;
let pubkey = pubkey_cache
.get(proposer_index)
.ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?;
signed_block_header.verify_signature::<T::EthSpec>(
pubkey,
&fork,
chain.genesis_validators_root,
&chain.spec,
)
};
if !signature_is_valid {
return Err(GossipBlobError::ProposalSignatureInvalid);
}
if proposer_index != blob_proposer_index as usize {
return Err(GossipBlobError::ProposerIndexMismatch {
sidecar: blob_proposer_index as usize,
local: proposer_index,
});
}
chain
.observed_slashable
.write()
.observe_slashable(
blob_sidecar.slot(),
blob_sidecar.block_proposer_index(),
block_root,
)
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?;
// Now the signature is valid, store the proposal so we don't accept another blob sidecar
// with the same `BlobIdentifier`.
// It's important to double-check that the proposer still hasn't been observed so we don't
// have a race condition when verifying two blob sidecars simultaneously.
//
// Note: If this BlobSidecar goes on to fail full verification, we do not evict it from the
// seen cache, as alternate blob sidecars for the same identifier can still be retrieved over
// rpc. Evicting them from this cache would allow faster propagation over gossip, but we
// prefer to allow retrieval of potentially valid blobs over rpc while trying to punish the
// proposer for signing invalid messages. See this issue for more background:
// https://github.com/ethereum/consensus-specs/issues/3261
if chain
.observed_blob_sidecars
.write()
.observe_sidecar(&blob_sidecar)
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{
return Err(GossipBlobError::RepeatBlob {
proposer: proposer_index as u64,
slot: blob_slot,
index: blob_index,
});
}
// Kzg verification for gossip blob sidecar
let kzg = chain
.kzg
.as_ref()
.ok_or(GossipBlobError::KzgNotInitialized)?;
let kzg_verified_blob =
KzgVerifiedBlob::new(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?;
Ok(GossipVerifiedBlob {
block_root,
blob: kzg_verified_blob,
})
}
/// Returns the canonical root of the given `blob`.
///
/// Use this function to ensure that we report the blob hashing time Prometheus metric.
pub fn get_blob_root<E: EthSpec>(blob: &BlobSidecar<E>) -> Hash256 {
let blob_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOB_ROOT);
let blob_root = blob.tree_hash_root();
metrics::stop_timer(blob_root_timer);
blob_root
}
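// Usage sketch (illustrative; assumes a `sidecar: BlobSidecar<E>` in scope): the returned
// root is simply the sidecar's tree hash, with the hashing time recorded in the metric.
//
// let root = get_blob_root(&sidecar);
// debug_assert_eq!(root, sidecar.tree_hash_root());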

View File

@ -23,7 +23,7 @@ pub struct Timestamps {
}
// Helps arrange delay data so it is more relevant to metrics.
#[derive(Default)]
#[derive(Debug, Default)]
pub struct BlockDelays {
pub observed: Option<Duration>,
pub imported: Option<Duration>,
@ -51,7 +51,7 @@ impl BlockDelays {
// If the block was received via gossip, we can record the client type of the peer which sent us
// the block.
#[derive(Clone, Default)]
#[derive(Debug, Clone, Default, PartialEq)]
pub struct BlockPeerInfo {
pub id: Option<String>,
pub client: Option<String>,
@ -80,6 +80,8 @@ pub struct BlockTimesCache {
/// Helper methods to read from and write to the cache.
impl BlockTimesCache {
/// Set the observation time for `block_root` to `timestamp` if `timestamp` is less than
/// any previous timestamp at which this block was observed.
pub fn set_time_observed(
&mut self,
block_root: BlockRoot,
@ -92,11 +94,19 @@ impl BlockTimesCache {
.cache
.entry(block_root)
.or_insert_with(|| BlockTimesCacheValue::new(slot));
block_times.timestamps.observed = Some(timestamp);
block_times.peer_info = BlockPeerInfo {
id: peer_id,
client: peer_client,
};
match block_times.timestamps.observed {
Some(existing_observation_time) if existing_observation_time <= timestamp => {
// Existing timestamp is earlier, do nothing.
}
_ => {
// No existing timestamp, or new timestamp is earlier.
block_times.timestamps.observed = Some(timestamp);
block_times.peer_info = BlockPeerInfo {
id: peer_id,
client: peer_client,
};
}
}
}
pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
@ -141,3 +151,71 @@ impl BlockTimesCache {
.retain(|_, cache| cache.slot > current_slot.saturating_sub(64_u64));
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn observed_time_uses_minimum() {
let mut cache = BlockTimesCache::default();
let block_root = Hash256::zero();
let slot = Slot::new(100);
let slot_start_time = Duration::from_secs(0);
let ts1 = Duration::from_secs(5);
let ts2 = Duration::from_secs(6);
let ts3 = Duration::from_secs(4);
let peer_info2 = BlockPeerInfo {
id: Some("peer2".to_string()),
client: Some("lighthouse".to_string()),
};
let peer_info3 = BlockPeerInfo {
id: Some("peer3".to_string()),
client: Some("prysm".to_string()),
};
cache.set_time_observed(block_root, slot, ts1, None, None);
assert_eq!(
cache.get_block_delays(block_root, slot_start_time).observed,
Some(ts1)
);
assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default());
// Second observation with higher timestamp should not override anything, even though it has
// superior peer info.
cache.set_time_observed(
block_root,
slot,
ts2,
peer_info2.id.clone(),
peer_info2.client.clone(),
);
assert_eq!(
cache.get_block_delays(block_root, slot_start_time).observed,
Some(ts1)
);
assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default());
// Third observation with lower timestamp should override everything.
cache.set_time_observed(
block_root,
slot,
ts3,
peer_info3.id.clone(),
peer_info3.client.clone(),
);
assert_eq!(
cache.get_block_delays(block_root, slot_start_time).observed,
Some(ts3)
);
assert_eq!(cache.get_peer_info(block_root), peer_info3);
}
}

View File

@ -23,6 +23,7 @@
//! |
//! ▼
//! SignedBeaconBlock
//! |
//! |---------------
//! | |
//! | ▼
@ -47,6 +48,11 @@
// returned alongside.
#![allow(clippy::result_large_err)]
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
use crate::block_verification_types::{
AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock,
};
use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock};
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::execution_payload::{
is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
@ -64,15 +70,17 @@ use crate::{
metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use derivative::Derivative;
use eth2::types::EventKind;
use eth2::types::{EventKind, PublishBlockRequest};
use execution_layer::PayloadStatus;
use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard;
use proto_array::Block as ProtoBlock;
use safe_arith::ArithError;
use slog::{debug, error, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
use state_processing::{
block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
@ -82,6 +90,7 @@ use state_processing::{
StateProcessingStrategy, VerifyBlockRoot,
};
use std::borrow::Cow;
use std::fmt::Debug;
use std::fs;
use std::io::Write;
use std::sync::Arc;
@ -89,12 +98,12 @@ use std::time::Duration;
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
use task_executor::JoinHandle;
use tree_hash::TreeHash;
use types::ExecPayload;
use types::{
BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch,
EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes,
RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec,
ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch,
SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
};
use types::{BlobSidecar, ExecPayload};
pub const POS_PANDA_BANNER: &str = r#"
,,, ,,, ,,, ,,,
@ -141,7 +150,7 @@ pub enum BlockError<T: EthSpec> {
///
/// It's unclear if this block is valid, but it cannot be processed without already knowing
/// its parent.
ParentUnknown(Arc<SignedBeaconBlock<T>>),
ParentUnknown(RpcBlock<T>),
/// The block slot is greater than the present slot.
///
/// ## Peer scoring
@ -215,7 +224,7 @@ pub enum BlockError<T: EthSpec> {
///
/// The block is invalid and the peer is faulty.
InvalidSignature,
/// The provided block is from an later slot than its parent.
/// The provided block is not from a later slot than its parent.
///
/// ## Peer scoring
///
@ -284,6 +293,27 @@ pub enum BlockError<T: EthSpec> {
/// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
/// we penalise them with a mid-tolerance error.
Slashable,
/// The block and blob together failed validation.
///
/// ## Peer scoring
///
/// This error implies that the block satisfied all block validity conditions except consistency
/// with the corresponding blob that we received over gossip/rpc. This is because availability
/// checks are always done after all other checks are completed.
/// It implies that either:
/// 1. The block proposer is faulty.
/// 2. We received the blob over rpc and it is invalid (inconsistent w.r.t. the block).
/// 3. It is an internal error.
/// In all of these cases, we cannot penalize the peer that gave us the block.
/// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob.
/// https://github.com/sigp/lighthouse/issues/4546
AvailabilityCheck(AvailabilityCheckError),
}
impl<T: EthSpec> From<AvailabilityCheckError> for BlockError<T> {
fn from(e: AvailabilityCheckError) -> Self {
Self::AvailabilityCheck(e)
}
}
/// Returned when block validation failed due to some issue verifying
@ -459,6 +489,7 @@ impl<T: EthSpec> From<ArithError> for BlockError<T> {
}
/// Stores information about verifying a payload against an execution engine.
#[derive(Debug, PartialEq, Clone, Encode, Decode)]
pub struct PayloadVerificationOutcome {
pub payload_verification_status: PayloadVerificationStatus,
pub is_valid_merge_transition_block: bool,
@ -476,7 +507,7 @@ pub enum BlockSlashInfo<TErr> {
}
impl<E: EthSpec> BlockSlashInfo<BlockError<E>> {
pub fn from_early_error(header: SignedBeaconBlockHeader, e: BlockError<E>) -> Self {
pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError<E>) -> Self {
match e {
BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e),
// `InvalidSignature` could indicate any signature in the block, so we want
@ -486,17 +517,28 @@ impl<E: EthSpec> BlockSlashInfo<BlockError<E>> {
}
}
impl<E: EthSpec> BlockSlashInfo<GossipBlobError<E>> {
pub fn from_early_error_blob(header: SignedBeaconBlockHeader, e: GossipBlobError<E>) -> Self {
match e {
GossipBlobError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e),
// `InvalidSignature` could indicate any signature in the block, so we want
// to recheck the proposer signature alone.
_ => BlockSlashInfo::SignatureNotChecked(header, e),
}
}
}
/// Process invalid blocks to see if they are suitable for the slasher.
///
/// If no slasher is configured, this is a no-op.
fn process_block_slash_info<T: BeaconChainTypes>(
pub(crate) fn process_block_slash_info<T: BeaconChainTypes, TErr: BlockBlobError>(
chain: &BeaconChain<T>,
slash_info: BlockSlashInfo<BlockError<T::EthSpec>>,
) -> BlockError<T::EthSpec> {
slash_info: BlockSlashInfo<TErr>,
) -> TErr {
if let Some(slasher) = chain.slasher.as_ref() {
let (verified_header, error) = match slash_info {
BlockSlashInfo::SignatureNotChecked(header, e) => {
if verify_header_signature(chain, &header).is_ok() {
if verify_header_signature::<_, TErr>(chain, &header).is_ok() {
(header, e)
} else {
return e;
@ -528,7 +570,7 @@ fn process_block_slash_info<T: BeaconChainTypes>(
/// The given `chain_segment` must contain only blocks from the same epoch, otherwise an error
/// will be returned.
pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
mut chain_segment: Vec<(Hash256, Arc<SignedBeaconBlock<T::EthSpec>>)>,
mut chain_segment: Vec<(Hash256, RpcBlock<T::EthSpec>)>,
chain: &BeaconChain<T>,
) -> Result<Vec<SignatureVerifiedBlock<T>>, BlockError<T::EthSpec>> {
if chain_segment.is_empty() {
@ -545,32 +587,40 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
.map(|(_, block)| block.slot())
.unwrap_or_else(|| slot);
let state = cheap_state_advance_to_obtain_committees(
let state = cheap_state_advance_to_obtain_committees::<_, BlockError<T::EthSpec>>(
&mut parent.pre_state,
parent.beacon_state_root,
highest_slot,
&chain.spec,
)?;
// unzip chain segment and verify kzg in bulk
let (roots, blocks): (Vec<_>, Vec<_>) = chain_segment.into_iter().unzip();
let maybe_available_blocks = chain
.data_availability_checker
.verify_kzg_for_rpc_blocks(blocks)?;
// zip it back up
let mut signature_verified_blocks = roots
.into_iter()
.zip(maybe_available_blocks)
.map(|(block_root, maybe_available_block)| {
let consensus_context = ConsensusContext::new(maybe_available_block.slot())
.set_current_block_root(block_root);
SignatureVerifiedBlock {
block: maybe_available_block,
block_root,
parent: None,
consensus_context,
}
})
.collect::<Vec<_>>();
// verify signatures
let pubkey_cache = get_validator_pubkey_cache(chain)?;
let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);
let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len());
for (block_root, block) in &chain_segment {
let mut consensus_context =
ConsensusContext::new(block.slot()).set_current_block_root(*block_root);
signature_verifier.include_all_signatures(block, &mut consensus_context)?;
// Save the block and its consensus context. The context will have had its proposer index
// and attesting indices filled in, which can be used to accelerate later block processing.
signature_verified_blocks.push(SignatureVerifiedBlock {
block: block.clone(),
block_root: *block_root,
parent: None,
consensus_context,
});
for svb in &mut signature_verified_blocks {
signature_verifier
.include_all_signatures(svb.block.as_block(), &mut svb.consensus_context)?;
}
if signature_verifier.verify().is_err() {
@ -600,7 +650,7 @@ pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
/// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit
/// signatures) have been verified.
pub struct SignatureVerifiedBlock<T: BeaconChainTypes> {
block: Arc<SignedBeaconBlock<T::EthSpec>>,
block: MaybeAvailableBlock<T::EthSpec>,
block_root: Hash256,
parent: Option<PreProcessingSnapshot<T::EthSpec>>,
consensus_context: ConsensusContext<T::EthSpec>,
@ -617,52 +667,68 @@ type PayloadVerificationHandle<E> =
/// - Signatures
/// - State root check
/// - Per block processing
/// - Blobs sidecar has been validated if present
///
/// Note: an `ExecutionPendingBlock` is not _forever_ valid to be imported; it may later become
/// invalid due to finality or some other event. An `ExecutionPendingBlock` should be imported into
/// the `BeaconChain` immediately after it is instantiated.
pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
pub block: Arc<SignedBeaconBlock<T::EthSpec>>,
pub block_root: Hash256,
pub state: BeaconState<T::EthSpec>,
pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
pub parent_eth1_finalization_data: Eth1FinalizationData,
pub confirmed_state_roots: Vec<Hash256>,
pub consensus_context: ConsensusContext<T::EthSpec>,
pub block: MaybeAvailableBlock<T::EthSpec>,
pub import_data: BlockImportData<T::EthSpec>,
pub payload_verification_handle: PayloadVerificationHandle<T::EthSpec>,
}
pub trait IntoGossipVerifiedBlock<T: BeaconChainTypes>: Sized {
pub trait IntoGossipVerifiedBlockContents<T: BeaconChainTypes>: Sized {
fn into_gossip_verified_block(
self,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>>;
fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>>;
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>>;
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec>;
}
impl<T: BeaconChainTypes> IntoGossipVerifiedBlock<T> for GossipVerifiedBlock<T> {
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for GossipVerifiedBlockContents<T> {
fn into_gossip_verified_block(
self,
_chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
Ok(self)
}
fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
self.block.clone()
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.0.block.as_block()
}
}
impl<T: BeaconChainTypes> IntoGossipVerifiedBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>> {
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for PublishBlockRequest<T::EthSpec> {
fn into_gossip_verified_block(
self,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
GossipVerifiedBlock::new(self, chain)
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
let (block, blobs) = self.deconstruct();
let gossip_verified_blobs = blobs
.map(|(kzg_proofs, blobs)| {
let mut gossip_verified_blobs = vec![];
for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() {
let _timer =
metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION);
let blob = BlobSidecar::new(i, blob, &block, *kzg_proof)
.map_err(BlockContentsError::SidecarError)?;
drop(_timer);
let gossip_verified_blob =
GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?;
gossip_verified_blobs.push(gossip_verified_blob);
}
let gossip_verified_blobs = VariableList::from(gossip_verified_blobs);
Ok::<_, BlockContentsError<T::EthSpec>>(gossip_verified_blobs)
})
.transpose()?;
let gossip_verified_block = GossipVerifiedBlock::new(block, chain)?;
Ok((gossip_verified_block, gossip_verified_blobs))
}
fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
self.clone()
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.signed_block()
}
}
@ -684,7 +750,9 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
}
execution_pending
})
.map_err(|slash_info| process_block_slash_info(chain, slash_info))
.map_err(|slash_info| {
process_block_slash_info::<_, BlockError<T::EthSpec>>(chain, slash_info)
})
}
/// Convert the block to fully-verified form while producing data to aid checking slashability.
@ -712,14 +780,21 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
// it to the slasher if an error occurs, because that's the end of this block's journey,
// and it could be a repeat proposal (a likely cause for slashing!).
let header = block.signed_block_header();
Self::new_without_slasher_checks(block, chain).map_err(|e| {
process_block_slash_info(chain, BlockSlashInfo::from_early_error(header, e))
// The `SignedBeaconBlock` and `SignedBeaconBlockHeader` have the same canonical root,
// but it's much quicker to calculate the root of the header, since the hash of the tree
// rooted at `BeaconBlockBody` is already computed in the header.
Self::new_without_slasher_checks(block, &header, chain).map_err(|e| {
process_block_slash_info::<_, BlockError<T::EthSpec>>(
chain,
BlockSlashInfo::from_early_error_block(header, e),
)
})
}
/// As for `new`, but doesn't pass the block to the slasher.
fn new_without_slasher_checks(
block: Arc<SignedBeaconBlock<T::EthSpec>>,
block_header: &SignedBeaconBlockHeader,
chain: &BeaconChain<T>,
) -> Result<Self, BlockError<T::EthSpec>> {
// Ensure the block is the correct structure for the fork at `block.slot()`.
@ -739,7 +814,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
});
}
let block_root = get_block_root(&block);
let block_root = get_block_header_root(block_header);
// Disallow blocks that conflict with the anchor (weak subjectivity checkpoint), if any.
check_block_against_anchor_slot(block.message(), chain)?;
@ -762,11 +837,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
// Do not process a block that doesn't descend from the finalized root.
//
// We check this *before* we load the parent so that we can return a more detailed error.
check_block_is_finalized_checkpoint_or_descendant(chain, &fork_choice_read_lock, &block)?;
drop(fork_choice_read_lock);
let block = check_block_is_finalized_checkpoint_or_descendant(
chain,
&fork_choice_read_lock,
block,
)?;
let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
let (parent_block, block) = verify_parent_block_is_known(chain, block)?;
let (parent_block, block) =
verify_parent_block_is_known::<T>(block_root, &fork_choice_read_lock, block)?;
drop(fork_choice_read_lock);
// Track the number of skip slots between the block and its parent.
metrics::set_gauge(
@ -825,7 +905,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
);
// The state produced is only valid for determining proposer/attester shuffling indices.
let state = cheap_state_advance_to_obtain_committees(
let state = cheap_state_advance_to_obtain_committees::<_, BlockError<T::EthSpec>>(
&mut parent.pre_state,
parent.beacon_state_root,
block.slot(),
@ -866,6 +946,11 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
return Err(BlockError::ProposalSignatureInvalid);
}
chain
.observed_slashable
.write()
.observe_slashable(block.slot(), block.message().proposer_index(), block_root)
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
// Now the signature is valid, store the proposal so we don't accept another from this
// validator and slot.
//
@ -877,7 +962,9 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
.observe_proposal(block_root, block.message())
.map_err(|e| BlockError::BeaconChainError(e.into()))?
{
SeenBlock::Slashable => return Err(BlockError::Slashable),
SeenBlock::Slashable => {
return Err(BlockError::Slashable);
}
SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown),
SeenBlock::UniqueNonSlashable => {}
};
@ -895,7 +982,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
// Having checked the proposer index and the block root we can cache them.
let consensus_context = ConsensusContext::new(block.slot())
.set_current_block_root(block_root)
.set_proposer_index(block.message().proposer_index());
.set_proposer_index(block.as_block().message().proposer_index());
Ok(Self {
block,
@ -928,7 +1015,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for GossipVerifiedBlock<T
}
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
&self.block
self.block.as_block()
}
}
@ -938,12 +1025,13 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
///
/// Returns an error if the block is invalid, or if the block was unable to be verified.
pub fn new(
block: Arc<SignedBeaconBlock<T::EthSpec>>,
block: MaybeAvailableBlock<T::EthSpec>,
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<Self, BlockError<T::EthSpec>> {
// Ensure the block is the correct structure for the fork at `block.slot()`.
block
.as_block()
.fork_name(&chain.spec)
.map_err(BlockError::InconsistentFork)?;
@ -952,7 +1040,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
let (mut parent, block) = load_parent(block_root, block, chain)?;
let state = cheap_state_advance_to_obtain_committees(
let state = cheap_state_advance_to_obtain_committees::<_, BlockError<T::EthSpec>>(
&mut parent.pre_state,
parent.beacon_state_root,
block.slot(),
@ -966,7 +1054,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
let mut consensus_context =
ConsensusContext::new(block.slot()).set_current_block_root(block_root);
signature_verifier.include_all_signatures(&block, &mut consensus_context)?;
signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?;
if signature_verifier.verify().is_ok() {
Ok(Self {
@ -982,12 +1070,13 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
/// As for `new` above but producing `BlockSlashInfo`.
pub fn check_slashable(
block: Arc<SignedBeaconBlock<T::EthSpec>>,
block: MaybeAvailableBlock<T::EthSpec>,
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
let header = block.signed_block_header();
Self::new(block, block_root, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e))
Self::new(block, block_root, chain)
.map_err(|e| BlockSlashInfo::from_early_error_block(header, e))
}
/// Finishes signature verification on the provided `GossipVerifiedBlock`. Does not re-verify
@ -1002,7 +1091,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
load_parent(from.block_root, from.block, chain)?
};
let state = cheap_state_advance_to_obtain_committees(
let state = cheap_state_advance_to_obtain_committees::<_, BlockError<T::EthSpec>>(
&mut parent.pre_state,
parent.beacon_state_root,
block.slot(),
@ -1017,11 +1106,14 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
// signature.
let mut consensus_context = from.consensus_context;
signature_verifier
.include_all_signatures_except_proposal(&block, &mut consensus_context)?;
.include_all_signatures_except_proposal(block.as_ref(), &mut consensus_context)?;
if signature_verifier.verify().is_ok() {
Ok(Self {
block,
block: MaybeAvailableBlock::AvailabilityPending {
block_root: from.block_root,
block,
},
block_root: from.block_root,
parent: Some(parent),
consensus_context,
@ -1038,7 +1130,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
let header = from.block.signed_block_header();
Self::from_gossip_verified_block(from, chain)
.map_err(|e| BlockSlashInfo::from_early_error(header, e))
.map_err(|e| BlockSlashInfo::from_early_error_block(header, e))
}
pub fn block_root(&self) -> Hash256 {
@ -1074,7 +1166,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
}
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
&self.block
self.block.as_block()
}
}
@ -1090,8 +1182,16 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
// Perform an early check to prevent wasting time on irrelevant blocks.
let block_root = check_block_relevancy(&self, block_root, chain)
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
SignatureVerifiedBlock::check_slashable(self, block_root, chain)?
let maybe_available = chain
.data_availability_checker
.verify_kzg_for_rpc_block(RpcBlock::new_without_blobs(Some(block_root), self.clone()))
.map_err(|e| {
BlockSlashInfo::SignatureNotChecked(
self.signed_block_header(),
BlockError::AvailabilityCheck(e),
)
})?;
SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)?
.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
}
@ -1100,6 +1200,36 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
}
}
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for RpcBlock<T::EthSpec> {
/// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock`
/// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.
fn into_execution_pending_block_slashable(
self,
block_root: Hash256,
chain: &Arc<BeaconChain<T>>,
notify_execution_layer: NotifyExecutionLayer,
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
// Perform an early check to prevent wasting time on irrelevant blocks.
let block_root = check_block_relevancy(self.as_block(), block_root, chain)
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
let maybe_available = chain
.data_availability_checker
.verify_kzg_for_rpc_block(self.clone())
.map_err(|e| {
BlockSlashInfo::SignatureNotChecked(
self.signed_block_header(),
BlockError::AvailabilityCheck(e),
)
})?;
SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)?
.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
}
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.as_block()
}
}
impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
/// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See
/// the struct-level documentation for more information.
@ -1109,13 +1239,19 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
///
/// Returns an error if the block is invalid, or if the block was unable to be verified.
pub fn from_signature_verified_components(
block: Arc<SignedBeaconBlock<T::EthSpec>>,
block: MaybeAvailableBlock<T::EthSpec>,
block_root: Hash256,
parent: PreProcessingSnapshot<T::EthSpec>,
mut consensus_context: ConsensusContext<T::EthSpec>,
chain: &Arc<BeaconChain<T>>,
notify_execution_layer: NotifyExecutionLayer,
) -> Result<Self, BlockError<T::EthSpec>> {
chain
.observed_slashable
.write()
.observe_slashable(block.slot(), block.message().proposer_index(), block_root)
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
chain
.observed_block_producers
.write()
@ -1145,14 +1281,14 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
// because it will revert finalization. Note that the finalized block is stored in fork
// choice, so we will not reject any child of the finalized block (this is relevant during
// genesis).
return Err(BlockError::ParentUnknown(block));
return Err(BlockError::ParentUnknown(block.into_rpc_block()));
}
/*
* Perform cursory checks to see if the block is even worth processing.
*/
check_block_relevancy(&block, block_root, chain)?;
check_block_relevancy(block.as_block(), block_root, chain)?;
// Define a future that will verify the execution payload with an execution engine.
//
@ -1160,7 +1296,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
// with the payload verification.
let payload_notifier = PayloadNotifier::new(
chain.clone(),
block.clone(),
block.block_cloned(),
&parent.pre_state,
notify_execution_layer,
)?;
@ -1310,7 +1446,9 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
StoreOp::PutStateTemporaryFlag(state_root),
]
};
chain.store.do_atomically(state_batch)?;
chain
.store
.do_atomically_with_block_and_blobs_cache(state_batch)?;
drop(txn_lock);
confirmed_state_roots.push(state_root);
@ -1401,13 +1539,13 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
&state,
&chain.log,
);
write_block(&block, block_root, &chain.log);
write_block(block.as_block(), block_root, &chain.log);
let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE);
if let Err(err) = per_block_processing(
&mut state,
&block,
block.as_block(),
// Signatures were verified earlier in this function.
BlockSignatureStrategy::NoVerification,
StateProcessingStrategy::Accurate,
@ -1491,12 +1629,14 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
Ok(Self {
block,
block_root,
state,
parent_block: parent.beacon_block,
parent_eth1_finalization_data,
confirmed_state_roots,
consensus_context,
import_data: BlockImportData {
block_root,
state,
parent_block: parent.beacon_block,
parent_eth1_finalization_data,
confirmed_state_roots,
consensus_context,
},
payload_verification_handle,
})
}
@ -1551,13 +1691,16 @@ fn check_block_against_finalized_slot<T: BeaconChainTypes>(
/// ## Warning
///
/// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here.
pub fn check_block_is_finalized_checkpoint_or_descendant<T: BeaconChainTypes>(
pub fn check_block_is_finalized_checkpoint_or_descendant<
T: BeaconChainTypes,
B: AsBlock<T::EthSpec>,
>(
chain: &BeaconChain<T>,
fork_choice: &BeaconForkChoice<T>,
block: &Arc<SignedBeaconBlock<T::EthSpec>>,
) -> Result<(), BlockError<T::EthSpec>> {
block: B,
) -> Result<B, BlockError<T::EthSpec>> {
if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) {
Ok(())
Ok(block)
} else {
// If fork choice does *not* consider the parent to be a descendant of the finalized block,
// then there are two more cases:
@ -1576,7 +1719,7 @@ pub fn check_block_is_finalized_checkpoint_or_descendant<T: BeaconChainTypes>(
block_parent_root: block.parent_root(),
})
} else {
Err(BlockError::ParentUnknown(block.clone()))
Err(BlockError::ParentUnknown(block.into_rpc_block()))
}
}
}
@ -1643,21 +1786,34 @@ pub fn get_block_root<E: EthSpec>(block: &SignedBeaconBlock<E>) -> Hash256 {
block_root
}
/// Returns the canonical root of the given `block_header`.
///
/// Use this function to ensure that we report the block hashing time Prometheus metric.
pub fn get_block_header_root(block_header: &SignedBeaconBlockHeader) -> Hash256 {
let block_root_timer = metrics::start_timer(&metrics::BLOCK_HEADER_PROCESSING_BLOCK_ROOT);
let block_root = block_header.message.canonical_root();
metrics::stop_timer(block_root_timer);
block_root
}
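// Sketch of the equivalence this relies on (illustrative; assumes a
// `block: SignedBeaconBlock<E>` in scope): the header commits to the body via
// `body_root`, so hashing the header gives the same canonical root as hashing the
// full block, at a fraction of the cost.
//
// let header = block.signed_block_header();
// debug_assert_eq!(get_block_root(&block), get_block_header_root(&header));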
/// Verify the parent of `block` is known, returning some information about the parent block from
/// fork choice.
#[allow(clippy::type_complexity)]
fn verify_parent_block_is_known<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
block_root: Hash256,
fork_choice_read_lock: &RwLockReadGuard<BeaconForkChoice<T>>,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
) -> Result<(ProtoBlock, Arc<SignedBeaconBlock<T::EthSpec>>), BlockError<T::EthSpec>> {
if let Some(proto_block) = chain
.canonical_head
.fork_choice_read_lock()
.get_block(&block.message().parent_root())
{
if let Some(proto_block) = fork_choice_read_lock.get_block(&block.parent_root()) {
Ok((proto_block, block))
} else {
Err(BlockError::ParentUnknown(block))
Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs(
Some(block_root),
block,
)))
}
}
@ -1666,17 +1822,11 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
/// Returns `Err(BlockError::ParentUnknown)` if the parent is not found, or if an error occurs
/// whilst attempting the operation.
#[allow(clippy::type_complexity)]
fn load_parent<T: BeaconChainTypes>(
fn load_parent<T: BeaconChainTypes, B: AsBlock<T::EthSpec>>(
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
block: B,
chain: &BeaconChain<T>,
) -> Result<
(
PreProcessingSnapshot<T::EthSpec>,
Arc<SignedBeaconBlock<T::EthSpec>>,
),
BlockError<T::EthSpec>,
> {
) -> Result<(PreProcessingSnapshot<T::EthSpec>, B), BlockError<T::EthSpec>> {
let spec = &chain.spec;
// Reject any block if its parent is not known to fork choice.
@ -1694,7 +1844,7 @@ fn load_parent<T: BeaconChainTypes>(
.fork_choice_read_lock()
.contains_block(&block.parent_root())
{
return Err(BlockError::ParentUnknown(block));
return Err(BlockError::ParentUnknown(block.into_rpc_block()));
}
let block_delay = chain
@ -1794,6 +1944,47 @@ fn load_parent<T: BeaconChainTypes>(
result
}
/// This trait is used to unify `BlockError` and `GossipBlobError`.
pub trait BlockBlobError: From<BeaconStateError> + From<BeaconChainError> + Debug {
fn not_later_than_parent_error(block_slot: Slot, state_slot: Slot) -> Self;
fn unknown_validator_error(validator_index: u64) -> Self;
fn proposer_signature_invalid() -> Self;
}
impl<E: EthSpec> BlockBlobError for BlockError<E> {
fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self {
BlockError::BlockIsNotLaterThanParent {
block_slot,
parent_slot,
}
}
fn unknown_validator_error(validator_index: u64) -> Self {
BlockError::UnknownValidator(validator_index)
}
fn proposer_signature_invalid() -> Self {
BlockError::ProposalSignatureInvalid
}
}
impl<E: EthSpec> BlockBlobError for GossipBlobError<E> {
fn not_later_than_parent_error(blob_slot: Slot, parent_slot: Slot) -> Self {
GossipBlobError::BlobIsNotLaterThanParent {
blob_slot,
parent_slot,
}
}
fn unknown_validator_error(validator_index: u64) -> Self {
GossipBlobError::UnknownValidator(validator_index)
}
fn proposer_signature_invalid() -> Self {
GossipBlobError::ProposalSignatureInvalid
}
}
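// Sketch of why the trait is useful (illustrative; `check_later_than_parent` is a
// hypothetical helper, not part of this diff): code generic over `Err: BlockBlobError`
// can report the same failure as either a block or a blob error, depending on context.
//
// fn check_later_than_parent<Err: BlockBlobError>(
//     child_slot: Slot,
//     parent_slot: Slot,
// ) -> Result<(), Err> {
//     if child_slot > parent_slot {
//         Ok(())
//     } else {
//         Err(Err::not_later_than_parent_error(child_slot, parent_slot))
//     }
// }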
/// Performs a cheap (time-efficient) state advancement so the committees and proposer shuffling for
/// `slot` can be obtained from `state`.
///
@ -1805,12 +1996,12 @@ fn load_parent<T: BeaconChainTypes>(
/// and `Cow::Borrowed(state)` will be returned. Otherwise, the state will be cloned, cheaply
/// advanced and then returned as a `Cow::Owned`. The end result is that the given `state` is never
/// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build).
fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobError>(
state: &'a mut BeaconState<E>,
state_root_opt: Option<Hash256>,
block_slot: Slot,
spec: &ChainSpec,
) -> Result<Cow<'a, BeaconState<E>>, BlockError<E>> {
) -> Result<Cow<'a, BeaconState<E>>, Err> {
let block_epoch = block_slot.epoch(E::slots_per_epoch());
if state.current_epoch() == block_epoch {
@ -1821,10 +2012,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
Ok(Cow::Borrowed(state))
} else if state.slot() > block_slot {
Err(BlockError::BlockIsNotLaterThanParent {
block_slot,
parent_slot: state.slot(),
})
Err(Err::not_later_than_parent_error(block_slot, state.slot()))
} else {
let mut state = state.clone_with(CloneConfig::committee_caches_only());
let target_slot = block_epoch.start_slot(E::slots_per_epoch());
@ -1832,7 +2020,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
// Advance the state into the same epoch as the block. Use the "partial" method since state
// roots are not important for proposer/attester shuffling.
partial_state_advance(&mut state, state_root_opt, target_slot, spec)
.map_err(|e| BlockError::BeaconChainError(BeaconChainError::from(e)))?;
.map_err(BeaconChainError::from)?;
state.build_committee_cache(RelativeEpoch::Previous, spec)?;
state.build_committee_cache(RelativeEpoch::Current, spec)?;
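// A sketch of the `Cow` contract documented above (illustrative; assumes `state`,
// `spec` and a same-epoch `block_slot` in scope): calling within the state's current
// epoch borrows, while a later epoch clones and partially advances the copy.
//
// let cow = cheap_state_advance_to_obtain_committees::<_, BlockError<E>>(
//     &mut state, None, block_slot, &spec,
// )?;
// assert!(matches!(cow, Cow::Borrowed(_))); // same epoch => no clone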
@ -1844,12 +2032,11 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
/// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`.
pub fn get_validator_pubkey_cache<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
) -> Result<RwLockReadGuard<ValidatorPubkeyCache<T>>, BlockError<T::EthSpec>> {
) -> Result<RwLockReadGuard<ValidatorPubkeyCache<T>>, BeaconChainError> {
chain
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)
.map_err(BlockError::BeaconChainError)
}
/// Produces an _empty_ `BlockSignatureVerifier`.
@ -1890,14 +2077,14 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>(
/// Verify that `header` was signed with a valid signature from its proposer.
///
/// Return `Ok(())` if the signature is valid, and an `Err` otherwise.
fn verify_header_signature<T: BeaconChainTypes>(
pub fn verify_header_signature<T: BeaconChainTypes, Err: BlockBlobError>(
chain: &BeaconChain<T>,
header: &SignedBeaconBlockHeader,
) -> Result<(), BlockError<T::EthSpec>> {
) -> Result<(), Err> {
let proposer_pubkey = get_validator_pubkey_cache(chain)?
.get(header.message.proposer_index as usize)
.cloned()
.ok_or(BlockError::UnknownValidator(header.message.proposer_index))?;
.ok_or(Err::unknown_validator_error(header.message.proposer_index))?;
let head_fork = chain.canonical_head.cached_head().head_fork();
if header.verify_signature::<T::EthSpec>(
@ -1908,7 +2095,7 @@ fn verify_header_signature<T: BeaconChainTypes>(
) {
Ok(())
} else {
Err(BlockError::ProposalSignatureInvalid)
Err(Err::proposer_signature_invalid())
}
}

View File

@ -0,0 +1,533 @@
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList};
use crate::block_verification::BlockError;
use crate::data_availability_checker::AvailabilityCheckError;
pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock};
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome};
use derivative::Derivative;
use ssz_types::VariableList;
use state_processing::ConsensusContext;
use std::sync::Arc;
use types::blob_sidecar::{BlobIdentifier, BlobSidecarError, FixedBlobSidecarList};
use types::{
BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256,
SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
};
/// A block that has been received over RPC. It has 2 internal variants:
///
/// 1. `BlockAndBlobs`: A fully available post-deneb block with all the blobs available. This variant
/// is only constructed after making consistency checks between the block and its blobs.
/// Hence, it is fully self-contained w.r.t. verification, i.e. this block has all the required
/// data to get verified and imported into fork choice.
///
/// 2. `Block`: This can be a fully available pre-deneb block **or** a post-deneb block that may or may
/// not require blobs to be considered fully available.
///
/// Note: We make a distinction from blocks received over gossip because, in a post-deneb world,
/// the blobs corresponding to a given block that are received over rpc do not contain the
/// proposer signature used for DoS resistance.
#[derive(Debug, Clone, Derivative)]
#[derivative(Hash(bound = "E: EthSpec"))]
pub struct RpcBlock<E: EthSpec> {
block_root: Hash256,
block: RpcBlockInner<E>,
}
impl<E: EthSpec> RpcBlock<E> {
pub fn block_root(&self) -> Hash256 {
self.block_root
}
pub fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self.block {
RpcBlockInner::Block(block) => block,
RpcBlockInner::BlockAndBlobs(block, _) => block,
}
}
pub fn blobs(&self) -> Option<&BlobSidecarList<E>> {
match &self.block {
RpcBlockInner::Block(_) => None,
RpcBlockInner::BlockAndBlobs(_, blobs) => Some(blobs),
}
}
}
/// Note: This type is intentionally private because we want to safely construct the
/// internal variants after applying consistency checks to ensure that the block and blobs
/// are consistent with respect to each other.
#[derive(Debug, Clone, Derivative)]
#[derivative(Hash(bound = "E: EthSpec"))]
enum RpcBlockInner<E: EthSpec> {
/// Single block lookup response. This should potentially hit the data availability cache.
Block(Arc<SignedBeaconBlock<E>>),
/// This variant is used with parent lookups and by-range responses. It should have all blobs
/// ordered, all block roots matching, and the correct number of blobs for this block.
BlockAndBlobs(Arc<SignedBeaconBlock<E>>, BlobSidecarList<E>),
}
impl<E: EthSpec> RpcBlock<E> {
/// Constructs a `Block` variant.
pub fn new_without_blobs(
block_root: Option<Hash256>,
block: Arc<SignedBeaconBlock<E>>,
) -> Self {
let block_root = block_root.unwrap_or_else(|| get_block_root(&block));
Self {
block_root,
block: RpcBlockInner::Block(block),
}
}
/// Constructs a new `BlockAndBlobs` variant after making consistency
/// checks between the provided blocks and blobs.
pub fn new(
block_root: Option<Hash256>,
block: Arc<SignedBeaconBlock<E>>,
blobs: Option<BlobSidecarList<E>>,
) -> Result<Self, AvailabilityCheckError> {
let block_root = block_root.unwrap_or_else(|| get_block_root(&block));
if let (Some(blobs), Ok(block_commitments)) = (
blobs.as_ref(),
block.message().body().blob_kzg_commitments(),
) {
if blobs.len() != block_commitments.len() {
return Err(AvailabilityCheckError::MissingBlobs);
}
for (blob, &block_commitment) in blobs.iter().zip(block_commitments.iter()) {
let blob_commitment = blob.kzg_commitment;
if blob_commitment != block_commitment {
return Err(AvailabilityCheckError::KzgCommitmentMismatch {
block_commitment,
blob_commitment,
});
}
}
}
let inner = match blobs {
Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs),
None => RpcBlockInner::Block(block),
};
Ok(Self {
block_root,
block: inner,
})
}
pub fn new_from_fixed(
block_root: Hash256,
block: Arc<SignedBeaconBlock<E>>,
blobs: FixedBlobSidecarList<E>,
) -> Result<Self, AvailabilityCheckError> {
let filtered = blobs
.into_iter()
.filter_map(|b| b.clone())
.collect::<Vec<_>>();
let blobs = if filtered.is_empty() {
None
} else {
Some(VariableList::from(filtered))
};
Self::new(Some(block_root), block, blobs)
}
pub fn deconstruct(
self,
) -> (
Hash256,
Arc<SignedBeaconBlock<E>>,
Option<BlobSidecarList<E>>,
) {
let block_root = self.block_root();
match self.block {
RpcBlockInner::Block(block) => (block_root, block, None),
RpcBlockInner::BlockAndBlobs(block, blobs) => (block_root, block, Some(blobs)),
}
}
pub fn n_blobs(&self) -> usize {
match &self.block {
RpcBlockInner::Block(_) => 0,
RpcBlockInner::BlockAndBlobs(_, blobs) => blobs.len(),
}
}
}
/// A block that has gone through all pre-deneb block processing checks, including block
/// processing and execution by an EL client. This block hasn't necessarily completed data
/// availability checks.
///
/// It contains 2 variants:
/// 1. `Available`: This block has been executed and also contains all data to consider it a
/// fully available block, i.e. for post-deneb, this implies that it contains all the
/// required blobs.
/// 2. `AvailabilityPending`: This block hasn't received all required blobs to consider it a
/// fully available block.
pub enum ExecutedBlock<E: EthSpec> {
Available(AvailableExecutedBlock<E>),
AvailabilityPending(AvailabilityPendingExecutedBlock<E>),
}
impl<E: EthSpec> ExecutedBlock<E> {
pub fn new(
block: MaybeAvailableBlock<E>,
import_data: BlockImportData<E>,
payload_verification_outcome: PayloadVerificationOutcome,
) -> Self {
match block {
MaybeAvailableBlock::Available(available_block) => {
Self::Available(AvailableExecutedBlock::new(
available_block,
import_data,
payload_verification_outcome,
))
}
MaybeAvailableBlock::AvailabilityPending {
block_root: _,
block: pending_block,
} => Self::AvailabilityPending(AvailabilityPendingExecutedBlock::new(
pending_block,
import_data,
payload_verification_outcome,
)),
}
}
pub fn as_block(&self) -> &SignedBeaconBlock<E> {
match self {
Self::Available(available) => available.block.block(),
Self::AvailabilityPending(pending) => &pending.block,
}
}
pub fn block_root(&self) -> Hash256 {
match self {
ExecutedBlock::AvailabilityPending(pending) => pending.import_data.block_root,
ExecutedBlock::Available(available) => available.import_data.block_root,
}
}
}
/// A block that has completed all pre-deneb block processing checks including verification
/// by an EL client **and** has all requisite blob data to be imported into fork choice.
#[derive(PartialEq)]
pub struct AvailableExecutedBlock<E: EthSpec> {
pub block: AvailableBlock<E>,
pub import_data: BlockImportData<E>,
pub payload_verification_outcome: PayloadVerificationOutcome,
}
impl<E: EthSpec> AvailableExecutedBlock<E> {
pub fn new(
block: AvailableBlock<E>,
import_data: BlockImportData<E>,
payload_verification_outcome: PayloadVerificationOutcome,
) -> Self {
Self {
block,
import_data,
payload_verification_outcome,
}
}
pub fn get_all_blob_ids(&self) -> Vec<BlobIdentifier> {
let num_blobs_expected = self
.block
.message()
.body()
.blob_kzg_commitments()
.map_or(0, |commitments| commitments.len());
let mut blob_ids = Vec::with_capacity(num_blobs_expected);
for i in 0..num_blobs_expected {
blob_ids.push(BlobIdentifier {
block_root: self.import_data.block_root,
index: i as u64,
});
}
blob_ids
}
}
/// A block that has completed all pre-deneb block processing checks, including verification
/// by an EL client, but does not have all requisite blob data to be imported into
/// fork choice.
pub struct AvailabilityPendingExecutedBlock<E: EthSpec> {
pub block: Arc<SignedBeaconBlock<E>>,
pub import_data: BlockImportData<E>,
pub payload_verification_outcome: PayloadVerificationOutcome,
}
impl<E: EthSpec> AvailabilityPendingExecutedBlock<E> {
pub fn new(
block: Arc<SignedBeaconBlock<E>>,
import_data: BlockImportData<E>,
payload_verification_outcome: PayloadVerificationOutcome,
) -> Self {
Self {
block,
import_data,
payload_verification_outcome,
}
}
pub fn as_block(&self) -> &SignedBeaconBlock<E> {
&self.block
}
pub fn num_blobs_expected(&self) -> usize {
self.block
.message()
.body()
.blob_kzg_commitments()
.map_or(0, |commitments| commitments.len())
}
}
#[derive(Debug, PartialEq)]
pub struct BlockImportData<E: EthSpec> {
pub block_root: Hash256,
pub state: BeaconState<E>,
pub parent_block: SignedBeaconBlock<E, BlindedPayload<E>>,
pub parent_eth1_finalization_data: Eth1FinalizationData,
pub confirmed_state_roots: Vec<Hash256>,
pub consensus_context: ConsensusContext<E>,
}
pub type GossipVerifiedBlockContents<T> =
(GossipVerifiedBlock<T>, Option<GossipVerifiedBlobList<T>>);
#[derive(Debug)]
pub enum BlockContentsError<T: EthSpec> {
BlockError(BlockError<T>),
BlobError(GossipBlobError<T>),
SidecarError(BlobSidecarError),
}
impl<T: EthSpec> From<BlockError<T>> for BlockContentsError<T> {
fn from(value: BlockError<T>) -> Self {
Self::BlockError(value)
}
}
impl<T: EthSpec> From<GossipBlobError<T>> for BlockContentsError<T> {
fn from(value: GossipBlobError<T>) -> Self {
Self::BlobError(value)
}
}
impl<T: EthSpec> std::fmt::Display for BlockContentsError<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BlockContentsError::BlockError(err) => {
write!(f, "BlockError({})", err)
}
BlockContentsError::BlobError(err) => {
write!(f, "BlobError({})", err)
}
BlockContentsError::SidecarError(err) => {
write!(f, "SidecarError({:?})", err)
}
}
}
}
/// Trait for common block operations.
pub trait AsBlock<E: EthSpec> {
fn slot(&self) -> Slot;
fn epoch(&self) -> Epoch;
fn parent_root(&self) -> Hash256;
fn state_root(&self) -> Hash256;
fn signed_block_header(&self) -> SignedBeaconBlockHeader;
fn message(&self) -> BeaconBlockRef<E>;
fn as_block(&self) -> &SignedBeaconBlock<E>;
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>>;
fn canonical_root(&self) -> Hash256;
fn into_rpc_block(self) -> RpcBlock<E>;
}
impl<E: EthSpec> AsBlock<E> for Arc<SignedBeaconBlock<E>> {
fn slot(&self) -> Slot {
SignedBeaconBlock::slot(self)
}
fn epoch(&self) -> Epoch {
SignedBeaconBlock::epoch(self)
}
fn parent_root(&self) -> Hash256 {
SignedBeaconBlock::parent_root(self)
}
fn state_root(&self) -> Hash256 {
SignedBeaconBlock::state_root(self)
}
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
SignedBeaconBlock::signed_block_header(self)
}
fn message(&self) -> BeaconBlockRef<E> {
SignedBeaconBlock::message(self)
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
self
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
Arc::<SignedBeaconBlock<E>>::clone(self)
}
fn canonical_root(&self) -> Hash256 {
SignedBeaconBlock::canonical_root(self)
}
fn into_rpc_block(self) -> RpcBlock<E> {
RpcBlock::new_without_blobs(None, self)
}
}
impl<E: EthSpec> AsBlock<E> for MaybeAvailableBlock<E> {
fn slot(&self) -> Slot {
self.as_block().slot()
}
fn epoch(&self) -> Epoch {
self.as_block().epoch()
}
fn parent_root(&self) -> Hash256 {
self.as_block().parent_root()
}
fn state_root(&self) -> Hash256 {
self.as_block().state_root()
}
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
self.as_block().signed_block_header()
}
fn message(&self) -> BeaconBlockRef<E> {
self.as_block().message()
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self {
MaybeAvailableBlock::Available(block) => block.as_block(),
MaybeAvailableBlock::AvailabilityPending {
block_root: _,
block,
} => block,
}
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
match &self {
MaybeAvailableBlock::Available(block) => block.block_cloned(),
MaybeAvailableBlock::AvailabilityPending {
block_root: _,
block,
} => block.clone(),
}
}
fn canonical_root(&self) -> Hash256 {
self.as_block().canonical_root()
}
fn into_rpc_block(self) -> RpcBlock<E> {
match self {
MaybeAvailableBlock::Available(available_block) => available_block.into_rpc_block(),
MaybeAvailableBlock::AvailabilityPending { block_root, block } => {
RpcBlock::new_without_blobs(Some(block_root), block)
}
}
}
}
impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
fn slot(&self) -> Slot {
self.block().slot()
}
fn epoch(&self) -> Epoch {
self.block().epoch()
}
fn parent_root(&self) -> Hash256 {
self.block().parent_root()
}
fn state_root(&self) -> Hash256 {
self.block().state_root()
}
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
self.block().signed_block_header()
}
fn message(&self) -> BeaconBlockRef<E> {
self.block().message()
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
self.block()
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
AvailableBlock::block_cloned(self)
}
fn canonical_root(&self) -> Hash256 {
self.block().canonical_root()
}
fn into_rpc_block(self) -> RpcBlock<E> {
let (block_root, block, blobs_opt) = self.deconstruct();
// Circumvent the constructor here, because an Available block will have already had
// consistency checks performed.
let inner = match blobs_opt {
None => RpcBlockInner::Block(block),
Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs),
};
RpcBlock {
block_root,
block: inner,
}
}
}
impl<E: EthSpec> AsBlock<E> for RpcBlock<E> {
fn slot(&self) -> Slot {
self.as_block().slot()
}
fn epoch(&self) -> Epoch {
self.as_block().epoch()
}
fn parent_root(&self) -> Hash256 {
self.as_block().parent_root()
}
fn state_root(&self) -> Hash256 {
self.as_block().state_root()
}
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
self.as_block().signed_block_header()
}
fn message(&self) -> BeaconBlockRef<E> {
self.as_block().message()
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self.block {
RpcBlockInner::Block(block) => block,
RpcBlockInner::BlockAndBlobs(block, _) => block,
}
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
match &self.block {
RpcBlockInner::Block(block) => block.clone(),
RpcBlockInner::BlockAndBlobs(block, _) => block.clone(),
}
}
fn canonical_root(&self) -> Hash256 {
self.as_block().canonical_root()
}
fn into_rpc_block(self) -> RpcBlock<E> {
self
}
}
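
A minimal usage sketch of the `RpcBlock` API defined above, assuming an
`Arc<SignedBeaconBlock<E>>` named `block` and an `Option<BlobSidecarList<E>>` named
`blobs` are already in scope (illustrative only, not part of this diff):

    // By-root lookups without blobs: the root is computed when not supplied.
    let without_blobs = RpcBlock::new_without_blobs(None, block.clone());
    assert_eq!(without_blobs.n_blobs(), 0);

    // By-range / parent-lookup responses: `new` runs the block/blob consistency checks.
    match RpcBlock::new(None, block, blobs) {
        // `AsBlock` then gives uniform access to the inner block.
        Ok(rpc_block) => println!("slot {}", rpc_block.slot()),
        // e.g. `MissingBlobs` or `KzgCommitmentMismatch` from the checks in `new`.
        Err(e) => eprintln!("inconsistent block/blobs: {e:?}"),
    }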


@ -1,4 +1,6 @@
use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY};
use crate::beacon_proposer_cache::BeaconProposerCache;
use crate::data_availability_checker::DataAvailabilityChecker;
use crate::eth1_chain::{CachingEth1Backend, SszEth1};
use crate::eth1_finalization_cache::Eth1FinalizationCache;
use crate::fork_choice_signal::ForkChoiceSignalTx;
@ -9,7 +11,7 @@ use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache};
use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE};
use crate::timeout_rw_lock::TimeoutRwLock;
use crate::validator_monitor::ValidatorMonitor;
use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig};
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::ChainConfig;
use crate::{
@ -20,11 +22,12 @@ use eth1::Config as Eth1Config;
use execution_layer::ExecutionLayer;
use fork_choice::{ForkChoice, ResetPayloadStatuses};
use futures::channel::mpsc::Sender;
use kzg::{Kzg, TrustedSetup};
use operation_pool::{OperationPool, PersistedOperationPool};
-use parking_lot::RwLock;
+use parking_lot::{Mutex, RwLock};
use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
use slasher::Slasher;
-use slog::{crit, debug, error, info, Logger};
+use slog::{crit, debug, error, info, o, Logger};
use slot_clock::{SlotClock, TestingSlotClock};
use state_processing::per_slot_processing;
use std::marker::PhantomData;
@ -33,8 +36,8 @@ use std::time::Duration;
use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp};
use task_executor::{ShutdownReason, TaskExecutor};
use types::{
-BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256,
-PublicKeyBytes, Signature, SignedBeaconBlock, Slot,
+BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, Signature,
+SignedBeaconBlock, Slot,
};
/// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing
@ -91,11 +94,12 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
log: Option<Logger>,
graffiti: Graffiti,
slasher: Option<Arc<Slasher<T::EthSpec>>>,
-validator_monitor: Option<ValidatorMonitor<T::EthSpec>>,
// Pending I/O batch that is constructed during building and should be executed atomically
// alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called.
pending_io_batch: Vec<KeyValueStoreOp>,
trusted_setup: Option<TrustedSetup>,
task_executor: Option<TaskExecutor>,
+validator_monitor_config: Option<ValidatorMonitorConfig>,
}
impl<TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
@ -132,9 +136,10 @@ where
log: None,
graffiti: Graffiti::default(),
slasher: None,
-validator_monitor: None,
pending_io_batch: vec![],
trusted_setup: None,
task_executor: None,
+validator_monitor_config: None,
}
}
@ -392,6 +397,11 @@ where
.init_anchor_info(genesis.beacon_block.message(), retain_historic_states)
.map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?,
);
self.pending_io_batch.push(
store
.init_blob_info(genesis.beacon_block.slot())
.map_err(|e| format!("Failed to initialize genesis blob info: {:?}", e))?,
);
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis)
.map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;
@ -515,6 +525,11 @@ where
.init_anchor_info(weak_subj_block.message(), retain_historic_states)
.map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?,
);
self.pending_io_batch.push(
store
.init_blob_info(weak_subj_block.slot())
.map_err(|e| format!("Failed to initialize blob info: {:?}", e))?,
);
// Store pruning checkpoint to prevent attempting to prune before the anchor state.
self.pending_io_batch
@ -609,19 +624,13 @@ where
/// Register some validators for additional monitoring.
///
/// `validators` is a comma-separated string of 0x-formatted BLS pubkeys.
-pub fn monitor_validators(
-mut self,
-auto_register: bool,
-validators: Vec<PublicKeyBytes>,
-individual_metrics_threshold: usize,
-log: Logger,
-) -> Self {
-self.validator_monitor = Some(ValidatorMonitor::new(
-validators,
-auto_register,
-individual_metrics_threshold,
-log.clone(),
-));
+pub fn validator_monitor_config(mut self, config: ValidatorMonitorConfig) -> Self {
+self.validator_monitor_config = Some(config);
self
}
pub fn trusted_setup(mut self, trusted_setup: TrustedSetup) -> Self {
self.trusted_setup = Some(trusted_setup);
self
}
@ -652,11 +661,16 @@ where
let genesis_state_root = self
.genesis_state_root
.ok_or("Cannot build without a genesis state root")?;
-let mut validator_monitor = self
-.validator_monitor
-.ok_or("Cannot build without a validator monitor")?;
+let validator_monitor_config = self.validator_monitor_config.unwrap_or_default();
let head_tracker = Arc::new(self.head_tracker.unwrap_or_default());
let beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>> = <_>::default();
let mut validator_monitor = ValidatorMonitor::new(
validator_monitor_config,
beacon_proposer_cache.clone(),
log.new(o!("service" => "val_mon")),
);
let current_slot = if slot_clock
.is_prior_to_genesis()
.ok_or("Unable to read slot clock")?
@ -666,6 +680,15 @@ where
slot_clock.now().ok_or("Unable to read slot")?
};
let kzg = if let Some(trusted_setup) = self.trusted_setup {
let kzg = Kzg::new_from_trusted_setup(trusted_setup)
.map_err(|e| format!("Failed to load trusted setup: {:?}", e))?;
let kzg_arc = Arc::new(kzg);
Some(kzg_arc)
} else {
None
};
let initial_head_block_root = fork_choice
.get_head(current_slot, &self.spec)
.map_err(|e| format!("Unable to get fork choice head: {:?}", e))?;
@ -763,6 +786,7 @@ where
validator_monitor.process_valid_state(
slot.epoch(TEthSpec::slots_per_epoch()),
&head_snapshot.beacon_state,
&self.spec,
);
}
@ -781,10 +805,11 @@ where
//
// This *must* be stored before constructing the `BeaconChain`, so that its `Drop` instance
// doesn't write a `PersistedBeaconChain` without the rest of the batch.
let head_tracker_reader = head_tracker.0.read();
self.pending_io_batch.push(BeaconChain::<
Witness<TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>,
>::persist_head_in_batch_standalone(
-genesis_block_root, &head_tracker
+genesis_block_root, &head_tracker_reader
));
self.pending_io_batch.push(BeaconChain::<
Witness<TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>,
@ -795,6 +820,7 @@ where
.hot_db
.do_atomically(self.pending_io_batch)
.map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?;
drop(head_tracker_reader);
let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root();
let genesis_time = head_snapshot.beacon_state.genesis_time();
@ -826,14 +852,14 @@ where
};
let beacon_chain = BeaconChain {
-spec: self.spec,
+spec: self.spec.clone(),
config: self.chain_config,
-store,
+store: store.clone(),
task_executor: self
.task_executor
.ok_or("Cannot build without task executor")?,
store_migrator,
-slot_clock,
+slot_clock: slot_clock.clone(),
op_pool: self.op_pool.ok_or("Cannot build without op pool")?,
// TODO: allow for persisting and loading the pool from disk.
naive_aggregation_pool: <_>::default(),
@ -855,6 +881,8 @@ where
observed_sync_aggregators: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
observed_block_producers: <_>::default(),
observed_blob_sidecars: <_>::default(),
observed_slashable: <_>::default(),
observed_voluntary_exits: <_>::default(),
observed_proposer_slashings: <_>::default(),
observed_attester_slashings: <_>::default(),
@ -882,7 +910,7 @@ where
log.clone(),
)),
eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())),
-beacon_proposer_cache: <_>::default(),
+beacon_proposer_cache,
block_times_cache: <_>::default(),
pre_finalization_block_cache: <_>::default(),
validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache),
@ -896,6 +924,12 @@ where
slasher: self.slasher.clone(),
validator_monitor: RwLock::new(validator_monitor),
genesis_backfill_slot,
data_availability_checker: Arc::new(
DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, &log, self.spec)
.map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?,
),
kzg,
block_production_state: Arc::new(Mutex::new(None)),
};
let head = beacon_chain.head_snapshot();
@ -958,6 +992,13 @@ where
);
}
// Prune blobs older than the blob data availability boundary in the background.
if let Some(data_availability_boundary) = beacon_chain.data_availability_boundary() {
beacon_chain
.store_migrator
.process_prune_blobs(data_availability_boundary);
}
Ok(beacon_chain)
}
}
@ -1055,7 +1096,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String {
#[cfg(test)]
mod test {
use super::*;
-use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
+use crate::test_utils::EphemeralHarnessType;
use ethereum_hashing::hash;
use genesis::{
generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH,
@ -1069,6 +1110,7 @@ mod test {
use types::{EthSpec, MinimalEthSpec, Slot};
type TestEthSpec = MinimalEthSpec;
type Builder = BeaconChainBuilder<EphemeralHarnessType<TestEthSpec>>;
fn get_logger() -> Logger {
let builder = NullLoggerBuilder;
@ -1101,7 +1143,7 @@ mod test {
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let runtime = TestRuntime::default();
-let chain = BeaconChainBuilder::new(MinimalEthSpec)
+let chain = Builder::new(MinimalEthSpec)
.logger(log.clone())
.store(Arc::new(store))
.task_executor(runtime.task_executor.clone())
@ -1112,12 +1154,6 @@ mod test {
.testing_slot_clock(Duration::from_secs(1))
.expect("should configure testing slot clock")
.shutdown_sender(shutdown_tx)
-.monitor_validators(
-true,
-vec![],
-DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
-log.clone(),
-)
.build()
.expect("should build");


@ -984,6 +984,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.start_slot(T::EthSpec::slots_per_epoch()),
);
self.observed_blob_sidecars.write().prune(
new_view
.finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
);
self.observed_slashable.write().prune(
new_view
.finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
);
self.snapshot_cache
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
.map(|mut snapshot_cache| {
@ -1051,6 +1065,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.head_tracker.clone(),
)?;
// Prune blobs in the background.
if let Some(data_availability_boundary) = self.data_availability_boundary() {
self.store_migrator
.process_prune_blobs(data_availability_boundary);
}
// Take a write-lock on the canonical head and signal for it to prune.
self.canonical_head.fork_choice_write_lock().prune()?;


@ -1,5 +1,4 @@
-//! Provides tools for checking if a node is ready for the Capella upgrade and following merge
-//! transition.
+//! Provides tools for checking if a node is ready for the Capella upgrade.
use crate::{BeaconChain, BeaconChainTypes};
use execution_layer::http::{


@ -112,7 +112,7 @@ impl Default for ChainConfig {
shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE,
genesis_backfill: false,
always_prepare_payload: false,
-progressive_balances_mode: ProgressiveBalancesMode::Checked,
+progressive_balances_mode: ProgressiveBalancesMode::Fast,
epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION,
}
}


@ -0,0 +1,644 @@
use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, KzgVerifiedBlobList};
use crate::block_verification_types::{
AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock,
};
pub use crate::data_availability_checker::availability_view::{
AvailabilityView, GetCommitment, GetCommitments,
};
pub use crate::data_availability_checker::child_components::ChildComponents;
use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache;
use crate::data_availability_checker::processing_cache::ProcessingCache;
use crate::{BeaconChain, BeaconChainTypes, BeaconStore};
use kzg::Kzg;
use parking_lot::RwLock;
pub use processing_cache::ProcessingComponents;
use slasher::test_utils::E;
use slog::{debug, error, Logger};
use slot_clock::SlotClock;
use std::fmt;
use std::fmt::Debug;
use std::num::NonZeroUsize;
use std::sync::Arc;
use task_executor::TaskExecutor;
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
mod availability_view;
mod child_components;
mod error;
mod overflow_lru_cache;
mod processing_cache;
mod state_lru_cache;
pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory};
use types::non_zero_usize::new_non_zero_usize;
/// The LRU Cache stores `PendingComponents` which can store up to
/// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So
/// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this
/// to 1024 means the maximum size of the cache is ~ 0.8 GB. But the cache
/// will target a size of less than 75% of capacity.
pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(1024);
/// Until tree-states is implemented, we can't store very many states in memory :(
pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(2);
pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get();
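// Sanity-checking the sizing arithmetic above (illustrative, using the quoted numbers):
// 6 blobs * 0.131256 MB ≈ 0.787536 MB per `PendingComponents`, and
// 1024 entries * 0.787536 MB ≈ 806 MB ≈ 0.8 GB at full capacity.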
/// This includes a cache for any blocks or blobs that have been received over gossip or RPC
/// and are awaiting more components before they can be imported. Additionally, the
/// `DataAvailabilityChecker` is responsible for KZG verification of block components as well as
/// checking whether an "availability check" is required at all.
pub struct DataAvailabilityChecker<T: BeaconChainTypes> {
processing_cache: RwLock<ProcessingCache<T::EthSpec>>,
availability_cache: Arc<OverflowLRUCache<T>>,
slot_clock: T::SlotClock,
kzg: Option<Arc<Kzg>>,
log: Logger,
spec: ChainSpec,
}
/// This type is returned after adding a block / blob to the `DataAvailabilityChecker`.
///
/// Indicates if the block is fully `Available` or if we need blobs or blocks
/// to "complete" the requirements for an `AvailableBlock`.
#[derive(PartialEq)]
pub enum Availability<T: EthSpec> {
MissingComponents(Hash256),
Available(Box<AvailableExecutedBlock<T>>),
}
impl<T: EthSpec> Debug for Availability<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::MissingComponents(block_root) => {
write!(f, "MissingComponents({})", block_root)
}
Self::Available(block) => write!(f, "Available({:?})", block.import_data.block_root),
}
}
}
impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
pub fn new(
slot_clock: T::SlotClock,
kzg: Option<Arc<Kzg>>,
store: BeaconStore<T>,
log: &Logger,
spec: ChainSpec,
) -> Result<Self, AvailabilityCheckError> {
let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?;
Ok(Self {
processing_cache: <_>::default(),
availability_cache: Arc::new(overflow_cache),
slot_clock,
log: log.clone(),
kzg,
spec,
})
}
/// Checks if the given block root is cached.
pub fn has_block(&self, block_root: &Hash256) -> bool {
self.processing_cache.read().has_block(block_root)
}
/// Get the processing info for a block.
pub fn get_processing_components(
&self,
block_root: Hash256,
) -> Option<ProcessingComponents<T::EthSpec>> {
self.processing_cache.read().get(&block_root).cloned()
}
/// A `None` indicates blobs are not required.
///
/// If there's no block, all possible ids that don't exist in the given blobs will be returned.
/// If there are no blobs, all possible ids will be returned.
pub fn get_missing_blob_ids<V: AvailabilityView<T::EthSpec>>(
&self,
block_root: Hash256,
availability_view: &V,
) -> MissingBlobs {
let Some(current_slot) = self.slot_clock.now_or_genesis() else {
error!(
self.log,
"Failed to read slot clock when checking for missing blob ids"
);
return MissingBlobs::BlobsNotRequired;
};
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
if self.da_check_required_for_epoch(current_epoch) {
match availability_view.get_cached_block() {
Some(cached_block) => {
let block_commitments = cached_block.get_commitments();
let blob_commitments = availability_view.get_cached_blobs();
let num_blobs_expected = block_commitments.len();
let mut blob_ids = Vec::with_capacity(num_blobs_expected);
// Zip here will always limit the number of iterations to the size of
// `block_commitments` because `blob_commitments` will always be populated
// with `Option` values up to `MAX_BLOBS_PER_BLOCK`.
for (index, (block_commitment, blob_commitment_opt)) in block_commitments
.into_iter()
.zip(blob_commitments.iter())
.enumerate()
{
// Always add a missing blob.
let Some(blob_commitment) = blob_commitment_opt else {
blob_ids.push(BlobIdentifier {
block_root,
index: index as u64,
});
continue;
};
let blob_commitment = *blob_commitment.get_commitment();
// Check for consistency; this shouldn't happen because an availability view
// should guarantee consistency.
if blob_commitment != block_commitment {
error!(self.log,
"Inconsistent availability view";
"block_root" => ?block_root,
"block_commitment" => ?block_commitment,
"blob_commitment" => ?blob_commitment,
"index" => index
);
blob_ids.push(BlobIdentifier {
block_root,
index: index as u64,
});
}
}
MissingBlobs::KnownMissing(blob_ids)
}
None => {
MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::<E>(block_root))
}
}
} else {
MissingBlobs::BlobsNotRequired
}
}
/// Get a blob from the availability cache.
pub fn get_blob(
&self,
blob_id: &BlobIdentifier,
) -> Result<Option<Arc<BlobSidecar<T::EthSpec>>>, AvailabilityCheckError> {
self.availability_cache.peek_blob(blob_id)
}
/// Put a list of blobs received via RPC into the availability cache. This performs KZG
/// verification on the blobs in the list.
pub fn put_rpc_blobs(
&self,
block_root: Hash256,
blobs: FixedBlobSidecarList<T::EthSpec>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
let Some(kzg) = self.kzg.as_ref() else {
return Err(AvailabilityCheckError::KzgNotInitialized);
};
let verified_blobs = KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg)
.map_err(AvailabilityCheckError::Kzg)?;
self.availability_cache
.put_kzg_verified_blobs(block_root, verified_blobs)
}
/// Check if we've cached other blobs for this block. If it completes a set and we also
/// have a block cached, return the `Availability` variant triggering block import.
/// Otherwise cache the blob sidecar.
///
/// This should only accept gossip verified blobs, so we should not have to worry about dupes.
pub fn put_gossip_blob(
&self,
gossip_blob: GossipVerifiedBlob<T>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
self.availability_cache
.put_kzg_verified_blobs(gossip_blob.block_root(), vec![gossip_blob.into_inner()])
}
/// Check if we have all the blobs for a block. Returns `Availability` which has information
/// about whether all components have been received or more are required.
pub fn put_pending_executed_block(
&self,
executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
self.availability_cache
.put_pending_executed_block(executed_block)
}
/// Verifies KZG commitments for an `RpcBlock` and returns a `MaybeAvailableBlock` that may
/// include the fully available block.
///
/// WARNING: This function assumes all required blobs are already present, it does NOT
/// check if there are any missing blobs.
pub fn verify_kzg_for_rpc_block(
&self,
block: RpcBlock<T::EthSpec>,
) -> Result<MaybeAvailableBlock<T::EthSpec>, AvailabilityCheckError> {
let (block_root, block, blobs) = block.deconstruct();
match blobs {
None => {
if self.blobs_required_for_block(&block) {
Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
} else {
Ok(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: None,
}))
}
}
Some(blob_list) => {
let verified_blobs = if self.blobs_required_for_block(&block) {
let kzg = self
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;
verify_kzg_for_blob_list(blob_list.iter(), kzg)
.map_err(AvailabilityCheckError::Kzg)?;
Some(blob_list)
} else {
None
};
Ok(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: verified_blobs,
}))
}
}
}
/// Checks if a vector of blocks is available. Returns a vector of `MaybeAvailableBlock`.
/// This is more efficient than calling `verify_kzg_for_rpc_block` in a loop, as it does
/// all KZG verification at once.
///
/// WARNING: This function assumes all required blobs are already present, it does NOT
/// check if there are any missing blobs.
pub fn verify_kzg_for_rpc_blocks(
&self,
blocks: Vec<RpcBlock<T::EthSpec>>,
) -> Result<Vec<MaybeAvailableBlock<T::EthSpec>>, AvailabilityCheckError> {
let mut results = Vec::with_capacity(blocks.len());
let all_blobs: BlobSidecarList<T::EthSpec> = blocks
.iter()
.filter(|block| self.blobs_required_for_block(block.as_block()))
// this clone is cheap as it's cloning an Arc
.filter_map(|block| block.blobs().cloned())
.flatten()
.collect::<Vec<_>>()
.into();
// verify kzg for all blobs at once
if !all_blobs.is_empty() {
let kzg = self
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;
verify_kzg_for_blob_list(all_blobs.iter(), kzg)?;
}
for block in blocks {
let (block_root, block, blobs) = block.deconstruct();
match blobs {
None => {
if self.blobs_required_for_block(&block) {
results.push(MaybeAvailableBlock::AvailabilityPending { block_root, block })
} else {
results.push(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: None,
}))
}
}
Some(blob_list) => {
let verified_blobs = if self.blobs_required_for_block(&block) {
Some(blob_list)
} else {
None
};
// already verified kzg for all blobs
results.push(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: verified_blobs,
}))
}
}
}
Ok(results)
}
/// Determines the blob requirements for a block. If the block is pre-deneb, no blobs are required.
/// If the block's epoch is prior to the data availability boundary, no blobs are required.
fn blobs_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch())
}
/// Adds block commitments to the processing cache. These commitments are unverified but caching
/// them here is useful to avoid duplicate downloads of blocks, as well as understanding
/// our blob download requirements.
pub fn notify_block_commitments(
&self,
slot: Slot,
block_root: Hash256,
commitments: KzgCommitments<T::EthSpec>,
) {
self.processing_cache
.write()
.entry(block_root)
.or_insert_with(|| ProcessingComponents::new(slot))
.merge_block(commitments);
}
/// Add a single blob commitment to the processing cache. This commitment is unverified but caching
/// it here is useful to avoid duplicate downloads of blobs, as well as understanding
/// our block and blob download requirements.
pub fn notify_gossip_blob(
&self,
slot: Slot,
block_root: Hash256,
blob: &GossipVerifiedBlob<T>,
) {
let index = blob.index();
let commitment = blob.kzg_commitment();
self.processing_cache
.write()
.entry(block_root)
.or_insert_with(|| ProcessingComponents::new(slot))
.merge_single_blob(index as usize, commitment);
}
/// Adds blob commitments to the processing cache. These commitments are unverified but caching
/// them here is useful to avoid duplicate downloads of blobs, as well as understanding
/// our block and blob download requirements.
pub fn notify_rpc_blobs(
&self,
slot: Slot,
block_root: Hash256,
blobs: &FixedBlobSidecarList<T::EthSpec>,
) {
let mut commitments = KzgCommitmentOpts::<T::EthSpec>::default();
for blob in blobs.iter().flatten() {
if let Some(commitment) = commitments.get_mut(blob.index as usize) {
*commitment = Some(blob.kzg_commitment);
}
}
self.processing_cache
.write()
.entry(block_root)
.or_insert_with(|| ProcessingComponents::new(slot))
.merge_blobs(commitments);
}
/// Clears the block and all blobs from the processing cache for a given root, if they exist.
pub fn remove_notified(&self, block_root: &Hash256) {
self.processing_cache.write().remove(block_root)
}
/// Gather all block roots for which we are not currently processing all components for the
/// given slot.
pub fn incomplete_processing_components(&self, slot: Slot) -> Vec<Hash256> {
self.processing_cache
.read()
.incomplete_processing_components(slot)
}
/// The epoch at which we require a data availability check in block processing.
/// `None` if the `Deneb` fork is disabled.
pub fn data_availability_boundary(&self) -> Option<Epoch> {
self.spec.deneb_fork_epoch.and_then(|fork_epoch| {
self.slot_clock
.now()
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
.map(|current_epoch| {
std::cmp::max(
fork_epoch,
current_epoch
.saturating_sub(self.spec.min_epochs_for_blob_sidecars_requests),
)
})
})
}
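// Worked example with illustrative numbers: if `deneb_fork_epoch` is 100,
// `min_epochs_for_blob_sidecars_requests` is 4096 and the current epoch is 5000,
// the boundary is max(100, 5000 - 4096) = 904. Shortly after the fork (say epoch
// 200) the saturating subtraction yields 0 and the boundary pins to the fork epoch.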
/// Returns true if the given epoch is at or after the data availability boundary, and false otherwise.
pub fn da_check_required_for_epoch(&self, block_epoch: Epoch) -> bool {
self.data_availability_boundary()
.map_or(false, |da_epoch| block_epoch >= da_epoch)
}
/// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch.
pub fn is_deneb(&self) -> bool {
self.slot_clock.now().map_or(false, |slot| {
self.spec.deneb_fork_epoch.map_or(false, |deneb_epoch| {
let now_epoch = slot.epoch(T::EthSpec::slots_per_epoch());
now_epoch >= deneb_epoch
})
})
}
/// Persist all in memory components to disk
pub fn persist_all(&self) -> Result<(), AvailabilityCheckError> {
self.availability_cache.write_all_to_disk()
}
}
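
// A sketch of the intended call pattern for the checker (caller-side code assumed,
// not part of this file): each `put_*` method returns an `Availability` that the
// caller matches on, e.g.
//
//     match checker.put_gossip_blob(gossip_blob)? {
//         Availability::Available(executed_block) => {
//             // All components are present: hand the block over for import.
//         }
//         Availability::MissingComponents(block_root) => {
//             // Not yet complete: keep waiting or trigger lookups for `block_root`.
//         }
//     }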
pub fn start_availability_cache_maintenance_service<T: BeaconChainTypes>(
executor: TaskExecutor,
chain: Arc<BeaconChain<T>>,
) {
// this cache only needs to be maintained if deneb is configured
if chain.spec.deneb_fork_epoch.is_some() {
let overflow_cache = chain.data_availability_checker.availability_cache.clone();
executor.spawn(
async move { availability_cache_maintenance_service(chain, overflow_cache).await },
"availability_cache_service",
);
} else {
debug!(
chain.log,
"Deneb fork not configured, not starting availability cache maintenance service"
);
}
}
async fn availability_cache_maintenance_service<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
overflow_cache: Arc<OverflowLRUCache<T>>,
) {
let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32;
loop {
match chain
.slot_clock
.duration_to_next_epoch(T::EthSpec::slots_per_epoch())
{
Some(duration) => {
// this service should run 3/4 of the way through the epoch
let additional_delay = (epoch_duration * 3) / 4;
tokio::time::sleep(duration + additional_delay).await;
let Some(deneb_fork_epoch) = chain.spec.deneb_fork_epoch else {
// shutdown service if deneb fork epoch not set
break;
};
debug!(
chain.log,
"Availability cache maintenance service firing";
);
let Some(current_epoch) = chain
.slot_clock
.now()
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
else {
continue;
};
if current_epoch < deneb_fork_epoch {
// we are not in deneb yet
continue;
}
let finalized_epoch = chain
.canonical_head
.fork_choice_read_lock()
.finalized_checkpoint()
.epoch;
// any data belonging to an epoch before this should be pruned
let cutoff_epoch = std::cmp::max(
finalized_epoch + 1,
std::cmp::max(
current_epoch
.saturating_sub(chain.spec.min_epochs_for_blob_sidecars_requests),
deneb_fork_epoch,
),
);
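// Illustrative numbers: with finalized_epoch = 900, current_epoch = 5000,
// min_epochs_for_blob_sidecars_requests = 4096 and deneb_fork_epoch = 100,
// the cutoff is max(901, max(904, 100)) = 904, i.e. the data availability
// window dominates once it has fully opened.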
if let Err(e) = overflow_cache.do_maintenance(cutoff_epoch) {
error!(chain.log, "Failed to maintain availability cache"; "error" => ?e);
}
}
None => {
error!(chain.log, "Failed to read slot clock");
// If we can't read the slot clock, just wait another slot.
tokio::time::sleep(chain.slot_clock.slot_duration()).await;
}
};
}
}
/// A fully available block that is ready to be imported into fork choice.
#[derive(Clone, Debug, PartialEq)]
pub struct AvailableBlock<E: EthSpec> {
block_root: Hash256,
block: Arc<SignedBeaconBlock<E>>,
blobs: Option<BlobSidecarList<E>>,
}
impl<E: EthSpec> AvailableBlock<E> {
pub fn block(&self) -> &SignedBeaconBlock<E> {
&self.block
}
pub fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
self.block.clone()
}
pub fn blobs(&self) -> Option<&BlobSidecarList<E>> {
self.blobs.as_ref()
}
pub fn deconstruct(
self,
) -> (
Hash256,
Arc<SignedBeaconBlock<E>>,
Option<BlobSidecarList<E>>,
) {
let AvailableBlock {
block_root,
block,
blobs,
} = self;
(block_root, block, blobs)
}
}
#[derive(Debug, Clone)]
pub enum MaybeAvailableBlock<E: EthSpec> {
/// This variant is fully available.
/// i.e. for pre-deneb blocks, it contains a (`SignedBeaconBlock`, `Blobs::None`) and for
/// post-deneb blocks, it contains a `SignedBeaconBlock` and a Blobs variant other than `Blobs::None`.
Available(AvailableBlock<E>),
/// This variant is not fully available and requires blobs to become fully available.
AvailabilityPending {
block_root: Hash256,
block: Arc<SignedBeaconBlock<E>>,
},
}
#[derive(Debug, Clone)]
pub enum MissingBlobs {
/// We know for certain these blobs are missing.
KnownMissing(Vec<BlobIdentifier>),
/// We think these blobs might be missing.
PossibleMissing(Vec<BlobIdentifier>),
/// Blobs are not required.
BlobsNotRequired,
}
impl MissingBlobs {
pub fn new_without_block(block_root: Hash256, is_deneb: bool) -> Self {
if is_deneb {
MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::<E>(block_root))
} else {
MissingBlobs::BlobsNotRequired
}
}
pub fn is_empty(&self) -> bool {
match self {
MissingBlobs::KnownMissing(v) => v.is_empty(),
MissingBlobs::PossibleMissing(v) => v.is_empty(),
MissingBlobs::BlobsNotRequired => true,
}
}
pub fn contains(&self, blob_id: &BlobIdentifier) -> bool {
match self {
MissingBlobs::KnownMissing(v) => v.contains(blob_id),
MissingBlobs::PossibleMissing(v) => v.contains(blob_id),
MissingBlobs::BlobsNotRequired => false,
}
}
pub fn remove(&mut self, blob_id: &BlobIdentifier) {
match self {
MissingBlobs::KnownMissing(v) => v.retain(|id| id != blob_id),
MissingBlobs::PossibleMissing(v) => v.retain(|id| id != blob_id),
MissingBlobs::BlobsNotRequired => {}
}
}
pub fn indices(&self) -> Vec<u64> {
match self {
MissingBlobs::KnownMissing(v) => v.iter().map(|id| id.index).collect(),
MissingBlobs::PossibleMissing(v) => v.iter().map(|id| id.index).collect(),
MissingBlobs::BlobsNotRequired => vec![],
}
}
}
impl Into<Vec<BlobIdentifier>> for MissingBlobs {
fn into(self) -> Vec<BlobIdentifier> {
match self {
MissingBlobs::KnownMissing(v) => v,
MissingBlobs::PossibleMissing(v) => v,
MissingBlobs::BlobsNotRequired => vec![],
}
}
}


@ -0,0 +1,553 @@
use super::child_components::ChildComponents;
use super::state_lru_cache::DietAvailabilityPendingExecutedBlock;
use crate::blob_verification::KzgVerifiedBlob;
use crate::block_verification_types::AsBlock;
use crate::data_availability_checker::overflow_lru_cache::PendingComponents;
use crate::data_availability_checker::ProcessingComponents;
use kzg::KzgCommitment;
use ssz_types::FixedVector;
use std::sync::Arc;
use types::beacon_block_body::KzgCommitments;
use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
/// Defines an interface for managing data availability with two key invariants:
///
/// 1. If we haven't seen a block yet, we will insert the first blob for a given (block_root, index)
/// but we won't insert subsequent blobs for the same (block_root, index) if they have a different
/// commitment.
/// 2. On block insertion, any non-matching blob commitments are evicted.
///
/// Types implementing this trait can be used for validating and managing availability
/// of blocks and blobs in a cache-like data structure.
pub trait AvailabilityView<E: EthSpec> {
/// The type representing a block in the implementation.
type BlockType: GetCommitments<E>;
/// The type representing a blob in the implementation. Must implement `Clone`.
type BlobType: Clone + GetCommitment<E>;
/// Returns an immutable reference to the cached block.
fn get_cached_block(&self) -> &Option<Self::BlockType>;
/// Returns an immutable reference to the fixed vector of cached blobs.
fn get_cached_blobs(&self) -> &FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>;
/// Returns a mutable reference to the cached block.
fn get_cached_block_mut(&mut self) -> &mut Option<Self::BlockType>;
/// Returns a mutable reference to the fixed vector of cached blobs.
fn get_cached_blobs_mut(
&mut self,
) -> &mut FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>;
/// Checks if a block exists in the cache.
///
/// Returns:
/// - `true` if a block exists.
/// - `false` otherwise.
fn block_exists(&self) -> bool {
self.get_cached_block().is_some()
}
/// Checks if a blob exists at the given index in the cache.
///
/// Returns:
/// - `true` if a blob exists at the given index.
/// - `false` otherwise.
fn blob_exists(&self, blob_index: usize) -> bool {
self.get_cached_blobs()
.get(blob_index)
.map(|b| b.is_some())
.unwrap_or(false)
}
/// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a
/// block.
///
/// This corresponds to the number of commitments that are present in a block.
fn num_expected_blobs(&self) -> Option<usize> {
self.get_cached_block()
.as_ref()
.map(|b| b.get_commitments().len())
}
/// Returns the number of blobs that have been received and are stored in the cache.
fn num_received_blobs(&self) -> usize {
self.get_cached_blobs().iter().flatten().count()
}
/// Inserts a block into the cache.
fn insert_block(&mut self, block: Self::BlockType) {
*self.get_cached_block_mut() = Some(block)
}
/// Inserts a blob at a specific index in the cache.
///
/// An existing blob at the index will be replaced.
fn insert_blob_at_index(&mut self, blob_index: usize, blob: Self::BlobType) {
if let Some(b) = self.get_cached_blobs_mut().get_mut(blob_index) {
*b = Some(blob);
}
}
/// Merges a given set of blobs into the cache.
///
/// Blobs are only inserted if:
/// 1. The blob entry at the index is empty and no block exists.
/// 2. The block exists and its commitment matches the blob's commitment.
fn merge_blobs(&mut self, blobs: FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>) {
for (index, blob) in blobs.iter().cloned().enumerate() {
let Some(blob) = blob else { continue };
self.merge_single_blob(index, blob);
}
}
/// Merges a single blob into the cache.
///
/// Blobs are only inserted if:
/// 1. The blob entry at the index is empty and no block exists, or
/// 2. The block exists and its commitment matches the blob's commitment.
fn merge_single_blob(&mut self, index: usize, blob: Self::BlobType) {
let commitment = *blob.get_commitment();
if let Some(cached_block) = self.get_cached_block() {
let block_commitment_opt = cached_block.get_commitments().get(index).copied();
if let Some(block_commitment) = block_commitment_opt {
if block_commitment == commitment {
self.insert_blob_at_index(index, blob)
}
}
} else if !self.blob_exists(index) {
self.insert_blob_at_index(index, blob)
}
}
/// Inserts a new block and revalidates the existing blobs against it.
///
/// Blobs that don't match the new block's commitments are evicted.
fn merge_block(&mut self, block: Self::BlockType) {
self.insert_block(block);
let reinsert = std::mem::take(self.get_cached_blobs_mut());
self.merge_blobs(reinsert);
}
/// Checks if the block and all of its expected blobs are available in the cache.
///
/// Returns `true` if both the block exists and the number of received blobs matches the number
/// of expected blobs.
fn is_available(&self) -> bool {
if let Some(num_expected_blobs) = self.num_expected_blobs() {
num_expected_blobs == self.num_received_blobs()
} else {
false
}
}
}
/// Implements the `AvailabilityView` trait for a given struct.
///
/// - `$struct_name`: The name of the struct for which to implement `AvailabilityView`.
/// - `$block_type`: The type to use for `BlockType` in the `AvailabilityView` trait.
/// - `$blob_type`: The type to use for `BlobType` in the `AvailabilityView` trait.
/// - `$block_field`: The field name in the struct that holds the cached block.
/// - `$blob_field`: The field name in the struct that holds the cached blobs.
#[macro_export]
macro_rules! impl_availability_view {
($struct_name:ident, $block_type:ty, $blob_type:ty, $block_field:ident, $blob_field:ident) => {
impl<E: EthSpec> AvailabilityView<E> for $struct_name<E> {
type BlockType = $block_type;
type BlobType = $blob_type;
fn get_cached_block(&self) -> &Option<Self::BlockType> {
&self.$block_field
}
fn get_cached_blobs(
&self,
) -> &FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock> {
&self.$blob_field
}
fn get_cached_block_mut(&mut self) -> &mut Option<Self::BlockType> {
&mut self.$block_field
}
fn get_cached_blobs_mut(
&mut self,
) -> &mut FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock> {
&mut self.$blob_field
}
}
};
}
impl_availability_view!(
ProcessingComponents,
KzgCommitments<E>,
KzgCommitment,
block_commitments,
blob_commitments
);
impl_availability_view!(
PendingComponents,
DietAvailabilityPendingExecutedBlock<E>,
KzgVerifiedBlob<E>,
executed_block,
verified_blobs
);
impl_availability_view!(
ChildComponents,
Arc<SignedBeaconBlock<E>>,
Arc<BlobSidecar<E>>,
downloaded_block,
downloaded_blobs
);
pub trait GetCommitments<E: EthSpec> {
fn get_commitments(&self) -> KzgCommitments<E>;
}
pub trait GetCommitment<E: EthSpec> {
fn get_commitment(&self) -> &KzgCommitment;
}
// These implementations are required to implement `AvailabilityView` for `ProcessingComponents`.
impl<E: EthSpec> GetCommitments<E> for KzgCommitments<E> {
fn get_commitments(&self) -> KzgCommitments<E> {
self.clone()
}
}
impl<E: EthSpec> GetCommitment<E> for KzgCommitment {
fn get_commitment(&self) -> &KzgCommitment {
self
}
}
// These implementations are required to implement `AvailabilityView` for `PendingComponents`.
impl<E: EthSpec> GetCommitments<E> for DietAvailabilityPendingExecutedBlock<E> {
fn get_commitments(&self) -> KzgCommitments<E> {
self.as_block()
.message()
.body()
.blob_kzg_commitments()
.cloned()
.unwrap_or_default()
}
}
impl<E: EthSpec> GetCommitment<E> for KzgVerifiedBlob<E> {
fn get_commitment(&self) -> &KzgCommitment {
&self.as_blob().kzg_commitment
}
}
// These implementations are required to implement `AvailabilityView` for `ChildComponents`.
impl<E: EthSpec> GetCommitments<E> for Arc<SignedBeaconBlock<E>> {
fn get_commitments(&self) -> KzgCommitments<E> {
self.message()
.body()
.blob_kzg_commitments()
.ok()
.cloned()
.unwrap_or_default()
}
}
impl<E: EthSpec> GetCommitment<E> for Arc<BlobSidecar<E>> {
fn get_commitment(&self) -> &KzgCommitment {
&self.kzg_commitment
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::block_verification_types::BlockImportData;
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs};
use crate::AvailabilityPendingExecutedBlock;
use crate::PayloadVerificationOutcome;
use fork_choice::PayloadVerificationStatus;
use rand::rngs::StdRng;
use rand::SeedableRng;
use state_processing::ConsensusContext;
use types::test_utils::TestRandom;
use types::{BeaconState, ChainSpec, ForkName, MainnetEthSpec, Slot};
type E = MainnetEthSpec;
type Setup<E> = (
SignedBeaconBlock<E>,
FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
);
pub fn pre_setup() -> Setup<E> {
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
let (block, blobs_vec) =
generate_rand_block_and_blobs::<E>(ForkName::Deneb, NumBlobs::Random, &mut rng);
let mut blobs: FixedVector<_, <E as EthSpec>::MaxBlobsPerBlock> = FixedVector::default();
for blob in blobs_vec {
if let Some(b) = blobs.get_mut(blob.index as usize) {
*b = Some(Arc::new(blob));
}
}
let mut invalid_blobs: FixedVector<
Option<Arc<BlobSidecar<E>>>,
<E as EthSpec>::MaxBlobsPerBlock,
> = FixedVector::default();
for (index, blob) in blobs.iter().enumerate() {
if let Some(invalid_blob) = blob {
let mut blob_copy = invalid_blob.as_ref().clone();
blob_copy.kzg_commitment = KzgCommitment::random_for_test(&mut rng);
*invalid_blobs.get_mut(index).unwrap() = Some(Arc::new(blob_copy));
}
}
(block, blobs, invalid_blobs)
}
type ProcessingViewSetup<E> = (
KzgCommitments<E>,
FixedVector<Option<KzgCommitment>, <E as EthSpec>::MaxBlobsPerBlock>,
FixedVector<Option<KzgCommitment>, <E as EthSpec>::MaxBlobsPerBlock>,
);
pub fn setup_processing_components(
block: SignedBeaconBlock<E>,
valid_blobs: FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
invalid_blobs: FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
) -> ProcessingViewSetup<E> {
let commitments = block
.message()
.body()
.blob_kzg_commitments()
.unwrap()
.clone();
let blobs = FixedVector::from(
valid_blobs
.iter()
.map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment))
.collect::<Vec<_>>(),
);
let invalid_blobs = FixedVector::from(
invalid_blobs
.iter()
.map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment))
.collect::<Vec<_>>(),
);
(commitments, blobs, invalid_blobs)
}
type PendingComponentsSetup<E> = (
DietAvailabilityPendingExecutedBlock<E>,
FixedVector<Option<KzgVerifiedBlob<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
FixedVector<Option<KzgVerifiedBlob<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
);
pub fn setup_pending_components(
block: SignedBeaconBlock<E>,
valid_blobs: FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
invalid_blobs: FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
) -> PendingComponentsSetup<E> {
let blobs = FixedVector::from(
valid_blobs
.iter()
.map(|blob_opt| {
blob_opt
.as_ref()
.map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone()))
})
.collect::<Vec<_>>(),
);
let invalid_blobs = FixedVector::from(
invalid_blobs
.iter()
.map(|blob_opt| {
blob_opt
.as_ref()
.map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone()))
})
.collect::<Vec<_>>(),
);
let dummy_parent = block.clone_as_blinded();
let block = AvailabilityPendingExecutedBlock {
block: Arc::new(block),
import_data: BlockImportData {
block_root: Default::default(),
state: BeaconState::new(0, Default::default(), &ChainSpec::minimal()),
parent_block: dummy_parent,
parent_eth1_finalization_data: Eth1FinalizationData {
eth1_data: Default::default(),
eth1_deposit_index: 0,
},
confirmed_state_roots: vec![],
consensus_context: ConsensusContext::new(Slot::new(0)),
},
payload_verification_outcome: PayloadVerificationOutcome {
payload_verification_status: PayloadVerificationStatus::Verified,
is_valid_merge_transition_block: false,
},
};
(block.into(), blobs, invalid_blobs)
}
type ChildComponentsSetup<E> = (
Arc<SignedBeaconBlock<E>>,
FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
);
pub fn setup_child_components(
block: SignedBeaconBlock<E>,
valid_blobs: FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
invalid_blobs: FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
) -> ChildComponentsSetup<E> {
let blobs = FixedVector::from(valid_blobs.into_iter().cloned().collect::<Vec<_>>());
let invalid_blobs =
FixedVector::from(invalid_blobs.into_iter().cloned().collect::<Vec<_>>());
(Arc::new(block), blobs, invalid_blobs)
}
pub fn assert_cache_consistent<V: AvailabilityView<E>>(cache: V) {
if let Some(cached_block) = cache.get_cached_block() {
let cached_block_commitments = cached_block.get_commitments();
for index in 0..E::max_blobs_per_block() {
let block_commitment = cached_block_commitments.get(index).copied();
let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap();
let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment());
assert_eq!(block_commitment, blob_commitment);
}
} else {
panic!("No cached block")
}
}
pub fn assert_empty_blob_cache<V: AvailabilityView<E>>(cache: V) {
for blob in cache.get_cached_blobs().iter() {
assert!(blob.is_none());
}
}
#[macro_export]
macro_rules! generate_tests {
($module_name:ident, $type_name:ty, $block_field:ident, $blob_field:ident, $setup_fn:ident) => {
mod $module_name {
use super::*;
use types::Hash256;
#[test]
fn valid_block_invalid_blobs_valid_blobs() {
let (block_commitments, blobs, random_blobs) = pre_setup();
let (block_commitments, blobs, random_blobs) =
$setup_fn(block_commitments, blobs, random_blobs);
let block_root = Hash256::zero();
let mut cache = <$type_name>::empty(block_root);
cache.merge_block(block_commitments);
cache.merge_blobs(random_blobs);
cache.merge_blobs(blobs);
assert_cache_consistent(cache);
}
#[test]
fn invalid_blobs_block_valid_blobs() {
let (block_commitments, blobs, random_blobs) = pre_setup();
let (block_commitments, blobs, random_blobs) =
$setup_fn(block_commitments, blobs, random_blobs);
let block_root = Hash256::zero();
let mut cache = <$type_name>::empty(block_root);
cache.merge_blobs(random_blobs);
cache.merge_block(block_commitments);
cache.merge_blobs(blobs);
assert_cache_consistent(cache);
}
#[test]
fn invalid_blobs_valid_blobs_block() {
let (block_commitments, blobs, random_blobs) = pre_setup();
let (block_commitments, blobs, random_blobs) =
$setup_fn(block_commitments, blobs, random_blobs);
let block_root = Hash256::zero();
let mut cache = <$type_name>::empty(block_root);
cache.merge_blobs(random_blobs);
cache.merge_blobs(blobs);
cache.merge_block(block_commitments);
assert_empty_blob_cache(cache);
}
#[test]
fn block_valid_blobs_invalid_blobs() {
let (block_commitments, blobs, random_blobs) = pre_setup();
let (block_commitments, blobs, random_blobs) =
$setup_fn(block_commitments, blobs, random_blobs);
let block_root = Hash256::zero();
let mut cache = <$type_name>::empty(block_root);
cache.merge_block(block_commitments);
cache.merge_blobs(blobs);
cache.merge_blobs(random_blobs);
assert_cache_consistent(cache);
}
#[test]
fn valid_blobs_block_invalid_blobs() {
let (block_commitments, blobs, random_blobs) = pre_setup();
let (block_commitments, blobs, random_blobs) =
$setup_fn(block_commitments, blobs, random_blobs);
let block_root = Hash256::zero();
let mut cache = <$type_name>::empty(block_root);
cache.merge_blobs(blobs);
cache.merge_block(block_commitments);
cache.merge_blobs(random_blobs);
assert_cache_consistent(cache);
}
#[test]
fn valid_blobs_invalid_blobs_block() {
let (block_commitments, blobs, random_blobs) = pre_setup();
let (block_commitments, blobs, random_blobs) =
$setup_fn(block_commitments, blobs, random_blobs);
let block_root = Hash256::zero();
let mut cache = <$type_name>::empty(block_root);
cache.merge_blobs(blobs);
cache.merge_blobs(random_blobs);
cache.merge_block(block_commitments);
assert_cache_consistent(cache);
}
}
};
}
generate_tests!(
processing_components_tests,
ProcessingComponents::<E>,
kzg_commitments,
processing_blobs,
setup_processing_components
);
generate_tests!(
pending_components_tests,
PendingComponents<E>,
executed_block,
verified_blobs,
setup_pending_components
);
generate_tests!(
child_component_tests,
ChildComponents::<E>,
downloaded_block,
downloaded_blobs,
setup_child_components
);
}


@ -0,0 +1,54 @@
use crate::block_verification_types::RpcBlock;
use crate::data_availability_checker::AvailabilityView;
use bls::Hash256;
use std::sync::Arc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{EthSpec, SignedBeaconBlock};
/// For requests triggered by an `UnknownBlockParent` or `UnknownBlobParent`, this struct
/// is used to cache components as they are sent to the network service. We can't use the
/// data availability cache currently because any blocks or blobs without parents
/// won't pass validation and therefore won't make it into the cache.
pub struct ChildComponents<E: EthSpec> {
pub block_root: Hash256,
pub downloaded_block: Option<Arc<SignedBeaconBlock<E>>>,
pub downloaded_blobs: FixedBlobSidecarList<E>,
}
impl<E: EthSpec> From<RpcBlock<E>> for ChildComponents<E> {
fn from(value: RpcBlock<E>) -> Self {
let (block_root, block, blobs) = value.deconstruct();
let fixed_blobs = blobs.map(|blobs| {
FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::<Vec<_>>())
});
Self::new(block_root, Some(block), fixed_blobs)
}
}
impl<E: EthSpec> ChildComponents<E> {
pub fn empty(block_root: Hash256) -> Self {
Self {
block_root,
downloaded_block: None,
downloaded_blobs: <_>::default(),
}
}
pub fn new(
block_root: Hash256,
block: Option<Arc<SignedBeaconBlock<E>>>,
blobs: Option<FixedBlobSidecarList<E>>,
) -> Self {
let mut cache = Self::empty(block_root);
if let Some(block) = block {
cache.merge_block(block);
}
if let Some(blobs) = blobs {
cache.merge_blobs(blobs);
}
cache
}
pub fn clear_blobs(&mut self) {
self.downloaded_blobs = FixedBlobSidecarList::default();
}
}
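A minimal usage sketch for the lookup flow described above (the caller is hypothetical; `merge_block` and `merge_blobs` come from the `AvailabilityView` trait that `ChildComponents::new` already relies on):
fn cache_block_with_unknown_parent<E: EthSpec>(
    block_root: Hash256,
    block: Arc<SignedBeaconBlock<E>>,
) -> ChildComponents<E> {
    // Start an empty cache keyed by the child's block root.
    let mut components = ChildComponents::empty(block_root);
    // Merge the downloaded block now; blobs can be merged later with
    // `merge_blobs` as they arrive from the network.
    components.merge_block(block);
    components
}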


@ -0,0 +1,79 @@
use kzg::{Error as KzgError, KzgCommitment};
use types::{BeaconStateError, Hash256};
#[derive(Debug)]
pub enum Error {
Kzg(KzgError),
KzgNotInitialized,
KzgVerificationFailed,
KzgCommitmentMismatch {
blob_commitment: KzgCommitment,
block_commitment: KzgCommitment,
},
Unexpected,
SszTypes(ssz_types::Error),
MissingBlobs,
BlobIndexInvalid(u64),
StoreError(store::Error),
DecodeError(ssz::DecodeError),
ParentStateMissing(Hash256),
BlockReplayError(state_processing::BlockReplayError),
RebuildingStateCaches(BeaconStateError),
}
pub enum ErrorCategory {
/// Internal errors (not caused by peers)
Internal,
/// Errors caused by faulty / malicious peers
Malicious,
}
impl Error {
pub fn category(&self) -> ErrorCategory {
match self {
Error::KzgNotInitialized
| Error::SszTypes(_)
| Error::MissingBlobs
| Error::StoreError(_)
| Error::DecodeError(_)
| Error::Unexpected
| Error::ParentStateMissing(_)
| Error::BlockReplayError(_)
| Error::RebuildingStateCaches(_) => ErrorCategory::Internal,
Error::Kzg(_)
| Error::BlobIndexInvalid(_)
| Error::KzgCommitmentMismatch { .. }
| Error::KzgVerificationFailed => ErrorCategory::Malicious,
}
}
}
impl From<ssz_types::Error> for Error {
fn from(value: ssz_types::Error) -> Self {
Self::SszTypes(value)
}
}
impl From<store::Error> for Error {
fn from(value: store::Error) -> Self {
Self::StoreError(value)
}
}
impl From<ssz::DecodeError> for Error {
fn from(value: ssz::DecodeError) -> Self {
Self::DecodeError(value)
}
}
impl From<state_processing::BlockReplayError> for Error {
fn from(value: state_processing::BlockReplayError) -> Self {
Self::BlockReplayError(value)
}
}
impl From<KzgError> for Error {
fn from(value: KzgError) -> Self {
Self::Kzg(value)
}
}
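A sketch of how a caller might act on `ErrorCategory`; the downscoring hook is hypothetical and stands in for whatever peer-scoring machinery the caller has:
fn react_to_error(error: Error, mut downscore_peer: impl FnMut()) {
    match error.category() {
        // Our own fault: surface the error, don't punish anyone.
        ErrorCategory::Internal => eprintln!("internal availability error: {:?}", error),
        // Evidence of a faulty or malicious peer.
        ErrorCategory::Malicious => downscore_peer(),
    }
}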

File diff suppressed because it is too large.


@ -0,0 +1,74 @@
use crate::data_availability_checker::AvailabilityView;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
use types::{EthSpec, Hash256, Slot};
/// This cache is used only for gossip blocks/blobs and single block/blob lookups, to give req/resp
/// a view of what we have and what we require. This cache serves a slightly different purpose than
/// gossip caches because it allows us to process duplicate blobs that are valid in gossip.
/// See `AvailabilityView`'s trait definition.
#[derive(Default)]
pub struct ProcessingCache<E: EthSpec> {
processing_cache: HashMap<Hash256, ProcessingComponents<E>>,
}
impl<E: EthSpec> ProcessingCache<E> {
pub fn get(&self, block_root: &Hash256) -> Option<&ProcessingComponents<E>> {
self.processing_cache.get(block_root)
}
pub fn entry(&mut self, block_root: Hash256) -> Entry<'_, Hash256, ProcessingComponents<E>> {
self.processing_cache.entry(block_root)
}
pub fn remove(&mut self, block_root: &Hash256) {
self.processing_cache.remove(block_root);
}
pub fn has_block(&self, block_root: &Hash256) -> bool {
self.processing_cache
.get(block_root)
.map_or(false, |b| b.block_exists())
}
pub fn incomplete_processing_components(&self, slot: Slot) -> Vec<Hash256> {
let mut roots_missing_components = vec![];
for (&block_root, info) in self.processing_cache.iter() {
if info.slot == slot && !info.is_available() {
roots_missing_components.push(block_root);
}
}
roots_missing_components
}
}
#[derive(Debug, Clone)]
pub struct ProcessingComponents<E: EthSpec> {
slot: Slot,
/// Blobs required for a block can only be known if we have seen the block. So `Some` here
/// means we've seen it; `None` means we haven't. The `kzg_commitments` value helps us figure
/// out whether incoming blobs actually match the block.
pub block_commitments: Option<KzgCommitments<E>>,
/// `KzgCommitments` for blobs are always known, even if we haven't seen the block. See
/// `AvailabilityView`'s trait definition for more details.
pub blob_commitments: KzgCommitmentOpts<E>,
}
impl<E: EthSpec> ProcessingComponents<E> {
pub fn new(slot: Slot) -> Self {
Self {
slot,
block_commitments: None,
blob_commitments: KzgCommitmentOpts::<E>::default(),
}
}
}
// Not safe for use outside of tests, as `ProcessingComponents` always requires a real slot and this hard-codes a dummy one.
#[cfg(test)]
impl<E: EthSpec> ProcessingComponents<E> {
pub fn empty(_block_root: Hash256) -> Self {
Self {
slot: Slot::new(0),
block_commitments: None,
blob_commitments: KzgCommitmentOpts::<E>::default(),
}
}
}
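A sketch of the intended gossip flow, using only the methods defined above (the caller and locking discipline are assumed):
fn track_and_report<E: EthSpec>(
    cache: &mut ProcessingCache<E>,
    block_root: Hash256,
    slot: Slot,
) -> Vec<Hash256> {
    // Record that processing has started for this root.
    cache
        .entry(block_root)
        .or_insert_with(|| ProcessingComponents::new(slot));
    // Report which roots at this slot still lack a block or blobs.
    cache.incomplete_processing_components(slot)
}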


@ -0,0 +1,230 @@
use crate::block_verification_types::AsBlock;
use crate::{
block_verification_types::BlockImportData,
data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO},
eth1_finalization_cache::Eth1FinalizationData,
AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome,
};
use lru::LruCache;
use parking_lot::RwLock;
use ssz_derive::{Decode, Encode};
use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy};
use std::sync::Arc;
use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc};
use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock};
/// This mirrors everything in the `AvailabilityPendingExecutedBlock`, except
/// that it is much smaller because it contains only a state root instead of
/// a full `BeaconState`.
#[derive(Encode, Decode, Clone)]
pub struct DietAvailabilityPendingExecutedBlock<E: EthSpec> {
#[ssz(with = "ssz_tagged_signed_beacon_block_arc")]
block: Arc<SignedBeaconBlock<E>>,
state_root: Hash256,
#[ssz(with = "ssz_tagged_signed_beacon_block")]
parent_block: SignedBeaconBlock<E, BlindedPayload<E>>,
parent_eth1_finalization_data: Eth1FinalizationData,
confirmed_state_roots: Vec<Hash256>,
consensus_context: ConsensusContext<E>,
payload_verification_outcome: PayloadVerificationOutcome,
}
/// Implements the same methods as `AvailabilityPendingExecutedBlock`.
impl<E: EthSpec> DietAvailabilityPendingExecutedBlock<E> {
pub fn as_block(&self) -> &SignedBeaconBlock<E> {
&self.block
}
pub fn num_blobs_expected(&self) -> usize {
self.block
.message()
.body()
.blob_kzg_commitments()
.map_or(0, |commitments| commitments.len())
}
}
/// This LRU cache holds `BeaconState`s used for block import. If the cache overflows,
/// the least recently used state will be dropped. If the dropped state is needed
/// later on, it will be recovered by loading the parent state and replaying the block.
///
/// WARNING: This cache assumes the parent block of any `AvailabilityPendingExecutedBlock`
/// has already been imported into ForkChoice. If this is not the case, the cache
/// will fail to recover the state when the cache overflows because it can't load
/// the parent state!
pub struct StateLRUCache<T: BeaconChainTypes> {
states: RwLock<LruCache<Hash256, BeaconState<T::EthSpec>>>,
store: BeaconStore<T>,
spec: ChainSpec,
}
impl<T: BeaconChainTypes> StateLRUCache<T> {
pub fn new(store: BeaconStore<T>, spec: ChainSpec) -> Self {
Self {
states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY_NON_ZERO)),
store,
spec,
}
}
/// This will store the state in the LRU cache and return a
/// `DietAvailabilityPendingExecutedBlock` which is much cheaper to
/// keep around in memory.
pub fn register_pending_executed_block(
&self,
executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>,
) -> DietAvailabilityPendingExecutedBlock<T::EthSpec> {
let state = executed_block.import_data.state;
let state_root = executed_block.block.state_root();
self.states.write().put(state_root, state);
DietAvailabilityPendingExecutedBlock {
block: executed_block.block,
state_root,
parent_block: executed_block.import_data.parent_block,
parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data,
confirmed_state_roots: executed_block.import_data.confirmed_state_roots,
consensus_context: executed_block.import_data.consensus_context,
payload_verification_outcome: executed_block.payload_verification_outcome,
}
}
/// Recover the `AvailabilityPendingExecutedBlock` from the diet version.
/// This method will first check the cache and if the state is not found
/// it will reconstruct the state by loading the parent state from disk and
/// replaying the block.
pub fn recover_pending_executed_block(
&self,
diet_executed_block: DietAvailabilityPendingExecutedBlock<T::EthSpec>,
) -> Result<AvailabilityPendingExecutedBlock<T::EthSpec>, AvailabilityCheckError> {
let maybe_state = self.states.write().pop(&diet_executed_block.state_root);
if let Some(state) = maybe_state {
let block_root = diet_executed_block.block.canonical_root();
Ok(AvailabilityPendingExecutedBlock {
block: diet_executed_block.block,
import_data: BlockImportData {
block_root,
state,
parent_block: diet_executed_block.parent_block,
parent_eth1_finalization_data: diet_executed_block
.parent_eth1_finalization_data,
confirmed_state_roots: diet_executed_block.confirmed_state_roots,
consensus_context: diet_executed_block.consensus_context,
},
payload_verification_outcome: diet_executed_block.payload_verification_outcome,
})
} else {
self.reconstruct_pending_executed_block(diet_executed_block)
}
}
/// Reconstruct the `AvailabilityPendingExecutedBlock` by loading the parent
/// state from disk and replaying the block. This function does NOT check the
/// LRU cache.
pub fn reconstruct_pending_executed_block(
&self,
diet_executed_block: DietAvailabilityPendingExecutedBlock<T::EthSpec>,
) -> Result<AvailabilityPendingExecutedBlock<T::EthSpec>, AvailabilityCheckError> {
let block_root = diet_executed_block.block.canonical_root();
let state = self.reconstruct_state(&diet_executed_block)?;
Ok(AvailabilityPendingExecutedBlock {
block: diet_executed_block.block,
import_data: BlockImportData {
block_root,
state,
parent_block: diet_executed_block.parent_block,
parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data,
confirmed_state_roots: diet_executed_block.confirmed_state_roots,
consensus_context: diet_executed_block.consensus_context,
},
payload_verification_outcome: diet_executed_block.payload_verification_outcome,
})
}
/// Reconstruct the state by loading the parent state from disk and replaying
/// the block.
fn reconstruct_state(
&self,
diet_executed_block: &DietAvailabilityPendingExecutedBlock<T::EthSpec>,
) -> Result<BeaconState<T::EthSpec>, AvailabilityCheckError> {
let parent_block_root = diet_executed_block.parent_block.canonical_root();
let parent_block_state_root = diet_executed_block.parent_block.state_root();
let (parent_state_root, parent_state) = self
.store
.get_advanced_hot_state(
parent_block_root,
diet_executed_block.parent_block.slot(),
parent_block_state_root,
)
.map_err(AvailabilityCheckError::StoreError)?
.ok_or(AvailabilityCheckError::ParentStateMissing(
parent_block_state_root,
))?;
let state_roots = vec![
Ok((parent_state_root, diet_executed_block.parent_block.slot())),
Ok((
diet_executed_block.state_root,
diet_executed_block.block.slot(),
)),
];
let block_replayer: BlockReplayer<'_, T::EthSpec, AvailabilityCheckError, _> =
BlockReplayer::new(parent_state, &self.spec)
.no_signature_verification()
.state_processing_strategy(StateProcessingStrategy::Accurate)
.state_root_iter(state_roots.into_iter())
.minimal_block_root_verification();
block_replayer
.apply_blocks(vec![diet_executed_block.block.clone_as_blinded()], None)
.map(|block_replayer| block_replayer.into_state())
.and_then(|mut state| {
state
.build_exit_cache(&self.spec)
.map_err(AvailabilityCheckError::RebuildingStateCaches)?;
state
.update_tree_hash_cache()
.map_err(AvailabilityCheckError::RebuildingStateCaches)?;
Ok(state)
})
}
/// Returns the state cache for inspection in tests.
#[cfg(test)]
pub fn lru_cache(&self) -> &RwLock<LruCache<Hash256, BeaconState<T::EthSpec>>> {
&self.states
}
/// Remove any states from the cache that predate the given epoch.
pub fn do_maintenance(&self, cutoff_epoch: Epoch) {
let mut write_lock = self.states.write();
while let Some((_, state)) = write_lock.peek_lru() {
if state.slot().epoch(T::EthSpec::slots_per_epoch()) < cutoff_epoch {
write_lock.pop_lru();
} else {
break;
}
}
}
}
/// This can only be used during testing. The intended way to
/// obtain a `DietAvailabilityPendingExecutedBlock` is to call
/// `register_pending_executed_block` on the `StateLRUCache`.
#[cfg(test)]
impl<E: EthSpec> From<AvailabilityPendingExecutedBlock<E>>
for DietAvailabilityPendingExecutedBlock<E>
{
fn from(value: AvailabilityPendingExecutedBlock<E>) -> Self {
Self {
block: value.block,
state_root: value.import_data.state.canonical_root(),
parent_block: value.import_data.parent_block,
parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data,
confirmed_state_roots: value.import_data.confirmed_state_roots,
consensus_context: value.import_data.consensus_context,
payload_verification_outcome: value.payload_verification_outcome,
}
}
}
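A round-trip sketch using the two methods above; the cache is assumed to have been built with a live store and spec:
fn shrink_and_recover<T: BeaconChainTypes>(
    cache: &StateLRUCache<T>,
    executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>,
) -> Result<AvailabilityPendingExecutedBlock<T::EthSpec>, AvailabilityCheckError> {
    // The full `BeaconState` is parked in the LRU cache...
    let diet_block = cache.register_pending_executed_block(executed_block);
    // ...and pulled back out, or rebuilt from the parent state by block
    // replay if it was evicted in the meantime.
    cache.recover_pending_executed_block(diet_block)
}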


@ -0,0 +1,121 @@
//! Provides tools for checking if a node is ready for the Deneb upgrade.
use crate::{BeaconChain, BeaconChainTypes};
use execution_layer::http::{
ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V3,
};
use serde::{Deserialize, Serialize};
use std::fmt;
use std::time::Duration;
use types::*;
use super::merge_readiness::SECONDS_IN_A_WEEK;
/// The time before the Deneb fork when we will start issuing warnings about preparation.
pub const DENEB_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2;
pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300;
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "type")]
pub enum DenebReadiness {
/// The execution engine is deneb-enabled (as far as we can tell)
Ready,
/// We are connected to an execution engine which doesn't support the V3 engine api methods
V3MethodsNotSupported { error: String },
/// Exchanging capabilities with the EL failed; there might be a problem with
/// connectivity, authentication, or a mismatch in configuration.
ExchangeCapabilitiesFailed { error: String },
/// The user has not configured an execution endpoint
NoExecutionEndpoint,
}
impl fmt::Display for DenebReadiness {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DenebReadiness::Ready => {
write!(f, "This node appears ready for Deneb.")
}
DenebReadiness::ExchangeCapabilitiesFailed { error } => write!(
f,
"Could not exchange capabilities with the \
execution endpoint: {}",
error
),
DenebReadiness::NoExecutionEndpoint => write!(
f,
"The --execution-endpoint flag is not specified, this is a \
requirement post-merge"
),
DenebReadiness::V3MethodsNotSupported { error } => write!(
f,
"Execution endpoint does not support Deneb methods: {}",
error
),
}
}
}
impl<T: BeaconChainTypes> BeaconChain<T> {
/// Returns `true` if the Deneb epoch is set and the Deneb fork has occurred or will
/// occur within `DENEB_READINESS_PREPARATION_SECONDS`.
pub fn is_time_to_prepare_for_deneb(&self, current_slot: Slot) -> bool {
if let Some(deneb_epoch) = self.spec.deneb_fork_epoch {
let deneb_slot = deneb_epoch.start_slot(T::EthSpec::slots_per_epoch());
let deneb_readiness_preparation_slots =
DENEB_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot;
// Return `true` if Deneb has happened or is within the preparation time.
current_slot + deneb_readiness_preparation_slots > deneb_slot
} else {
// The Deneb fork epoch has not been defined yet, no need to prepare.
false
}
}
/// Attempts to connect to the EL and confirm that it is ready for Deneb.
pub async fn check_deneb_readiness(&self) -> DenebReadiness {
if let Some(el) = self.execution_layer.as_ref() {
match el
.get_engine_capabilities(Some(Duration::from_secs(
ENGINE_CAPABILITIES_REFRESH_INTERVAL,
)))
.await
{
Err(e) => {
// The EL was either unreachable or responded with an error
DenebReadiness::ExchangeCapabilitiesFailed {
error: format!("{:?}", e),
}
}
Ok(capabilities) => {
let mut missing_methods = String::from("Required Methods Unsupported:");
let mut all_good = true;
if !capabilities.get_payload_v3 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_GET_PAYLOAD_V3);
all_good = false;
}
if !capabilities.forkchoice_updated_v3 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V3);
all_good = false;
}
if !capabilities.new_payload_v3 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_NEW_PAYLOAD_V3);
all_good = false;
}
if all_good {
DenebReadiness::Ready
} else {
DenebReadiness::V3MethodsNotSupported {
error: missing_methods,
}
}
}
}
} else {
DenebReadiness::NoExecutionEndpoint
}
}
}
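As a back-of-envelope check of the window above, assuming mainnet's 12-second slots (`seconds_per_slot` is actually read from the spec at runtime):
const SECONDS_IN_A_WEEK: u64 = 7 * 24 * 3600; // 604_800
const PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; // 1_209_600
const SECONDS_PER_SLOT: u64 = 12; // mainnet assumption
// 100_800 slots, i.e. warnings begin 100_800 slots before the Deneb fork.
const PREPARATION_SLOTS: u64 = PREPARATION_SECONDS / SECONDS_PER_SLOT;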


@ -1,3 +1,4 @@
use crate::data_availability_checker::AvailableBlock;
use crate::{
attester_cache::{CommitteeLengths, Error},
metrics,
@ -5,6 +6,7 @@ use crate::{
use parking_lot::RwLock;
use proto_array::Block as ProtoBlock;
use std::sync::Arc;
use types::blob_sidecar::BlobSidecarList;
use types::*;
pub struct CacheItem<E: EthSpec> {
@ -20,6 +22,7 @@ pub struct CacheItem<E: EthSpec> {
* Values used to make the block available.
*/
block: Arc<SignedBeaconBlock<E>>,
blobs: Option<BlobSidecarList<E>>,
proto_block: ProtoBlock,
}
@ -49,7 +52,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
pub fn add_head_block(
&self,
beacon_block_root: Hash256,
block: Arc<SignedBeaconBlock<E>>,
block: AvailableBlock<E>,
proto_block: ProtoBlock,
state: &BeaconState<E>,
spec: &ChainSpec,
@ -67,6 +70,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
},
};
let (_, block, blobs) = block.deconstruct();
let item = CacheItem {
epoch,
committee_lengths,
@ -74,6 +78,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
source,
target,
block,
blobs,
proto_block,
};
@ -94,9 +99,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
spec: &ChainSpec,
) -> Result<Option<Attestation<E>>, Error> {
let lock = self.item.read();
let item = if let Some(item) = lock.as_ref() {
item
} else {
let Some(item) = lock.as_ref() else {
return Ok(None);
};
@ -155,6 +158,15 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
.map(|item| item.block.clone())
}
/// Returns the blobs, if `block_root` matches the cached item.
pub fn get_blobs(&self, block_root: Hash256) -> Option<BlobSidecarList<E>> {
self.item
.read()
.as_ref()
.filter(|item| item.beacon_block_root == block_root)
.and_then(|item| item.blobs.clone())
}
/// Returns the proto-array block, if `block_root` matches the cached item.
pub fn get_proto_block(&self, block_root: Hash256) -> Option<ProtoBlock> {
self.item


@ -2,12 +2,14 @@ use crate::attester_cache::Error as AttesterCacheError;
use crate::beacon_block_streamer::Error as BlockStreamerError;
use crate::beacon_chain::ForkChoiceError;
use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError;
use crate::data_availability_checker::AvailabilityCheckError;
use crate::eth1_chain::Error as Eth1ChainError;
use crate::historical_blocks::HistoricalBlockError;
use crate::migrate::PruningError;
use crate::naive_aggregation_pool::Error as NaiveAggregationError;
use crate::observed_aggregates::Error as ObservedAttestationsError;
use crate::observed_attesters::Error as ObservedAttestersError;
use crate::observed_blob_sidecars::Error as ObservedBlobSidecarsError;
use crate::observed_block_producers::Error as ObservedBlockProducersError;
use execution_layer::PayloadStatus;
use fork_choice::ExecutionStatus;
@ -102,6 +104,7 @@ pub enum BeaconChainError {
ObservedAttestationsError(ObservedAttestationsError),
ObservedAttestersError(ObservedAttestersError),
ObservedBlockProducersError(ObservedBlockProducersError),
ObservedBlobSidecarsError(ObservedBlobSidecarsError),
AttesterCacheError(AttesterCacheError),
PruningError(PruningError),
ArithError(ArithError),
@ -217,6 +220,9 @@ pub enum BeaconChainError {
InconsistentFork(InconsistentFork),
ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>),
UnableToPublish,
AvailabilityCheckError(AvailabilityCheckError),
LightClientError(LightClientError),
UnsupportedFork,
}
easy_from_to!(SlotProcessingError, BeaconChainError);
@ -233,6 +239,7 @@ easy_from_to!(NaiveAggregationError, BeaconChainError);
easy_from_to!(ObservedAttestationsError, BeaconChainError);
easy_from_to!(ObservedAttestersError, BeaconChainError);
easy_from_to!(ObservedBlockProducersError, BeaconChainError);
easy_from_to!(ObservedBlobSidecarsError, BeaconChainError);
easy_from_to!(AttesterCacheError, BeaconChainError);
easy_from_to!(BlockSignatureVerifierError, BeaconChainError);
easy_from_to!(PruningError, BeaconChainError);
@ -242,6 +249,7 @@ easy_from_to!(HistoricalBlockError, BeaconChainError);
easy_from_to!(StateAdvanceError, BeaconChainError);
easy_from_to!(BlockReplayError, BeaconChainError);
easy_from_to!(InconsistentFork, BeaconChainError);
easy_from_to!(AvailabilityCheckError, BeaconChainError);
#[derive(Debug)]
pub enum BlockProductionError {
@ -270,11 +278,17 @@ pub enum BlockProductionError {
MissingFinalizedBlock(Hash256),
BlockTooLarge(usize),
ShuttingDown,
MissingBlobs,
MissingSyncAggregate,
MissingExecutionPayload,
TokioJoin(tokio::task::JoinError),
MissingKzgCommitment(String),
TokioJoin(JoinError),
BeaconChain(BeaconChainError),
InvalidPayloadFork,
TrustedSetupNotInitialized,
InvalidBlockVariant(String),
KzgError(kzg::Error),
FailedToBuildBlobSidecars(String),
}
easy_from_to!(BlockProcessingError, BlockProductionError);
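For reference, each `easy_from_to!` invocation generates a plain `From` conversion along these lines (a sketch of the expansion, which relies on the target enum having a variant named after the source type, as the variants above do):
impl From<AvailabilityCheckError> for BeaconChainError {
    fn from(e: AvailabilityCheckError) -> BeaconChainError {
        BeaconChainError::AvailabilityCheckError(e)
    }
}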


@ -1,4 +1,5 @@
use slog::{debug, Logger};
use ssz_derive::{Decode, Encode};
use std::cmp;
use std::collections::BTreeMap;
use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root};
@ -10,7 +11,7 @@ pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5;
/// These fields are named the same as the corresponding fields in the `BeaconState`
/// as this structure stores these values from the `BeaconState` at a `Checkpoint`
#[derive(Clone)]
#[derive(Clone, Debug, PartialEq, Encode, Decode)]
pub struct Eth1FinalizationData {
pub eth1_data: Eth1Data,
pub eth1_deposit_index: u64,
@ -66,7 +67,7 @@ impl CheckpointMap {
pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) {
self.store
.entry(checkpoint.epoch)
.or_insert_with(Vec::new)
.or_default()
.push((checkpoint.root, eth1_finalization_data));
// faster to reduce size after the fact than do pre-checking to see


@ -9,6 +9,7 @@ const DEFAULT_CHANNEL_CAPACITY: usize = 16;
pub struct ServerSentEventHandler<T: EthSpec> {
attestation_tx: Sender<EventKind<T>>,
block_tx: Sender<EventKind<T>>,
blob_sidecar_tx: Sender<EventKind<T>>,
finalized_tx: Sender<EventKind<T>>,
head_tx: Sender<EventKind<T>>,
exit_tx: Sender<EventKind<T>>,
@ -16,6 +17,8 @@ pub struct ServerSentEventHandler<T: EthSpec> {
contribution_tx: Sender<EventKind<T>>,
payload_attributes_tx: Sender<EventKind<T>>,
late_head: Sender<EventKind<T>>,
light_client_finality_update_tx: Sender<EventKind<T>>,
light_client_optimistic_update_tx: Sender<EventKind<T>>,
block_reward_tx: Sender<EventKind<T>>,
log: Logger,
}
@ -31,6 +34,7 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
pub fn new_with_capacity(log: Logger, capacity: usize) -> Self {
let (attestation_tx, _) = broadcast::channel(capacity);
let (block_tx, _) = broadcast::channel(capacity);
let (blob_sidecar_tx, _) = broadcast::channel(capacity);
let (finalized_tx, _) = broadcast::channel(capacity);
let (head_tx, _) = broadcast::channel(capacity);
let (exit_tx, _) = broadcast::channel(capacity);
@ -38,11 +42,14 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
let (contribution_tx, _) = broadcast::channel(capacity);
let (payload_attributes_tx, _) = broadcast::channel(capacity);
let (late_head, _) = broadcast::channel(capacity);
let (light_client_finality_update_tx, _) = broadcast::channel(capacity);
let (light_client_optimistic_update_tx, _) = broadcast::channel(capacity);
let (block_reward_tx, _) = broadcast::channel(capacity);
Self {
attestation_tx,
block_tx,
blob_sidecar_tx,
finalized_tx,
head_tx,
exit_tx,
@ -50,6 +57,8 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
contribution_tx,
payload_attributes_tx,
late_head,
light_client_finality_update_tx,
light_client_optimistic_update_tx,
block_reward_tx,
log,
}
@ -73,6 +82,10 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
.block_tx
.send(kind)
.map(|count| log_count("block", count)),
EventKind::BlobSidecar(_) => self
.blob_sidecar_tx
.send(kind)
.map(|count| log_count("blob sidecar", count)),
EventKind::FinalizedCheckpoint(_) => self
.finalized_tx
.send(kind)
@ -101,6 +114,14 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
.late_head
.send(kind)
.map(|count| log_count("late head", count)),
EventKind::LightClientFinalityUpdate(_) => self
.light_client_finality_update_tx
.send(kind)
.map(|count| log_count("light client finality update", count)),
EventKind::LightClientOptimisticUpdate(_) => self
.light_client_optimistic_update_tx
.send(kind)
.map(|count| log_count("light client optimistic update", count)),
EventKind::BlockReward(_) => self
.block_reward_tx
.send(kind)
@ -119,6 +140,10 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
self.block_tx.subscribe()
}
pub fn subscribe_blob_sidecar(&self) -> Receiver<EventKind<T>> {
self.blob_sidecar_tx.subscribe()
}
pub fn subscribe_finalized(&self) -> Receiver<EventKind<T>> {
self.finalized_tx.subscribe()
}
@ -147,6 +172,14 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
self.late_head.subscribe()
}
pub fn subscribe_light_client_finality_update(&self) -> Receiver<EventKind<T>> {
self.light_client_finality_update_tx.subscribe()
}
pub fn subscribe_light_client_optimistic_update(&self) -> Receiver<EventKind<T>> {
self.light_client_optimistic_update_tx.subscribe()
}
pub fn subscribe_block_reward(&self) -> Receiver<EventKind<T>> {
self.block_reward_tx.subscribe()
}
@ -159,6 +192,10 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
self.block_tx.receiver_count() > 0
}
pub fn has_blob_sidecar_subscribers(&self) -> bool {
self.blob_sidecar_tx.receiver_count() > 0
}
pub fn has_finalized_subscribers(&self) -> bool {
self.finalized_tx.receiver_count() > 0
}
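A consumer-side sketch for the new blob sidecar stream; a tokio runtime is assumed, and the lagged-receiver case is simply treated as end-of-stream:
async fn stream_blob_sidecars<T: EthSpec>(handler: &ServerSentEventHandler<T>) {
    let mut rx = handler.subscribe_blob_sidecar();
    // Each item is an `EventKind::BlobSidecar(_)` published via `register`;
    // the loop ends when all senders are dropped or the receiver lags.
    while let Ok(event) = rx.recv().await {
        let _ = event;
    }
}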


@ -12,7 +12,10 @@ use crate::{
BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError,
ExecutionPayloadError,
};
use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus};
use execution_layer::{
BlockProposalContents, BlockProposalContentsType, BuilderParams, NewPayloadRequest,
PayloadAttributes, PayloadStatus,
};
use fork_choice::{InvalidationOperation, PayloadVerificationStatus};
use proto_array::{Block as ProtoBlock, ExecutionStatus};
use slog::{debug, warn};
@ -24,11 +27,11 @@ use state_processing::per_block_processing::{
use std::sync::Arc;
use tokio::task::JoinHandle;
use tree_hash::TreeHash;
use types::payload::BlockProductionVersion;
use types::*;
pub type PreparePayloadResult<E, Payload> =
Result<BlockProposalContents<E, Payload>, BlockProductionError>;
pub type PreparePayloadHandle<E, Payload> = JoinHandle<Option<PreparePayloadResult<E, Payload>>>;
pub type PreparePayloadResult<E> = Result<BlockProposalContentsType<E>, BlockProductionError>;
pub type PreparePayloadHandle<E> = JoinHandle<Option<PreparePayloadResult<E>>>;
#[derive(PartialEq)]
pub enum AllowOptimisticImport {
@ -68,11 +71,10 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
// the block as optimistically imported. This is particularly relevant in the case
// where we do not send the block to the EL at all.
let block_message = block.message();
let payload = block_message.execution_payload()?;
partially_verify_execution_payload::<_, FullPayload<_>>(
state,
block.slot(),
payload,
block_message.body(),
&chain.spec,
)
.map_err(BlockError::PerBlockProcessingError)?;
@ -86,13 +88,11 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
.as_ref()
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
if let Err(e) =
execution_layer.verify_payload_block_hash(payload.execution_payload_ref())
{
if let Err(e) = execution_layer.verify_payload_block_hash(block_message) {
warn!(
chain.log,
"Falling back to slow block hash verification";
"block_number" => payload.block_number(),
"block_number" => ?block_message.execution_payload().map(|payload| payload.block_number()),
"info" => "you can silence this warning with --disable-optimistic-finalized-sync",
"error" => ?e,
);
@ -138,15 +138,15 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
chain: &Arc<BeaconChain<T>>,
block: BeaconBlockRef<'a, T::EthSpec>,
) -> Result<PayloadVerificationStatus, BlockError<T::EthSpec>> {
let execution_payload = block.execution_payload()?;
let execution_layer = chain
.execution_layer
.as_ref()
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
let new_payload_request: NewPayloadRequest<T::EthSpec> = block.try_into()?;
let execution_block_hash = new_payload_request.block_hash();
let new_payload_response = execution_layer
.notify_new_payload(&execution_payload.into())
.notify_new_payload(new_payload_request)
.await;
match new_payload_response {
@ -164,7 +164,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
"Invalid execution payload";
"validation_error" => ?validation_error,
"latest_valid_hash" => ?latest_valid_hash,
"execution_block_hash" => ?execution_payload.block_hash(),
"execution_block_hash" => ?execution_block_hash,
"root" => ?block.tree_hash_root(),
"graffiti" => block.body().graffiti().as_utf8_lossy(),
"proposer_index" => block.proposer_index(),
@ -210,7 +210,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
chain.log,
"Invalid execution payload block hash";
"validation_error" => ?validation_error,
"execution_block_hash" => ?execution_payload.block_hash(),
"execution_block_hash" => ?execution_block_hash,
"root" => ?block.tree_hash_root(),
"graffiti" => block.body().graffiti().as_utf8_lossy(),
"proposer_index" => block.proposer_index(),
@ -399,15 +399,15 @@ pub fn validate_execution_payload_for_gossip<T: BeaconChainTypes>(
/// Equivalent to the `get_execution_payload` function in the Validator Guide:
///
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal
pub fn get_execution_payload<
T: BeaconChainTypes,
Payload: AbstractExecPayload<T::EthSpec> + 'static,
>(
pub fn get_execution_payload<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
state: &BeaconState<T::EthSpec>,
parent_block_root: Hash256,
proposer_index: u64,
builder_params: BuilderParams,
) -> Result<PreparePayloadHandle<T::EthSpec, Payload>, BlockProductionError> {
builder_boost_factor: Option<u64>,
block_production_version: BlockProductionVersion,
) -> Result<PreparePayloadHandle<T::EthSpec>, BlockProductionError> {
// Compute all required values from the `state` now to avoid needing to pass it into a spawned
// task.
let spec = &chain.spec;
@ -419,11 +419,19 @@ pub fn get_execution_payload<
let latest_execution_payload_header_block_hash =
state.latest_execution_payload_header()?.block_hash();
let withdrawals = match state {
&BeaconState::Capella(_) => Some(get_expected_withdrawals(state, spec)?.into()),
&BeaconState::Capella(_) | &BeaconState::Deneb(_) => {
Some(get_expected_withdrawals(state, spec)?.into())
}
&BeaconState::Merge(_) => None,
// These shouldn't happen but they're here to make the pattern irrefutable
&BeaconState::Base(_) | &BeaconState::Altair(_) => None,
};
let parent_beacon_block_root = match state {
BeaconState::Deneb(_) => Some(parent_block_root),
BeaconState::Merge(_) | BeaconState::Capella(_) => None,
// These shouldn't happen but they're here to make the pattern irrefutable
BeaconState::Base(_) | BeaconState::Altair(_) => None,
};
// Spawn a task to obtain the execution payload from the EL via a series of async calls. The
// `join_handle` can be used to await the result of the function.
@ -432,7 +440,7 @@ pub fn get_execution_payload<
.clone()
.spawn_handle(
async move {
prepare_execution_payload::<T, Payload>(
prepare_execution_payload::<T>(
&chain,
is_merge_transition_complete,
timestamp,
@ -441,6 +449,9 @@ pub fn get_execution_payload<
latest_execution_payload_header_block_hash,
builder_params,
withdrawals,
parent_beacon_block_root,
builder_boost_factor,
block_production_version,
)
.await
},
@ -466,7 +477,7 @@ pub fn get_execution_payload<
///
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal
#[allow(clippy::too_many_arguments)]
pub async fn prepare_execution_payload<T, Payload>(
pub async fn prepare_execution_payload<T>(
chain: &Arc<BeaconChain<T>>,
is_merge_transition_complete: bool,
timestamp: u64,
@ -475,10 +486,12 @@ pub async fn prepare_execution_payload<T, Payload>(
latest_execution_payload_header_block_hash: ExecutionBlockHash,
builder_params: BuilderParams,
withdrawals: Option<Vec<Withdrawal>>,
) -> Result<BlockProposalContents<T::EthSpec, Payload>, BlockProductionError>
parent_beacon_block_root: Option<Hash256>,
builder_boost_factor: Option<u64>,
block_production_version: BlockProductionVersion,
) -> Result<BlockProposalContentsType<T::EthSpec>, BlockProductionError>
where
T: BeaconChainTypes,
Payload: AbstractExecPayload<T::EthSpec>,
{
let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch());
let spec = &chain.spec;
@ -496,7 +509,12 @@ where
if is_terminal_block_hash_set && !is_activation_epoch_reached {
// Use the "empty" payload if there's a terminal block hash, but we haven't reached the
// terminal block epoch yet.
return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
return Ok(BlockProposalContentsType::Full(
BlockProposalContents::Payload {
payload: FullPayload::default_at_fork(fork)?,
block_value: Uint256::zero(),
},
));
}
let terminal_pow_block_hash = execution_layer
@ -509,7 +527,12 @@ where
} else {
// If the merge transition hasn't occurred yet and the EL hasn't found the terminal
// block, return an "empty" payload.
return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
return Ok(BlockProposalContentsType::Full(
BlockProposalContents::Payload {
payload: FullPayload::default_at_fork(fork)?,
block_value: Uint256::zero(),
},
));
}
} else {
latest_execution_payload_header_block_hash
@ -536,20 +559,27 @@ where
let suggested_fee_recipient = execution_layer
.get_suggested_fee_recipient(proposer_index)
.await;
let payload_attributes =
PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals);
let payload_attributes = PayloadAttributes::new(
timestamp,
random,
suggested_fee_recipient,
withdrawals,
parent_beacon_block_root,
);
// Note: the suggested_fee_recipient is stored in the `execution_layer`; it will add this parameter.
//
// This future is not executed here, it's up to the caller to await it.
let block_contents = execution_layer
.get_payload::<Payload>(
.get_payload(
parent_hash,
&payload_attributes,
forkchoice_update_params,
builder_params,
fork,
&chain.spec,
builder_boost_factor,
block_production_version,
)
.await
.map_err(BlockProductionError::GetPayloadFailed)?;


@ -1,4 +1,4 @@
use parking_lot::RwLock;
use parking_lot::{RwLock, RwLockReadGuard};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use types::{Hash256, Slot};
@ -16,6 +16,8 @@ pub enum Error {
#[derive(Default, Debug)]
pub struct HeadTracker(pub RwLock<HashMap<Hash256, Slot>>);
pub type HeadTrackerReader<'a> = RwLockReadGuard<'a, HashMap<Hash256, Slot>>;
impl HeadTracker {
/// Register a block with `Self`, so it may or may not be included in a `Self::heads` call.
///
@ -44,6 +46,11 @@ impl HeadTracker {
/// Returns a `SszHeadTracker`, which contains all necessary information to restore the state
/// of `Self` at some later point.
///
/// Should ONLY be used for tests, due to the potential for database races.
///
/// See <https://github.com/sigp/lighthouse/issues/4773>
#[cfg(test)]
pub fn to_ssz_container(&self) -> SszHeadTracker {
SszHeadTracker::from_map(&self.0.read())
}
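A small sketch of the new `HeadTrackerReader` alias; holding the read guard gives a consistent view of the heads for the duration of the borrow:
fn head_count(tracker: &HeadTracker) -> usize {
    let heads: HeadTrackerReader<'_> = tracker.0.read();
    heads.len()
}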


@ -1,3 +1,4 @@
use crate::data_availability_checker::AvailableBlock;
use crate::{errors::BeaconChainError as Error, metrics, BeaconChain, BeaconChainTypes};
use itertools::Itertools;
use slog::debug;
@ -7,10 +8,9 @@ use state_processing::{
};
use std::borrow::Cow;
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore};
use types::{Hash256, SignedBlindedBeaconBlock, Slot};
use store::{chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, KeyValueStore};
use types::{Hash256, Slot};
/// Use a longer timeout on the pubkey cache.
///
@ -59,27 +59,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Return the number of blocks successfully imported.
pub fn import_historical_block_batch(
&self,
blocks: Vec<Arc<SignedBlindedBeaconBlock<T::EthSpec>>>,
mut blocks: Vec<AvailableBlock<T::EthSpec>>,
) -> Result<usize, Error> {
let anchor_info = self
.store
.get_anchor_info()
.ok_or(HistoricalBlockError::NoAnchorInfo)?;
let blob_info = self.store.get_blob_info();
// Take all blocks with slots less than the oldest block slot.
let num_relevant =
blocks.partition_point(|block| block.slot() < anchor_info.oldest_block_slot);
let blocks_to_import = &blocks
.get(..num_relevant)
.ok_or(HistoricalBlockError::IndexOutOfBounds)?;
let num_relevant = blocks.partition_point(|available_block| {
available_block.block().slot() < anchor_info.oldest_block_slot
});
if blocks_to_import.len() != blocks.len() {
let total_blocks = blocks.len();
blocks.truncate(num_relevant);
let blocks_to_import = blocks;
if blocks_to_import.len() != total_blocks {
debug!(
self.log,
"Ignoring some historic blocks";
"oldest_block_slot" => anchor_info.oldest_block_slot,
"total_blocks" => blocks.len(),
"ignored" => blocks.len().saturating_sub(blocks_to_import.len()),
"total_blocks" => total_blocks,
"ignored" => total_blocks.saturating_sub(blocks_to_import.len()),
);
}
@ -87,17 +90,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
return Ok(0);
}
let n_blobs_lists_to_import = blocks_to_import
.iter()
.filter(|available_block| available_block.blobs().is_some())
.count();
let mut expected_block_root = anchor_info.oldest_block_parent;
let mut prev_block_slot = anchor_info.oldest_block_slot;
let mut chunk_writer =
ChunkWriter::<BlockRoots, _, _>::new(&self.store.cold_db, prev_block_slot.as_usize())?;
let mut new_oldest_blob_slot = blob_info.oldest_blob_slot;
let mut cold_batch = Vec::with_capacity(blocks.len());
let mut hot_batch = Vec::with_capacity(blocks.len());
let mut blob_batch = Vec::with_capacity(n_blobs_lists_to_import);
let mut cold_batch = Vec::with_capacity(blocks_to_import.len());
let mut hot_batch = Vec::with_capacity(blocks_to_import.len());
let mut signed_blocks = Vec::with_capacity(blocks_to_import.len());
for block in blocks_to_import.iter().rev() {
// Check chain integrity.
let block_root = block.canonical_root();
for available_block in blocks_to_import.into_iter().rev() {
let (block_root, block, maybe_blobs) = available_block.deconstruct();
if block_root != expected_block_root {
return Err(HistoricalBlockError::MismatchedBlockRoot {
@ -107,9 +117,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.into());
}
let blinded_block = block.clone_as_blinded();
// Store block in the hot database without payload.
self.store
.blinded_block_as_kv_store_ops(&block_root, block, &mut hot_batch);
.blinded_block_as_kv_store_ops(&block_root, &blinded_block, &mut hot_batch);
// Store the blobs too
if let Some(blobs) = maybe_blobs {
new_oldest_blob_slot = Some(block.slot());
self.store
.blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch);
}
// Store block roots, including at all skip slots in the freezer DB.
for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() {
@ -119,21 +136,23 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
prev_block_slot = block.slot();
expected_block_root = block.message().parent_root();
// If we've reached genesis, add the genesis block root to the batch and set the
// anchor slot to 0 to indicate completion.
// If we've reached genesis, add the genesis block root to the batch for all slots
// between 0 and the first block slot, and set the anchor slot to 0 to indicate
// completion.
if expected_block_root == self.genesis_block_root {
let genesis_slot = self.spec.genesis_slot;
chunk_writer.set(
genesis_slot.as_usize(),
self.genesis_block_root,
&mut cold_batch,
)?;
for slot in genesis_slot.as_usize()..block.slot().as_usize() {
chunk_writer.set(slot, self.genesis_block_root, &mut cold_batch)?;
}
prev_block_slot = genesis_slot;
expected_block_root = Hash256::zero();
break;
}
signed_blocks.push(block);
}
chunk_writer.write(&mut cold_batch)?;
// These were pushed in reverse order, so we reverse again.
signed_blocks.reverse();
// Verify signatures in one batch, holding the pubkey cache lock for the shortest duration
// possible. For each block fetch the parent root from its successor. Slicing from index 1
@ -144,15 +163,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.validator_pubkey_cache
.try_read_for(PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or(HistoricalBlockError::ValidatorPubkeyCacheTimeout)?;
let block_roots = blocks_to_import
let block_roots = signed_blocks
.get(1..)
.ok_or(HistoricalBlockError::IndexOutOfBounds)?
.iter()
.map(|block| block.parent_root())
.chain(iter::once(anchor_info.oldest_block_parent));
let signature_set = blocks_to_import
let signature_set = signed_blocks
.iter()
.zip_eq(block_roots)
.filter(|&(_block, block_root)| (block_root != self.genesis_block_root))
.map(|(block, block_root)| {
block_proposal_signature_set_from_parts(
block,
@ -180,9 +200,26 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Write the I/O batches to disk, writing the blocks themselves first, as it's better
// for the hot DB to contain extra blocks than for the cold DB to point to blocks that
// do not exist.
self.store.blobs_db.do_atomically(blob_batch)?;
self.store.hot_db.do_atomically(hot_batch)?;
self.store.cold_db.do_atomically(cold_batch)?;
let mut anchor_and_blob_batch = Vec::with_capacity(2);
// Update the blob info.
if new_oldest_blob_slot != blob_info.oldest_blob_slot {
if let Some(oldest_blob_slot) = new_oldest_blob_slot {
let new_blob_info = BlobInfo {
oldest_blob_slot: Some(oldest_blob_slot),
..blob_info.clone()
};
anchor_and_blob_batch.push(
self.store
.compare_and_set_blob_info(blob_info, new_blob_info)?,
);
}
}
// Update the anchor.
let new_anchor = AnchorInfo {
oldest_block_slot: prev_block_slot,
@ -190,8 +227,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
..anchor_info
};
let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot);
self.store
.compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?;
anchor_and_blob_batch.push(
self.store
.compare_and_set_anchor_info(Some(anchor_info), Some(new_anchor))?,
);
self.store.hot_db.do_atomically(anchor_and_blob_batch)?;
// If backfill has completed and the chain is configured to reconstruct historic states,
// send a message to the background migrator instructing it to begin reconstruction.
@ -203,6 +243,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.store_migrator.process_reconstruction();
}
Ok(blocks_to_import.len())
Ok(num_relevant)
}
}
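The import above leans on `partition_point` to keep only blocks older than the current anchor; a standalone illustration with slots as plain integers (the input must already be sorted ascending by slot):
fn split_relevant(slots: &[u64], oldest_block_slot: u64) -> (&[u64], &[u64]) {
    // `partition_point` returns the length of the leading run for which the
    // predicate holds, i.e. the number of importable blocks.
    let num_relevant = slots.partition_point(|&slot| slot < oldest_block_slot);
    slots.split_at(num_relevant)
}
// split_relevant(&[1, 2, 3, 7, 9], 5) == (&[1, 2, 3], &[7, 9])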


@ -0,0 +1,78 @@
use kzg::{Blob as KzgBlob, Error as KzgError, Kzg};
use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof};
/// Converts an SSZ blob `List` into the array type used by the KZG crypto
/// library.
fn ssz_blob_to_crypto_blob<T: EthSpec>(blob: &Blob<T>) -> Result<KzgBlob, KzgError> {
KzgBlob::from_bytes(blob.as_ref()).map_err(Into::into)
}
/// Validate a single blob-commitment-proof triplet from a `BlobSidecar`.
pub fn validate_blob<T: EthSpec>(
kzg: &Kzg,
blob: &Blob<T>,
kzg_commitment: KzgCommitment,
kzg_proof: KzgProof,
) -> Result<(), KzgError> {
let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES);
let kzg_blob = ssz_blob_to_crypto_blob::<T>(blob)?;
kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof)
}
/// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`.
pub fn validate_blobs<T: EthSpec>(
kzg: &Kzg,
expected_kzg_commitments: &[KzgCommitment],
blobs: Vec<&Blob<T>>,
kzg_proofs: &[KzgProof],
) -> Result<(), KzgError> {
let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES);
let blobs = blobs
.into_iter()
.map(|blob| ssz_blob_to_crypto_blob::<T>(blob))
.collect::<Result<Vec<_>, KzgError>>()?;
kzg.verify_blob_kzg_proof_batch(&blobs, expected_kzg_commitments, kzg_proofs)
}
/// Compute the kzg proof given an ssz blob and its kzg commitment.
pub fn compute_blob_kzg_proof<T: EthSpec>(
kzg: &Kzg,
blob: &Blob<T>,
kzg_commitment: KzgCommitment,
) -> Result<KzgProof, KzgError> {
let kzg_blob = ssz_blob_to_crypto_blob::<T>(blob)?;
kzg.compute_blob_kzg_proof(&kzg_blob, kzg_commitment)
}
/// Compute the kzg commitment for a given blob.
pub fn blob_to_kzg_commitment<T: EthSpec>(
kzg: &Kzg,
blob: &Blob<T>,
) -> Result<KzgCommitment, KzgError> {
let kzg_blob = ssz_blob_to_crypto_blob::<T>(blob)?;
kzg.blob_to_kzg_commitment(&kzg_blob)
}
/// Compute the kzg proof for a given blob and an evaluation point z.
pub fn compute_kzg_proof<T: EthSpec>(
kzg: &Kzg,
blob: &Blob<T>,
z: Hash256,
) -> Result<(KzgProof, Hash256), KzgError> {
let z = z.0.into();
let kzg_blob = ssz_blob_to_crypto_blob::<T>(blob)?;
kzg.compute_kzg_proof(&kzg_blob, &z)
.map(|(proof, z)| (proof, Hash256::from_slice(&z.to_vec())))
}
/// Verify a `kzg_proof` claiming that the polynomial committed to by `kzg_commitment` evaluates to `y` at the point `z`.
pub fn verify_kzg_proof<T: EthSpec>(
kzg: &Kzg,
kzg_commitment: KzgCommitment,
kzg_proof: KzgProof,
z: Hash256,
y: Hash256,
) -> Result<bool, KzgError> {
kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof)
}
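A batch-verification usage sketch; the three inputs are assumed to be parallel arrays extracted from the sidecars, in matching order:
fn verify_sidecar_batch<T: EthSpec>(
    kzg: &Kzg,
    blobs: Vec<&Blob<T>>,
    commitments: &[KzgCommitment],
    proofs: &[KzgProof],
) -> Result<(), KzgError> {
    // Delegates to the batched pairing check; any single mismatch fails the
    // whole batch, so a caller may fall back to per-blob `validate_blob`
    // to locate the offender.
    validate_blobs::<T>(kzg, commitments, blobs, proofs)
}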


@ -1,4 +1,5 @@
pub mod attestation_rewards;
pub mod attestation_simulator;
pub mod attestation_verification;
mod attester_cache;
pub mod beacon_block_reward;
@ -7,13 +8,17 @@ mod beacon_chain;
mod beacon_fork_choice_store;
pub mod beacon_proposer_cache;
mod beacon_snapshot;
pub mod blob_verification;
pub mod block_reward;
mod block_times_cache;
mod block_verification;
pub mod block_verification_types;
pub mod builder;
pub mod canonical_head;
pub mod capella_readiness;
pub mod chain_config;
pub mod data_availability_checker;
pub mod deneb_readiness;
mod early_attester_cache;
mod errors;
pub mod eth1_chain;
@ -24,6 +29,7 @@ pub mod fork_choice_signal;
pub mod fork_revert;
mod head_tracker;
pub mod historical_blocks;
pub mod kzg_utils;
pub mod light_client_finality_update_verification;
pub mod light_client_optimistic_update_verification;
pub mod merge_readiness;
@ -32,8 +38,10 @@ pub mod migrate;
mod naive_aggregation_pool;
mod observed_aggregates;
mod observed_attesters;
mod observed_blob_sidecars;
pub mod observed_block_producers;
pub mod observed_operations;
mod observed_slashable;
pub mod otb_verification_service;
mod persisted_beacon_chain;
mod persisted_fork_choice;
@ -51,7 +59,8 @@ pub mod validator_monitor;
pub mod validator_pubkey_cache;
pub use self::beacon_chain::{
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse,
BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig,
WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
@ -63,15 +72,19 @@ pub use self::historical_blocks::HistoricalBlockError;
pub use attestation_verification::Error as AttestationError;
pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
pub use block_verification::{
get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock,
IntoExecutionPendingBlock, IntoGossipVerifiedBlock,
get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock,
IntoExecutionPendingBlock, IntoGossipVerifiedBlockContents, PayloadVerificationOutcome,
PayloadVerificationStatus,
};
pub use block_verification_types::AvailabilityPendingExecutedBlock;
pub use block_verification_types::ExecutedBlock;
pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
pub use events::ServerSentEventHandler;
pub use execution_layer::EngineState;
pub use execution_payload::NotifyExecutionLayer;
pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters};
pub use kzg::TrustedSetup;
pub use metrics::scrape_for_metrics;
pub use migrate::MigratorConfig;
pub use parking_lot;


@ -34,7 +34,7 @@ pub enum Error {
SigSlotStartIsNone,
/// Failed to construct a LightClientFinalityUpdate from state.
FailedConstructingUpdate,
/// Beacon chain error occured.
/// Beacon chain error occurred.
BeaconChainError(BeaconChainError),
LightClientUpdateError(LightClientUpdateError),
}
@ -67,7 +67,7 @@ impl<T: BeaconChainTypes> VerifiedLightClientFinalityUpdate<T> {
chain: &BeaconChain<T>,
seen_timestamp: Duration,
) -> Result<Self, Error> {
let gossiped_finality_slot = light_client_finality_update.finalized_header.slot;
let gossiped_finality_slot = light_client_finality_update.finalized_header.beacon.slot;
let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0);
let signature_slot = light_client_finality_update.signature_slot;
let start_time = chain.slot_clock.start_of(signature_slot);
@ -88,7 +88,7 @@ impl<T: BeaconChainTypes> VerifiedLightClientFinalityUpdate<T> {
.get_blinded_block(&finalized_block_root)?
.ok_or(Error::FailedConstructingUpdate)?;
let latest_seen_finality_update_slot = match latest_seen_finality_update.as_ref() {
Some(update) => update.finalized_header.slot,
Some(update) => update.finalized_header.beacon.slot,
None => Slot::new(0),
};


@ -37,7 +37,7 @@ pub enum Error {
FailedConstructingUpdate,
/// Unknown block with parent root.
UnknownBlockParentRoot(Hash256),
/// Beacon chain error occured.
/// Beacon chain error occurred.
BeaconChainError(BeaconChainError),
LightClientUpdateError(LightClientUpdateError),
}
@ -71,7 +71,7 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
chain: &BeaconChain<T>,
seen_timestamp: Duration,
) -> Result<Self, Error> {
let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.slot;
let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.beacon.slot;
let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0);
let signature_slot = light_client_optimistic_update.signature_slot;
let start_time = chain.slot_clock.start_of(signature_slot);
@ -88,7 +88,7 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
.get_state(&attested_block.state_root(), Some(attested_block.slot()))?
.ok_or(Error::FailedConstructingUpdate)?;
let latest_seen_optimistic_update_slot = match latest_seen_optimistic_update.as_ref() {
Some(update) => update.attested_header.slot,
Some(update) => update.attested_header.beacon.slot,
None => Slot::new(0),
};
@ -114,6 +114,7 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
// otherwise queue
let canonical_root = light_client_optimistic_update
.attested_header
.beacon
.canonical_root();
if canonical_root != head_block.message().parent_root() {


@ -10,6 +10,20 @@ use types::{BeaconState, Epoch, EthSpec, Hash256, Slot};
/// The maximum time to wait for the snapshot cache lock during a metrics scrape.
const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100);
// Attestation simulator metrics
pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL: &str =
"validator_monitor_attestation_simulator_head_attester_hit_total";
pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL: &str =
"validator_monitor_attestation_simulator_head_attester_miss_total";
pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL: &str =
"validator_monitor_attestation_simulator_target_attester_hit_total";
pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL: &str =
"validator_monitor_attestation_simulator_target_attester_miss_total";
pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL: &str =
"validator_monitor_attestation_simulator_source_attester_hit_total";
pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL: &str =
"validator_monitor_attestation_simulator_source_attester_miss_total";
lazy_static! {
/*
* Block Processing
@ -40,6 +54,14 @@ lazy_static! {
"beacon_block_processing_block_root_seconds",
"Time spent calculating the block root when processing a block."
);
pub static ref BLOCK_HEADER_PROCESSING_BLOCK_ROOT: Result<Histogram> = try_create_histogram(
"beacon_block_header_processing_block_root_seconds",
"Time spent calculating the block root for a beacon block header."
);
pub static ref BLOCK_PROCESSING_BLOB_ROOT: Result<Histogram> = try_create_histogram(
"beacon_block_processing_blob_root_seconds",
"Time spent calculating the blob root when processing a block."
);
pub static ref BLOCK_PROCESSING_DB_READ: Result<Histogram> = try_create_histogram(
"beacon_block_processing_db_read_seconds",
"Time spent loading block and state from DB for block processing"
@ -282,6 +304,11 @@ lazy_static! {
"Count of times the early attester cache returns an attestation"
);
}
// Second lazy-static block is used to account for macro recursion limit.
lazy_static! {
/*
* Attestation Production
*/
@ -301,10 +328,7 @@ lazy_static! {
"attestation_production_cache_prime_seconds",
"Time spent loading a new state from the disk due to a cache miss"
);
}
// Second lazy-static block is used to account for macro recursion limit.
lazy_static! {
/*
* Fork Choice
*/
@ -380,6 +404,8 @@ lazy_static! {
try_create_histogram("beacon_persist_eth1_cache", "Time taken to persist the eth1 caches");
pub static ref PERSIST_FORK_CHOICE: Result<Histogram> =
try_create_histogram("beacon_persist_fork_choice", "Time taken to persist the fork choice struct");
pub static ref PERSIST_DATA_AVAILABILITY_CHECKER: Result<Histogram> =
try_create_histogram("beacon_persist_data_availability_checker", "Time taken to persist the data availability checker");
/*
* Eth1
@ -980,6 +1006,30 @@ lazy_static! {
"beacon_pre_finalization_block_lookup_count",
"Number of block roots subject to single block lookups"
);
/*
* Blob sidecar Verification
*/
pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
"beacon_blobs_sidecar_processing_requests_total",
"Count of all blob sidecars submitted for processing"
);
pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
"beacon_blobs_sidecar_processing_successes_total",
"Number of blob sidecars verified for gossip"
);
pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
"beacon_blobs_sidecar_gossip_verification_seconds",
"Full runtime of blob sidecars gossip verification"
);
pub static ref BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION: Result<Histogram> = try_create_histogram(
"blob_sidecar_inclusion_proof_verification_seconds",
"Time taken to verify blob sidecar inclusion proof"
);
pub static ref BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: Result<Histogram> = try_create_histogram(
"blob_sidecar_inclusion_proof_computation_seconds",
"Time taken to compute blob sidecar inclusion proof"
);
}
// Fifth lazy-static block is used to account for macro recursion limit.
@ -1009,6 +1059,75 @@ lazy_static! {
"beacon_aggregated_attestation_subsets_total",
"Count of new aggregated attestations that are subsets of already known aggregates"
);
/*
* Attestation simulator metrics
*/
pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT: Result<IntCounter> =
try_create_int_counter(
VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL,
"Incremented if a validator is flagged as a previous slot head attester \
during per slot processing",
);
pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS: Result<IntCounter> =
try_create_int_counter(
VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL,
"Incremented if a validator is not flagged as a previous slot head attester \
during per slot processing",
);
pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT: Result<IntCounter> =
try_create_int_counter(
VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL,
"Incremented if a validator is flagged as a previous slot target attester \
during per slot processing",
);
pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS: Result<IntCounter> =
try_create_int_counter(
VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL,
"Incremented if a validator is not flagged as a previous slot target attester \
during per slot processing",
);
pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT: Result<IntCounter> =
try_create_int_counter(
VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL,
"Incremented if a validator is flagged as a previous slot source attester \
during per slot processing",
);
pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS: Result<IntCounter> =
try_create_int_counter(
VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL,
"Incremented if a validator is not flagged as a previous slot source attester \
during per slot processing",
);
/*
* Missed block metrics
*/
pub static ref VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"validator_monitor_missed_blocks_total",
"Number of non-finalized blocks missed",
&["validator"]
);
/*
* KZG-related metrics
*/
pub static ref KZG_VERIFICATION_SINGLE_TIMES: Result<Histogram> =
try_create_histogram("kzg_verification_single_seconds", "Runtime of single kzg verification");
pub static ref KZG_VERIFICATION_BATCH_TIMES: Result<Histogram> =
try_create_histogram("kzg_verification_batch_seconds", "Runtime of batched kzg verification");
pub static ref BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
"beacon_block_production_blobs_verification_seconds",
"Time taken to verify blobs against commitments and creating BlobSidecar objects in block production"
);
/*
* Availability related metrics
*/
pub static ref BLOCK_AVAILABILITY_DELAY: Result<Histogram> = try_create_histogram_with_buckets(
"block_availability_delay",
"Duration between start of the slot and the time at which all components of the block are available.",
// Create a custom bucket list for greater granularity in block delay
Ok(vec![0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0])
);
}
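For context, counters and histograms registered this way are normally bumped through the shared lighthouse_metrics helpers, which no-op if registration failed. A hedged sketch of a call site (the `inc_counter` helper is assumed from the metrics crate, not shown in this diff):
// Hedged sketch: bump a simulator counter during per-slot processing.
lighthouse_metrics::inc_counter(&VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT);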
/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,

View File

@ -117,6 +117,7 @@ pub enum PruningError {
pub enum Notification {
Finalization(FinalizationNotification),
Reconstruction,
PruneBlobs(Epoch),
}
pub struct FinalizationNotification {
@ -191,6 +192,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
}
}
pub fn process_prune_blobs(&self, data_availability_boundary: Epoch) {
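// If no background thread is running, `send_background_notification` hands
// the notification back (see its doc comment below) and we prune inline on
// the calling thread.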
if let Some(Notification::PruneBlobs(data_availability_boundary)) =
self.send_background_notification(Notification::PruneBlobs(data_availability_boundary))
{
Self::run_prune_blobs(self.db.clone(), data_availability_boundary, &self.log);
}
}
pub fn run_reconstruction(db: Arc<HotColdDB<E, Hot, Cold>>, log: &Logger) {
if let Err(e) = db.reconstruct_historic_states() {
error!(
@ -201,6 +210,20 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
}
}
pub fn run_prune_blobs(
db: Arc<HotColdDB<E, Hot, Cold>>,
data_availability_boundary: Epoch,
log: &Logger,
) {
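// The first argument to `try_prune_blobs` is presumably a `force` flag:
// `false` defers to the data availability boundary rather than pruning
// unconditionally.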
if let Err(e) = db.try_prune_blobs(false, data_availability_boundary) {
error!(
log,
"Blob pruning failed";
"error" => ?e,
);
}
}
/// If configured to run in the background, send `notif` to the background thread.
///
/// Return `None` if the message was sent to the background thread, `Some(notif)` otherwise.
@ -367,29 +390,44 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
let (tx, rx) = mpsc::channel();
let thread = thread::spawn(move || {
while let Ok(notif) = rx.recv() {
// Read the rest of the messages in the channel, preferring any reconstruction
// notification, or the finalization notification with the greatest finalized epoch.
let notif =
rx.try_iter()
.fold(notif, |best, other: Notification| match (&best, &other) {
(Notification::Reconstruction, _)
| (_, Notification::Reconstruction) => Notification::Reconstruction,
(
Notification::Finalization(fin1),
Notification::Finalization(fin2),
) => {
if fin2.finalized_checkpoint.epoch > fin1.finalized_checkpoint.epoch
{
other
} else {
best
}
}
});
let mut reconstruction_notif = None;
let mut finalization_notif = None;
let mut prune_blobs_notif = None;
match notif {
Notification::Reconstruction => Self::run_reconstruction(db.clone(), &log),
Notification::Finalization(fin) => Self::run_migration(db.clone(), fin, &log),
Notification::Reconstruction => reconstruction_notif = Some(notif),
Notification::Finalization(fin) => finalization_notif = Some(fin),
Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab),
}
// Read the rest of the messages in the channel, taking the best of each type.
for notif in rx.try_iter() {
match notif {
Notification::Reconstruction => reconstruction_notif = Some(notif),
Notification::Finalization(fin) => {
if let Some(current) = finalization_notif.as_mut() {
if fin.finalized_checkpoint.epoch
> current.finalized_checkpoint.epoch
{
*current = fin;
}
} else {
finalization_notif = Some(fin);
}
}
Notification::PruneBlobs(dab) => {
prune_blobs_notif = std::cmp::max(prune_blobs_notif, Some(dab));
}
}
}
// If reconstruction is on-going, ignore finalization migration and blob pruning.
if reconstruction_notif.is_some() {
Self::run_reconstruction(db.clone(), &log);
} else {
if let Some(fin) = finalization_notif {
Self::run_migration(db.clone(), fin, &log);
}
if let Some(dab) = prune_blobs_notif {
Self::run_prune_blobs(db.clone(), dab, &log);
}
}
}
});
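The loop above drains the channel and keeps at most one pending action of each kind: any reconstruction request wins outright, finalization keeps the greatest finalized epoch, and blob pruning keeps the greatest data availability boundary. A self-contained sketch of the same coalescing rule over plain epoch numbers (simplified types, not the real `Notification`):
#[derive(Clone, Copy)]
enum Notif {
    Reconstruction,
    Finalization(u64),
    PruneBlobs(u64),
}
// Collapse a queue of notifications into at most one action per kind.
fn coalesce(queue: &[Notif]) -> (bool, Option<u64>, Option<u64>) {
    let (mut recon, mut fin, mut prune) = (false, None, None);
    for n in queue {
        match n {
            Notif::Reconstruction => recon = true,
            Notif::Finalization(e) => fin = std::cmp::max(fin, Some(*e)),
            Notif::PruneBlobs(e) => prune = std::cmp::max(prune, Some(*e)),
        }
    }
    (recon, fin, prune)
}
For example, `coalesce(&[Notif::Finalization(3), Notif::PruneBlobs(7), Notif::Finalization(5)])` yields `(false, Some(5), Some(7))`, matching the behaviour of the loop above.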
@ -630,13 +668,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
head_tracker_lock.remove(&head_hash);
}
let batch: Vec<StoreOp<E>> = abandoned_blocks
let mut batch: Vec<StoreOp<E>> = abandoned_blocks
.into_iter()
.map(Into::into)
.flat_map(|block_root: Hash256| {
[
StoreOp::DeleteBlock(block_root),
StoreOp::DeleteExecutionPayload(block_root),
StoreOp::DeleteBlobs(block_root),
]
})
.chain(
@ -646,8 +685,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
)
.collect();
let mut kv_batch = store.convert_to_kv_batch(batch)?;
// Persist the head in case the process is killed or crashes here. This prevents
// the head tracker reverting after our mutation above.
let persisted_head = PersistedBeaconChain {
@ -656,12 +693,16 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
ssz_head_tracker: SszHeadTracker::from_map(&head_tracker_lock),
};
drop(head_tracker_lock);
kv_batch.push(persisted_head.as_kv_store_op(BEACON_CHAIN_DB_KEY));
batch.push(StoreOp::KeyValueOp(
persisted_head.as_kv_store_op(BEACON_CHAIN_DB_KEY),
));
// Persist the new finalized checkpoint as the pruning checkpoint.
kv_batch.push(store.pruning_checkpoint_store_op(new_finalized_checkpoint));
batch.push(StoreOp::KeyValueOp(
store.pruning_checkpoint_store_op(new_finalized_checkpoint),
));
store.hot_db.do_atomically(kv_batch)?;
store.do_atomically_with_block_and_blobs_cache(batch)?;
debug!(log, "Database pruning complete");
Ok(PruningOutcome::Successful {

View File

@ -0,0 +1,430 @@
//! Provides the `ObservedBlobSidecars` struct which allows for rejecting `BlobSidecar`s
//! that we have already seen over the gossip network.
//! Only `BlobSidecar`s that have completed proposer signature verification can be added
//! to this cache to reduce DoS risks.
use crate::observed_block_producers::ProposalKey;
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use types::{BlobSidecar, EthSpec, Slot};
#[derive(Debug, PartialEq)]
pub enum Error {
/// The slot of the provided `BlobSidecar` is prior to finalization and should not have been provided
/// to this function. This is an internal error.
FinalizedBlob { slot: Slot, finalized_slot: Slot },
/// The blob sidecar contains an invalid blob index, so the sidecar itself is invalid.
/// Note: an invalid index should have been caught and flagged as an error long before
/// reaching this point.
InvalidBlobIndex(u64),
}
/// Maintains a cache of seen `BlobSidecar`s that are received over gossip
/// and have been gossip verified.
///
/// The cache supports pruning based upon the finalized epoch. It does not automatically prune; you
/// must call `Self::prune` manually.
///
/// Note: to prevent DoS attacks, this cache must only include items that carry some DoS
/// resistance, such as a verified proposer signature.
pub struct ObservedBlobSidecars<T: EthSpec> {
finalized_slot: Slot,
/// Stores all received blob indices for a given `(ValidatorIndex, Slot)` tuple.
items: HashMap<ProposalKey, HashSet<u64>>,
_phantom: PhantomData<T>,
}
impl<E: EthSpec> Default for ObservedBlobSidecars<E> {
/// Instantiates `Self` with `finalized_slot == 0`.
fn default() -> Self {
Self {
finalized_slot: Slot::new(0),
items: HashMap::new(),
_phantom: PhantomData,
}
}
}
impl<T: EthSpec> ObservedBlobSidecars<T> {
/// Observe the `blob_sidecar` at (`blob_sidecar.block_proposer_index`, `blob_sidecar.slot`).
/// This will update `self` so future calls to it indicate that this `blob_sidecar` is known.
///
/// The supplied `blob_sidecar` **MUST** have completed proposer signature verification.
pub fn observe_sidecar(&mut self, blob_sidecar: &BlobSidecar<T>) -> Result<bool, Error> {
self.sanitize_blob_sidecar(blob_sidecar)?;
let blob_indices = self
.items
.entry(ProposalKey {
slot: blob_sidecar.slot(),
proposer: blob_sidecar.block_proposer_index(),
})
.or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block()));
let did_not_exist = blob_indices.insert(blob_sidecar.index);
Ok(!did_not_exist)
}
/// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window.
pub fn proposer_is_known(&self, blob_sidecar: &BlobSidecar<T>) -> Result<bool, Error> {
self.sanitize_blob_sidecar(blob_sidecar)?;
let is_known = self
.items
.get(&ProposalKey {
slot: blob_sidecar.slot(),
proposer: blob_sidecar.block_proposer_index(),
})
.map_or(false, |blob_indices| {
blob_indices.contains(&blob_sidecar.index)
});
Ok(is_known)
}
fn sanitize_blob_sidecar(&self, blob_sidecar: &BlobSidecar<T>) -> Result<(), Error> {
if blob_sidecar.index >= T::max_blobs_per_block() as u64 {
return Err(Error::InvalidBlobIndex(blob_sidecar.index));
}
let finalized_slot = self.finalized_slot;
if finalized_slot > 0 && blob_sidecar.slot() <= finalized_slot {
return Err(Error::FinalizedBlob {
slot: blob_sidecar.slot(),
finalized_slot,
});
}
Ok(())
}
/// Prune `blob_sidecar` observations for slots less than or equal to the given slot.
pub fn prune(&mut self, finalized_slot: Slot) {
if finalized_slot == 0 {
return;
}
self.finalized_slot = finalized_slot;
self.items.retain(|k, _| k.slot > finalized_slot);
}
}
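A brief usage sketch, mirroring the tests below (`get_blob_sidecar` is the test helper defined underneath):
// Hedged sketch: the first observation returns Ok(false), repeats return
// Ok(true), and pruning past the slot turns further inserts into errors.
let mut cache = ObservedBlobSidecars::<MainnetEthSpec>::default();
let sidecar = get_blob_sidecar(1, 42, 0);
assert_eq!(cache.observe_sidecar(&sidecar), Ok(false));
assert_eq!(cache.proposer_is_known(&sidecar), Ok(true));
cache.prune(Slot::new(32));
assert!(cache.observe_sidecar(&sidecar).is_err()); // slot 1 is now finalized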
#[cfg(test)]
mod tests {
use super::*;
use bls::Hash256;
use std::sync::Arc;
use types::{BlobSidecar, MainnetEthSpec};
type E = MainnetEthSpec;
fn get_blob_sidecar(slot: u64, proposer_index: u64, index: u64) -> Arc<BlobSidecar<E>> {
let mut blob_sidecar = BlobSidecar::empty();
blob_sidecar.signed_block_header.message.slot = slot.into();
blob_sidecar.signed_block_header.message.proposer_index = proposer_index;
blob_sidecar.index = index;
Arc::new(blob_sidecar)
}
#[test]
fn pruning() {
let mut cache = ObservedBlobSidecars::default();
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 0, "no slots should be present");
// Slot 0, index 0
let proposer_index_a = 420;
let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0);
assert_eq!(
cache.observe_sidecar(&sidecar_a),
Ok(false),
"can observe proposer, indicates proposer unobserved"
);
/*
* Preconditions.
*/
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(
cache.items.len(),
1,
"only one (validator_index, slot) tuple should be present"
);
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present");
assert_eq!(
cached_blob_indices.len(),
1,
"only one proposer should be present"
);
/*
* Check that a prune at the genesis slot does nothing.
*/
cache.prune(Slot::new(0));
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 1, "only one slot should be present");
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present");
assert_eq!(
cached_blob_indices.len(),
1,
"only one proposer should be present"
);
/*
* Check that a prune empties the cache
*/
cache.prune(E::slots_per_epoch().into());
assert_eq!(
cache.finalized_slot,
Slot::from(E::slots_per_epoch()),
"finalized slot is updated"
);
assert_eq!(cache.items.len(), 0, "no items left");
/*
* Check that we can't insert a finalized sidecar
*/
// First slot of finalized epoch
let block_b = get_blob_sidecar(E::slots_per_epoch(), 419, 0);
assert_eq!(
cache.observe_sidecar(&block_b),
Err(Error::FinalizedBlob {
slot: E::slots_per_epoch().into(),
finalized_slot: E::slots_per_epoch().into(),
}),
"cant insert finalized sidecar"
);
assert_eq!(cache.items.len(), 0, "sidecar was not added");
/*
* Check that we _can_ insert a non-finalized block
*/
let three_epochs = E::slots_per_epoch() * 3;
// A slot three epochs after genesis (well past the pruned slot)
let proposer_index_b = 421;
let block_b = get_blob_sidecar(three_epochs, proposer_index_b, 0);
assert_eq!(
cache.observe_sidecar(&block_b),
Ok(false),
"can insert non-finalized block"
);
assert_eq!(cache.items.len(), 1, "only one slot should be present");
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs)))
.expect("the three epochs slot should be present");
assert_eq!(
cached_blob_indices.len(),
1,
"only one proposer should be present"
);
/*
* Check that a prune doesn't wipe later blocks
*/
let two_epochs = E::slots_per_epoch() * 2;
cache.prune(two_epochs.into());
assert_eq!(
cache.finalized_slot,
Slot::from(two_epochs),
"finalized slot is updated"
);
assert_eq!(cache.items.len(), 1, "only one slot should be present");
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs)))
.expect("the three epochs slot should be present");
assert_eq!(
cached_blob_indices.len(),
1,
"only one proposer should be present"
);
}
#[test]
fn simple_observations() {
let mut cache = ObservedBlobSidecars::default();
// Slot 0, index 0
let proposer_index_a = 420;
let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0);
assert_eq!(
cache.proposer_is_known(&sidecar_a),
Ok(false),
"no observation in empty cache"
);
assert_eq!(
cache.observe_sidecar(&sidecar_a),
Ok(false),
"can observe proposer, indicates proposer unobserved"
);
assert_eq!(
cache.proposer_is_known(&sidecar_a),
Ok(true),
"observed block is indicated as true"
);
assert_eq!(
cache.observe_sidecar(&sidecar_a),
Ok(true),
"observing again indicates true"
);
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 1, "only one slot should be present");
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present");
assert_eq!(
cached_blob_indices.len(),
1,
"only one proposer should be present"
);
// Slot 1, proposer 0
let proposer_index_b = 421;
let sidecar_b = get_blob_sidecar(1, proposer_index_b, 0);
assert_eq!(
cache.proposer_is_known(&sidecar_b),
Ok(false),
"no observation for new slot"
);
assert_eq!(
cache.observe_sidecar(&sidecar_b),
Ok(false),
"can observe proposer for new slot, indicates proposer unobserved"
);
assert_eq!(
cache.proposer_is_known(&sidecar_b),
Ok(true),
"observed block in slot 1 is indicated as true"
);
assert_eq!(
cache.observe_sidecar(&sidecar_b),
Ok(true),
"observing slot 1 again indicates true"
);
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 2, "two slots should be present");
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present");
assert_eq!(
cached_blob_indices.len(),
1,
"only one proposer should be present in slot 0"
);
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_b, Slot::new(1)))
.expect("slot zero should be present");
assert_eq!(
cached_blob_indices.len(),
1,
"only one proposer should be present in slot 1"
);
// Slot 0, index 1
let sidecar_c = get_blob_sidecar(0, proposer_index_a, 1);
assert_eq!(
cache.proposer_is_known(&sidecar_c),
Ok(false),
"no observation for new index"
);
assert_eq!(
cache.observe_sidecar(&sidecar_c),
Ok(false),
"can observe new index, indicates sidecar unobserved for new index"
);
assert_eq!(
cache.proposer_is_known(&sidecar_c),
Ok(true),
"observed new sidecar is indicated as true"
);
assert_eq!(
cache.observe_sidecar(&sidecar_c),
Ok(true),
"observing new sidecar again indicates true"
);
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 2, "two slots should be present");
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present");
assert_eq!(
cached_blob_indices.len(),
2,
"two blob indices should be present in slot 0"
);
// Create a sidecar sharing slot and proposer but with a different block root.
let mut sidecar_d: BlobSidecar<E> = BlobSidecar {
index: sidecar_c.index,
blob: sidecar_c.blob.clone(),
kzg_commitment: sidecar_c.kzg_commitment,
kzg_proof: sidecar_c.kzg_proof,
signed_block_header: sidecar_c.signed_block_header.clone(),
kzg_commitment_inclusion_proof: sidecar_c.kzg_commitment_inclusion_proof.clone(),
};
sidecar_d.signed_block_header.message.body_root = Hash256::repeat_byte(7);
assert_eq!(
cache.proposer_is_known(&sidecar_d),
Ok(true),
"there has been an observation for this proposer index"
);
assert_eq!(
cache.observe_sidecar(&sidecar_d),
Ok(true),
"indicates sidecar proposer was observed"
);
let cached_blob_indices = cache
.items
.get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present");
assert_eq!(
cached_blob_indices.len(),
2,
"two blob indices should be present in slot 0"
);
// Try adding an out of bounds index
let invalid_index = E::max_blobs_per_block() as u64;
let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index);
assert_eq!(
cache.observe_sidecar(&sidecar_d),
Err(Error::InvalidBlobIndex(invalid_index)),
"cannot add an index > MaxBlobsPerBlock"
);
}
}

View File

@ -16,9 +16,15 @@ pub enum Error {
}
#[derive(Eq, Hash, PartialEq, Debug, Default)]
struct ProposalKey {
slot: Slot,
proposer: u64,
pub struct ProposalKey {
pub slot: Slot,
pub proposer: u64,
}
impl ProposalKey {
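/// Note: the argument order (`proposer`, then `slot`) is the reverse of the
/// field declaration order.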
pub fn new(proposer: u64, slot: Slot) -> Self {
Self { slot, proposer }
}
}
/// Maintains a cache of observed `(block.slot, block.proposer)`.

View File

@ -0,0 +1,486 @@
//! Provides the `ObservedSlashable` struct which tracks slashable messages seen in
//! gossip or via RPC. Useful in supporting `broadcast_validation` in the Beacon API.
use crate::observed_block_producers::Error;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use types::{EthSpec, Hash256, Slot, Unsigned};
#[derive(Eq, Hash, PartialEq, Debug, Default)]
pub struct ProposalKey {
pub slot: Slot,
pub proposer: u64,
}
/// Maintains a cache of observed `(block.slot, block.proposer)`.
///
/// The cache supports pruning based upon the finalized epoch. It does not automatically prune; you
/// must call `Self::prune` manually.
///
/// The maximum size of the cache is determined by `slots_since_finality *
/// VALIDATOR_REGISTRY_LIMIT`. This is quite a large size, so it's important that upstream
/// functions only use this cache for blocks with a valid signature. Only allowing validly signed
/// blocks reduces the theoretical maximum size of this cache to `slots_since_finality *
/// active_validator_count`; however, in reality it is more like `slots_since_finality *
/// known_distinct_shufflings`, which is much smaller.
pub struct ObservedSlashable<E: EthSpec> {
finalized_slot: Slot,
items: HashMap<ProposalKey, HashSet<Hash256>>,
_phantom: PhantomData<E>,
}
impl<E: EthSpec> Default for ObservedSlashable<E> {
/// Instantiates `Self` with `finalized_slot == 0`.
fn default() -> Self {
Self {
finalized_slot: Slot::new(0),
items: HashMap::new(),
_phantom: PhantomData,
}
}
}
impl<E: EthSpec> ObservedSlashable<E> {
/// Observe that the block with `block_root` was produced by `proposer_index` at `slot`. This
/// will update `self` so future calls to it indicate that this block is known.
///
/// The supplied block **MUST** be signature verified (see struct-level documentation).
///
/// ## Errors
///
/// - `proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
/// - `slot` is equal to or less than the latest pruned `finalized_slot`.
pub fn observe_slashable(
&mut self,
slot: Slot,
proposer_index: u64,
block_root: Hash256,
) -> Result<(), Error> {
self.sanitize_header(slot, proposer_index)?;
let key = ProposalKey {
slot,
proposer: proposer_index,
};
let entry = self.items.entry(key);
match entry {
Entry::Occupied(mut occupied_entry) => {
let block_roots = occupied_entry.get_mut();
block_roots.insert(block_root);
}
Entry::Vacant(vacant_entry) => {
let block_roots = HashSet::from([block_root]);
vacant_entry.insert(block_roots);
}
}
Ok(())
}
/// Returns `Ok(true)` if the `block_root` is slashable, `Ok(false)` if not. Does not
/// update the cache, so calling this function multiple times will continue to return
/// the same result until `Self::observe_slashable` is called.
///
/// ## Errors
///
/// - `proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
/// - `slot` is equal to or less than the latest pruned `finalized_slot`.
pub fn is_slashable(
&self,
slot: Slot,
proposer_index: u64,
block_root: Hash256,
) -> Result<bool, Error> {
self.sanitize_header(slot, proposer_index)?;
let key = ProposalKey {
slot,
proposer: proposer_index,
};
if let Some(block_roots) = self.items.get(&key) {
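// `difference` with the singleton set {block_root} leaves only roots other
// than the one being checked; any remainder means a competing block from the
// same (slot, proposer) pair has been observed.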
let no_prev_known_blocks =
block_roots.difference(&HashSet::from([block_root])).count() == 0;
Ok(!no_prev_known_blocks)
} else {
Ok(false)
}
}
/// Returns `Ok(())` if the given `header` is sane.
fn sanitize_header(&self, slot: Slot, proposer_index: u64) -> Result<(), Error> {
if proposer_index >= E::ValidatorRegistryLimit::to_u64() {
return Err(Error::ValidatorIndexTooHigh(proposer_index));
}
let finalized_slot = self.finalized_slot;
if finalized_slot > 0 && slot <= finalized_slot {
return Err(Error::FinalizedBlock {
slot,
finalized_slot,
});
}
Ok(())
}
/// Removes all observations of blocks equal to or earlier than `finalized_slot`.
///
/// Stores `finalized_slot` in `self`, so that `self` will reject any block that has a slot
/// equal to or less than `finalized_slot`.
///
/// No-op if `finalized_slot == 0`.
pub fn prune(&mut self, finalized_slot: Slot) {
if finalized_slot == 0 {
return;
}
self.finalized_slot = finalized_slot;
self.items.retain(|key, _| key.slot > finalized_slot);
}
}
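A brief usage sketch of the equivocation check (values are illustrative):
// Hedged sketch: a second, distinct block root from the same (slot, proposer)
// pair is flagged as slashable.
let mut cache = ObservedSlashable::<MainnetEthSpec>::default();
let (slot, proposer) = (Slot::new(1), 7);
cache.observe_slashable(slot, proposer, Hash256::repeat_byte(1)).unwrap();
assert_eq!(cache.is_slashable(slot, proposer, Hash256::repeat_byte(1)), Ok(false));
assert_eq!(cache.is_slashable(slot, proposer, Hash256::repeat_byte(2)), Ok(true));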
#[cfg(test)]
mod tests {
use super::*;
use types::{BeaconBlock, Graffiti, MainnetEthSpec};
type E = MainnetEthSpec;
fn get_block(slot: u64, proposer: u64) -> BeaconBlock<E> {
let mut block = BeaconBlock::empty(&E::default_spec());
*block.slot_mut() = slot.into();
*block.proposer_index_mut() = proposer;
block
}
#[test]
fn pruning() {
let mut cache = ObservedSlashable::<E>::default();
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 0, "no slots should be present");
// Slot 0, proposer 0
let block_a = get_block(0, 0);
let block_root = block_a.canonical_root();
assert_eq!(
cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root),
Ok(()),
"can observe proposer"
);
/*
* Preconditions.
*/
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 1, "only one slot should be present");
assert_eq!(
cache
.items
.get(&ProposalKey {
slot: Slot::new(0),
proposer: 0
})
.expect("slot zero should be present")
.len(),
1,
"only one proposer should be present"
);
/*
* Check that a prune at the genesis slot does nothing.
*/
cache.prune(Slot::new(0));
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 1, "only one slot should be present");
assert_eq!(
cache
.items
.get(&ProposalKey {
slot: Slot::new(0),
proposer: 0
})
.expect("slot zero should be present")
.len(),
1,
"only one block root should be present"
);
/*
* Check that a prune empties the cache
*/
cache.prune(E::slots_per_epoch().into());
assert_eq!(
cache.finalized_slot,
Slot::from(E::slots_per_epoch()),
"finalized slot is updated"
);
assert_eq!(cache.items.len(), 0, "no items left");
/*
* Check that we can't insert a finalized block
*/
// First slot of finalized epoch, proposer 0
let block_b = get_block(E::slots_per_epoch(), 0);
let block_root_b = block_b.canonical_root();
assert_eq!(
cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b),
Err(Error::FinalizedBlock {
slot: E::slots_per_epoch().into(),
finalized_slot: E::slots_per_epoch().into(),
}),
"cant insert finalized block"
);
assert_eq!(cache.items.len(), 0, "block was not added");
/*
* Check that we _can_ insert a non-finalized block
*/
let three_epochs = E::slots_per_epoch() * 3;
// A slot three epochs after genesis, proposer 0
let block_b = get_block(three_epochs, 0);
assert_eq!(
cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b),
Ok(()),
"can insert non-finalized block"
);
assert_eq!(cache.items.len(), 1, "only one slot should be present");
assert_eq!(
cache
.items
.get(&ProposalKey {
slot: Slot::new(three_epochs),
proposer: 0
})
.expect("the three epochs slot should be present")
.len(),
1,
"only one proposer should be present"
);
/*
* Check that a prune doesn't wipe later blocks
*/
let two_epochs = E::slots_per_epoch() * 2;
cache.prune(two_epochs.into());
assert_eq!(
cache.finalized_slot,
Slot::from(two_epochs),
"finalized slot is updated"
);
assert_eq!(cache.items.len(), 1, "only one slot should be present");
assert_eq!(
cache
.items
.get(&ProposalKey {
slot: Slot::new(three_epochs),
proposer: 0
})
.expect("the three epochs slot should be present")
.len(),
1,
"only one block root should be present"
);
}
#[test]
fn simple_observations() {
let mut cache = ObservedSlashable::<E>::default();
// Slot 0, proposer 0
let block_a = get_block(0, 0);
let block_root_a = block_a.canonical_root();
assert_eq!(
cache.is_slashable(
block_a.slot(),
block_a.proposer_index(),
block_a.canonical_root()
),
Ok(false),
"no observation in empty cache"
);
assert_eq!(
cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root_a),
Ok(()),
"can observe proposer"
);
assert_eq!(
cache.is_slashable(
block_a.slot(),
block_a.proposer_index(),
block_a.canonical_root()
),
Ok(false),
"observed but unslashed block"
);
assert_eq!(
cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root_a),
Ok(()),
"observing again"
);
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 1, "only one slot should be present");
assert_eq!(
cache
.items
.get(&ProposalKey {
slot: Slot::new(0),
proposer: 0
})
.expect("slot zero should be present")
.len(),
1,
"only one block root should be present"
);
// Slot 1, proposer 0
let block_b = get_block(1, 0);
let block_root_b = block_b.canonical_root();
assert_eq!(
cache.is_slashable(
block_b.slot(),
block_b.proposer_index(),
block_b.canonical_root()
),
Ok(false),
"not slashable for new slot"
);
assert_eq!(
cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b),
Ok(()),
"can observe proposer for new slot"
);
assert_eq!(
cache.is_slashable(
block_b.slot(),
block_b.proposer_index(),
block_b.canonical_root()
),
Ok(false),
"observed but not slashable block in slot 1"
);
assert_eq!(
cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b),
Ok(()),
"observing slot 1 again"
);
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 2, "two slots should be present");
assert_eq!(
cache
.items
.get(&ProposalKey {
slot: Slot::new(0),
proposer: 0
})
.expect("slot zero should be present")
.len(),
1,
"only one block root should be present in slot 0"
);
assert_eq!(
cache
.items
.get(&ProposalKey {
slot: Slot::new(1),
proposer: 0
})
.expect("slot zero should be present")
.len(),
1,
"only one block root should be present in slot 1"
);
// Slot 0, proposer 1
let block_c = get_block(0, 1);
let block_root_c = block_c.canonical_root();
assert_eq!(
cache.is_slashable(
block_c.slot(),
block_c.proposer_index(),
block_c.canonical_root()
),
Ok(false),
"not slashable due to new proposer"
);
assert_eq!(
cache.observe_slashable(block_c.slot(), block_c.proposer_index(), block_root_c),
Ok(()),
"can observe new proposer, indicates proposer unobserved"
);
assert_eq!(
cache.is_slashable(
block_c.slot(),
block_c.proposer_index(),
block_c.canonical_root()
),
Ok(false),
"not slashable due to new proposer"
);
assert_eq!(
cache.observe_slashable(block_c.slot(), block_c.proposer_index(), block_root_c),
Ok(()),
"observing new proposer again"
);
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 3, "three slots should be present");
assert_eq!(
cache
.items
.iter()
.filter(|(k, _)| k.slot == cache.finalized_slot)
.count(),
2,
"two proposers should be present in slot 0"
);
assert_eq!(
cache
.items
.iter()
.filter(|(k, _)| k.slot == Slot::new(1))
.count(),
1,
"only one proposer should be present in slot 1"
);
// Slot 0, proposer 1 (again)
let mut block_d = get_block(0, 1);
// The graffiti byte string is padded with spaces to the fixed 32-byte length.
*block_d.body_mut().graffiti_mut() = Graffiti::from(*b"this is slashable               ");
let block_root_d = block_d.canonical_root();
assert_eq!(
cache.is_slashable(
block_d.slot(),
block_d.proposer_index(),
block_d.canonical_root()
),
Ok(true),
"slashable due to new proposer"
);
assert_eq!(
cache.observe_slashable(block_d.slot(), block_d.proposer_index(), block_root_d),
Ok(()),
"can observe new proposer, indicates proposer unobserved"
);
}
}

View File

@ -119,10 +119,13 @@ pub fn start_otb_verification_service<T: BeaconChainTypes>(
pub fn load_optimistic_transition_blocks<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
) -> Result<Vec<OptimisticTransitionBlock>, StoreError> {
process_results(chain.store.hot_db.iter_column(OTBColumn), |iter| {
iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes))
.collect()
})?
process_results(
chain.store.hot_db.iter_column::<Hash256>(OTBColumn),
|iter| {
iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes))
.collect()
},
)?
}
#[derive(Debug)]

View File

@ -3,11 +3,13 @@ use itertools::process_results;
use lru::LruCache;
use parking_lot::Mutex;
use slog::debug;
use std::num::NonZeroUsize;
use std::time::Duration;
use types::non_zero_usize::new_non_zero_usize;
use types::Hash256;
const BLOCK_ROOT_CACHE_LIMIT: usize = 512;
const LOOKUP_LIMIT: usize = 8;
const BLOCK_ROOT_CACHE_LIMIT: NonZeroUsize = new_non_zero_usize(512);
const LOOKUP_LIMIT: NonZeroUsize = new_non_zero_usize(8);
const METRICS_TIMEOUT: Duration = Duration::from_millis(100);
/// Cache for rejecting attestations to blocks from before finalization.
@ -78,7 +80,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// 3. Check the network with a single block lookup.
cache.in_progress_lookups.put(block_root, ());
if cache.in_progress_lookups.len() == LOOKUP_LIMIT {
if cache.in_progress_lookups.len() == LOOKUP_LIMIT.get() {
// NOTE: we expect this to occur sometimes if a lot of blocks that we look up fail to be
// imported for reasons other than being pre-finalization. The cache will eventually
// self-repair in this case by replacing old entries with new ones until all the failed
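For reference, a `NonZeroUsize` cannot be written as a plain literal, and `Option::unwrap` was not usable in `const` contexts on stable Rust at this point, which is presumably why the `new_non_zero_usize` helper exists. A minimal sketch of what such a const constructor plausibly looks like (the real definition lives in `types::non_zero_usize` and may differ):
use std::num::NonZeroUsize;

// Hedged sketch: a const-friendly constructor that panics at compile time if
// handed zero.
pub const fn new_non_zero_usize(x: usize) -> NonZeroUsize {
    match NonZeroUsize::new(x) {
        Some(n) => n,
        None => panic!("new_non_zero_usize called with zero"),
    }
}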

View File

@ -1,19 +1,15 @@
//! Utilities for managing database schema changes.
mod migration_schema_v12;
mod migration_schema_v13;
mod migration_schema_v14;
mod migration_schema_v15;
mod migration_schema_v16;
mod migration_schema_v17;
mod migration_schema_v18;
mod migration_schema_v19;
use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY};
use crate::eth1_chain::SszEth1;
use crate::beacon_chain::BeaconChainTypes;
use crate::types::ChainSpec;
use slog::{warn, Logger};
use slog::Logger;
use std::sync::Arc;
use store::hot_cold_store::{HotColdDB, HotColdDBError};
use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION};
use store::{Error as StoreError, StoreItem};
use store::Error as StoreError;
/// Migrate the database from one schema version to another, applying all requisite mutations.
#[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future
@ -56,92 +52,8 @@ pub fn migrate_schema<T: BeaconChainTypes>(
}
//
// Migrations from before SchemaVersion(11) are deprecated.
// Migrations from before SchemaVersion(16) are deprecated.
//
// Upgrade from v11 to v12 to store richer metadata in the attestation op pool.
(SchemaVersion(11), SchemaVersion(12)) => {
let ops = migration_schema_v12::upgrade_to_v12::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
// Downgrade from v12 to v11 to drop richer metadata from the attestation op pool.
(SchemaVersion(12), SchemaVersion(11)) => {
let ops = migration_schema_v12::downgrade_from_v12::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(12), SchemaVersion(13)) => {
let mut ops = vec![];
if let Some(persisted_eth1_v1) = db.get_item::<SszEth1>(&ETH1_CACHE_DB_KEY)? {
let upgraded_eth1_cache =
match migration_schema_v13::update_eth1_cache(persisted_eth1_v1) {
Ok(upgraded_eth1) => upgraded_eth1,
Err(e) => {
warn!(log, "Failed to deserialize SszEth1CacheV1"; "error" => ?e);
warn!(log, "Reinitializing eth1 cache");
migration_schema_v13::reinitialized_eth1_cache_v13(
deposit_contract_deploy_block,
)
}
};
ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
(SchemaVersion(13), SchemaVersion(12)) => {
let mut ops = vec![];
if let Some(persisted_eth1_v13) = db.get_item::<SszEth1>(&ETH1_CACHE_DB_KEY)? {
let downgraded_eth1_cache = match migration_schema_v13::downgrade_eth1_cache(
persisted_eth1_v13,
) {
Ok(Some(downgraded_eth1)) => downgraded_eth1,
Ok(None) => {
warn!(log, "Unable to downgrade eth1 cache from newer version: reinitializing eth1 cache");
migration_schema_v13::reinitialized_eth1_cache_v1(
deposit_contract_deploy_block,
)
}
Err(e) => {
warn!(log, "Unable to downgrade eth1 cache from newer version: failed to deserialize SszEth1CacheV13"; "error" => ?e);
warn!(log, "Reinitializing eth1 cache");
migration_schema_v13::reinitialized_eth1_cache_v1(
deposit_contract_deploy_block,
)
}
};
ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
(SchemaVersion(13), SchemaVersion(14)) => {
let ops = migration_schema_v14::upgrade_to_v14::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(14), SchemaVersion(13)) => {
let ops = migration_schema_v14::downgrade_from_v14::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(14), SchemaVersion(15)) => {
let ops = migration_schema_v15::upgrade_to_v15::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(15), SchemaVersion(14)) => {
let ops = migration_schema_v15::downgrade_from_v15::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(15), SchemaVersion(16)) => {
let ops = migration_schema_v16::upgrade_to_v16::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(16), SchemaVersion(15)) => {
let ops = migration_schema_v16::downgrade_from_v16::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(16), SchemaVersion(17)) => {
let ops = migration_schema_v17::upgrade_to_v17::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
@ -150,6 +62,22 @@ pub fn migrate_schema<T: BeaconChainTypes>(
let ops = migration_schema_v17::downgrade_from_v17::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(17), SchemaVersion(18)) => {
let ops = migration_schema_v18::upgrade_to_v18::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(18), SchemaVersion(17)) => {
let ops = migration_schema_v18::downgrade_from_v18::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(18), SchemaVersion(19)) => {
let ops = migration_schema_v19::upgrade_to_v19::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(19), SchemaVersion(18)) => {
let ops = migration_schema_v19::downgrade_from_v19::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
// Anything else is an error.
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
target_version: to,

View File

@ -1,224 +0,0 @@
use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY};
use crate::persisted_fork_choice::PersistedForkChoiceV11;
use operation_pool::{PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5};
use slog::{debug, info, Logger};
use state_processing::{
common::get_indexed_attestation, per_block_processing::is_valid_indexed_attestation,
VerifyOperation, VerifySignatures,
};
use std::sync::Arc;
use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
pub fn upgrade_to_v12<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
let spec = db.get_chain_spec();
// Load a V5 op pool and transform it to V12.
let PersistedOperationPoolV5 {
attestations_v5,
sync_contributions,
attester_slashings_v5,
proposer_slashings_v5,
voluntary_exits_v5,
} = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
// Load the persisted fork choice so we can grab the state of the justified block and use
// it to verify the stored attestations, slashings and exits.
let fork_choice = db
.get_item::<PersistedForkChoiceV11>(&FORK_CHOICE_DB_KEY)?
.ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?;
let justified_block_root = fork_choice
.fork_choice_store
.unrealized_justified_checkpoint
.root;
let justified_block = db
.get_blinded_block(&justified_block_root)?
.ok_or_else(|| {
Error::SchemaMigrationError(format!(
"unrealized justified block missing for migration: {justified_block_root:?}",
))
})?;
let justified_state_root = justified_block.state_root();
let mut state = db
.get_state(&justified_state_root, Some(justified_block.slot()))?
.ok_or_else(|| {
Error::SchemaMigrationError(format!(
"justified state missing for migration: {justified_state_root:?}"
))
})?;
state.build_all_committee_caches(spec).map_err(|e| {
Error::SchemaMigrationError(format!("unable to build committee caches: {e:?}"))
})?;
// Re-verify attestations while adding attesting indices.
let attestations = attestations_v5
.into_iter()
.flat_map(|(_, attestations)| attestations)
.filter_map(|attestation| {
let res = state
.get_beacon_committee(attestation.data.slot, attestation.data.index)
.map_err(Into::into)
.and_then(|committee| get_indexed_attestation(committee.committee, &attestation))
.and_then(|indexed_attestation| {
is_valid_indexed_attestation(
&state,
&indexed_attestation,
VerifySignatures::True,
spec,
)?;
Ok(indexed_attestation)
});
match res {
Ok(indexed) => Some((attestation, indexed.attesting_indices.into())),
Err(e) => {
debug!(
log,
"Dropping attestation on migration";
"err" => ?e,
"head_block" => ?attestation.data.beacon_block_root,
);
None
}
}
})
.collect::<Vec<_>>();
let attester_slashings = attester_slashings_v5
.iter()
.filter_map(|(slashing, _)| {
slashing
.clone()
.validate(&state, spec)
.map_err(|e| {
debug!(
log,
"Dropping attester slashing on migration";
"err" => ?e,
"slashing" => ?slashing,
);
})
.ok()
})
.collect::<Vec<_>>();
let proposer_slashings = proposer_slashings_v5
.iter()
.filter_map(|slashing| {
slashing
.clone()
.validate(&state, spec)
.map_err(|e| {
debug!(
log,
"Dropping proposer slashing on migration";
"err" => ?e,
"slashing" => ?slashing,
);
})
.ok()
})
.collect::<Vec<_>>();
let voluntary_exits = voluntary_exits_v5
.iter()
.filter_map(|exit| {
exit.clone()
.validate(&state, spec)
.map_err(|e| {
debug!(
log,
"Dropping voluntary exit on migration";
"err" => ?e,
"exit" => ?exit,
);
})
.ok()
})
.collect::<Vec<_>>();
debug!(
log,
"Migrated op pool";
"attestations" => attestations.len(),
"attester_slashings" => attester_slashings.len(),
"proposer_slashings" => proposer_slashings.len(),
"voluntary_exits" => voluntary_exits.len()
);
let v12 = PersistedOperationPool::V12(PersistedOperationPoolV12 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
});
Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)])
}
pub fn downgrade_from_v12<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// Load a V12 op pool and transform it to V5.
let PersistedOperationPoolV12::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
} = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool_v12
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
info!(
log,
"Dropping attestations from pool";
"count" => attestations.len(),
);
let attester_slashings_v5 = attester_slashings
.into_iter()
.filter_map(|slashing| {
let fork_version = slashing.first_fork_verified_against()?;
Some((slashing.into_inner(), fork_version))
})
.collect::<Vec<_>>();
let proposer_slashings_v5 = proposer_slashings
.into_iter()
.map(|slashing| slashing.into_inner())
.collect::<Vec<_>>();
let voluntary_exits_v5 = voluntary_exits
.into_iter()
.map(|exit| exit.into_inner())
.collect::<Vec<_>>();
info!(
log,
"Migrated slashings and exits";
"attester_slashings" => attester_slashings_v5.len(),
"proposer_slashings" => proposer_slashings_v5.len(),
"voluntary_exits" => voluntary_exits_v5.len(),
);
let v5 = PersistedOperationPoolV5 {
attestations_v5: vec![],
sync_contributions,
attester_slashings_v5,
proposer_slashings_v5,
voluntary_exits_v5,
};
Ok(vec![v5.as_kv_store_op(OP_POOL_DB_KEY)])
}

View File

@ -1,150 +0,0 @@
use crate::eth1_chain::SszEth1;
use eth1::{BlockCache, SszDepositCacheV1, SszDepositCacheV13, SszEth1CacheV1, SszEth1CacheV13};
use ssz::{Decode, Encode};
use state_processing::common::DepositDataTree;
use store::Error;
use types::DEPOSIT_TREE_DEPTH;
pub fn update_eth1_cache(persisted_eth1_v1: SszEth1) -> Result<SszEth1, Error> {
if persisted_eth1_v1.use_dummy_backend {
// backend_bytes is empty when using dummy backend
return Ok(persisted_eth1_v1);
}
let SszEth1 {
use_dummy_backend,
backend_bytes,
} = persisted_eth1_v1;
let ssz_eth1_cache_v1 = SszEth1CacheV1::from_ssz_bytes(&backend_bytes)?;
let SszEth1CacheV1 {
block_cache,
deposit_cache: deposit_cache_v1,
last_processed_block,
} = ssz_eth1_cache_v1;
let SszDepositCacheV1 {
logs,
leaves,
deposit_contract_deploy_block,
deposit_roots,
} = deposit_cache_v1;
let deposit_cache_v13 = SszDepositCacheV13 {
logs,
leaves,
deposit_contract_deploy_block,
finalized_deposit_count: 0,
finalized_block_height: deposit_contract_deploy_block.saturating_sub(1),
deposit_tree_snapshot: None,
deposit_roots,
};
let ssz_eth1_cache_v13 = SszEth1CacheV13 {
block_cache,
deposit_cache: deposit_cache_v13,
last_processed_block,
};
let persisted_eth1_v13 = SszEth1 {
use_dummy_backend,
backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(),
};
Ok(persisted_eth1_v13)
}
pub fn downgrade_eth1_cache(persisted_eth1_v13: SszEth1) -> Result<Option<SszEth1>, Error> {
if persisted_eth1_v13.use_dummy_backend {
// backend_bytes is empty when using dummy backend
return Ok(Some(persisted_eth1_v13));
}
let SszEth1 {
use_dummy_backend,
backend_bytes,
} = persisted_eth1_v13;
let ssz_eth1_cache_v13 = SszEth1CacheV13::from_ssz_bytes(&backend_bytes)?;
let SszEth1CacheV13 {
block_cache,
deposit_cache: deposit_cache_v13,
last_processed_block,
} = ssz_eth1_cache_v13;
let SszDepositCacheV13 {
logs,
leaves,
deposit_contract_deploy_block,
finalized_deposit_count,
finalized_block_height: _,
deposit_tree_snapshot,
deposit_roots,
} = deposit_cache_v13;
if finalized_deposit_count == 0 && deposit_tree_snapshot.is_none() {
// This tree was never finalized and can be directly downgraded to v1 without re-initializing
let deposit_cache_v1 = SszDepositCacheV1 {
logs,
leaves,
deposit_contract_deploy_block,
deposit_roots,
};
let ssz_eth1_cache_v1 = SszEth1CacheV1 {
block_cache,
deposit_cache: deposit_cache_v1,
last_processed_block,
};
return Ok(Some(SszEth1 {
use_dummy_backend,
backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(),
}));
}
// deposit cache was finalized; can't downgrade
Ok(None)
}
pub fn reinitialized_eth1_cache_v13(deposit_contract_deploy_block: u64) -> SszEth1 {
let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);
let deposit_cache_v13 = SszDepositCacheV13 {
logs: vec![],
leaves: vec![],
deposit_contract_deploy_block,
finalized_deposit_count: 0,
finalized_block_height: deposit_contract_deploy_block.saturating_sub(1),
deposit_tree_snapshot: empty_tree.get_snapshot(),
deposit_roots: vec![empty_tree.root()],
};
let ssz_eth1_cache_v13 = SszEth1CacheV13 {
block_cache: BlockCache::default(),
deposit_cache: deposit_cache_v13,
last_processed_block: None,
};
SszEth1 {
use_dummy_backend: false,
backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(),
}
}
pub fn reinitialized_eth1_cache_v1(deposit_contract_deploy_block: u64) -> SszEth1 {
let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);
let deposit_cache_v1 = SszDepositCacheV1 {
logs: vec![],
leaves: vec![],
deposit_contract_deploy_block,
deposit_roots: vec![empty_tree.root()],
};
let ssz_eth1_cache_v1 = SszEth1CacheV1 {
block_cache: BlockCache::default(),
deposit_cache: deposit_cache_v1,
last_processed_block: None,
};
SszEth1 {
use_dummy_backend: false,
backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(),
}
}

View File

@ -1,125 +0,0 @@
use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
use operation_pool::{
PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14,
};
use slog::{debug, error, info, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::Duration;
use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
use types::{EthSpec, Hash256, Slot};
/// The slot clock isn't usually available before the database is initialized, so we construct a
/// temporary slot clock by reading the genesis state. It should always exist if the database is
/// initialized at a prior schema version; however, we still handle the lack of a genesis state
/// gracefully.
fn get_slot_clock<T: BeaconChainTypes>(
db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
log: &Logger,
) -> Result<Option<T::SlotClock>, Error> {
let spec = db.get_chain_spec();
let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? {
block
} else {
error!(log, "Missing genesis block");
return Ok(None);
};
let genesis_state =
if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
state
} else {
error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
return Ok(None);
};
Ok(Some(T::SlotClock::new(
spec.genesis_slot,
Duration::from_secs(genesis_state.genesis_time()),
Duration::from_secs(spec.seconds_per_slot),
)))
}
pub fn upgrade_to_v14<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// Load a V12 op pool and transform it to V14.
let PersistedOperationPoolV12::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
} = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool_v12
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
// initialize with empty vector
let bls_to_execution_changes = vec![];
let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
});
Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)])
}
pub fn downgrade_from_v14<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// We cannot downgrade from V14 once the Capella fork has been reached because there will
// be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions
// of Lighthouse can't handle that.
if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch {
let current_epoch = get_slot_clock::<T>(&db, &log)?
.and_then(|clock| clock.now())
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
.ok_or(Error::SlotClockUnavailableForMigration)?;
if current_epoch >= capella_fork_epoch {
error!(
log,
"Capella already active: v14+ is mandatory";
"current_epoch" => current_epoch,
"capella_fork_epoch" => capella_fork_epoch,
);
return Err(Error::UnableToDowngrade);
}
}
// Load a V14 op pool and transform it to V12.
let PersistedOperationPoolV14::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
} = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
info!(
log,
"Dropping bls_to_execution_changes from pool";
"count" => bls_to_execution_changes.len(),
);
let v12 = PersistedOperationPoolV12 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
};
Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)])
}

View File

@ -1,76 +0,0 @@
use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
use operation_pool::{
PersistedOperationPool, PersistedOperationPoolV14, PersistedOperationPoolV15,
};
use slog::{debug, info, Logger};
use std::sync::Arc;
use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
pub fn upgrade_to_v15<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// Load a V14 op pool and transform it to V15.
let PersistedOperationPoolV14::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
} = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool_v14
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
// Initialize with empty set
capella_bls_change_broadcast_indices: <_>::default(),
});
Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)])
}
pub fn downgrade_from_v15<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// Load a V15 op pool and transform it to V14.
let PersistedOperationPoolV15::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
capella_bls_change_broadcast_indices,
} = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
info!(
log,
"Forgetting address changes for Capella broadcast";
"count" => capella_bls_change_broadcast_indices.len(),
);
let v14 = PersistedOperationPoolV14 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
};
Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)])
}

View File

@ -1,46 +0,0 @@
use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY};
use crate::persisted_fork_choice::PersistedForkChoiceV11;
use slog::{debug, Logger};
use std::sync::Arc;
use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
pub fn upgrade_to_v16<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
drop_balances_cache::<T>(db, log)
}
pub fn downgrade_from_v16<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
drop_balances_cache::<T>(db, log)
}
/// Drop the balances cache from the fork choice store.
///
/// There aren't any type-level changes in this schema migration, however the
/// way that we compute the `JustifiedBalances` has changed due to:
/// https://github.com/sigp/lighthouse/pull/3962
pub fn drop_balances_cache<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
let mut persisted_fork_choice = db
.get_item::<PersistedForkChoiceV11>(&FORK_CHOICE_DB_KEY)?
.ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?;
debug!(
log,
"Dropping fork choice balances cache";
"item_count" => persisted_fork_choice.fork_choice_store.balances_cache.items.len()
);
// Drop all items in the balances cache.
persisted_fork_choice.fork_choice_store.balances_cache = <_>::default();
let kv_op = persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY);
Ok(vec![kv_op])
}

View File

@ -0,0 +1,119 @@
use crate::beacon_chain::BeaconChainTypes;
use slog::{error, info, warn, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::Duration;
use store::{
get_key_for_col, metadata::BLOB_INFO_KEY, DBColumn, Error, HotColdDB, KeyValueStoreOp,
};
use types::{Epoch, EthSpec, Hash256, Slot};
/// The slot clock isn't usually available before the database is initialized, so we construct a
/// temporary slot clock by reading the genesis state. It should always exist if the database is
/// initialized at a prior schema version; however, we still handle the lack of genesis state
/// gracefully.
fn get_slot_clock<T: BeaconChainTypes>(
db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
log: &Logger,
) -> Result<Option<T::SlotClock>, Error> {
let spec = db.get_chain_spec();
let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else {
error!(log, "Missing genesis block");
return Ok(None);
};
let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else {
error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
return Ok(None);
};
Ok(Some(T::SlotClock::new(
spec.genesis_slot,
Duration::from_secs(genesis_state.genesis_time()),
Duration::from_secs(spec.seconds_per_slot),
)))
}
fn get_current_epoch<T: BeaconChainTypes>(
db: &Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: &Logger,
) -> Result<Epoch, Error> {
get_slot_clock::<T>(db, log)?
.and_then(|clock| clock.now())
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
.ok_or(Error::SlotClockUnavailableForMigration)
}
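// Editor's sketch of the slot-clock arithmetic the two helpers above rely on:
// current slot = (now - genesis_time) / seconds_per_slot, and
// epoch = slot / slots_per_epoch. Self-contained; the mainnet constants used in
// `main` (12s slots, 32 slots per epoch, genesis at 1_606_824_023) are for
// illustration only.
use std::time::{SystemTime, UNIX_EPOCH};
fn current_slot_and_epoch(genesis_time: u64, seconds_per_slot: u64, slots_per_epoch: u64) -> Option<(u64, u64)> {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs();
    // Before genesis there is no current slot.
    let elapsed = now.checked_sub(genesis_time)?;
    let slot = elapsed / seconds_per_slot;
    Some((slot, slot / slots_per_epoch))
}
fn main() {
    if let Some((slot, epoch)) = current_slot_and_epoch(1_606_824_023, 12, 32) {
        println!("slot {slot}, epoch {epoch}");
    }
}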
pub fn upgrade_to_v18<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
db.heal_freezer_block_roots_at_split()?;
db.heal_freezer_block_roots_at_genesis()?;
info!(log, "Healed freezer block roots");
// No-op, even if Deneb has already occurred. The database is probably borked in this case, but
// *maybe* the fork recovery will revert the minority fork and succeed.
if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch {
let current_epoch = get_current_epoch::<T>(&db, &log)?;
if current_epoch >= deneb_fork_epoch {
warn!(
log,
"Attempting upgrade to v18 schema";
"info" => "this may not work as Deneb has already been activated"
);
} else {
info!(
log,
"Upgrading to v18 schema";
"info" => "ready for Deneb",
"epochs_until_deneb" => deneb_fork_epoch - current_epoch
);
}
} else {
info!(
log,
"Upgrading to v18 schema";
"info" => "ready for Deneb once it is scheduled"
);
}
Ok(vec![])
}
pub fn downgrade_from_v18<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// We cannot downgrade from V18 once the Deneb fork has been activated, because there will
// be blobs and blob metadata in the database that aren't understood by the V17 schema.
if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch {
let current_epoch = get_current_epoch::<T>(&db, &log)?;
if current_epoch >= deneb_fork_epoch {
error!(
log,
"Deneb already active: v18+ is mandatory";
"current_epoch" => current_epoch,
"deneb_fork_epoch" => deneb_fork_epoch,
);
return Err(Error::UnableToDowngrade);
} else {
info!(
log,
"Downgrading to v17 schema";
"info" => "you will need to upgrade before Deneb",
"epochs_until_deneb" => deneb_fork_epoch - current_epoch
);
}
} else {
info!(
log,
"Downgrading to v17 schema";
"info" => "you need to upgrade before Deneb",
);
}
let ops = vec![KeyValueStoreOp::DeleteKey(get_key_for_col(
DBColumn::BeaconMeta.into(),
BLOB_INFO_KEY.as_bytes(),
))];
Ok(ops)
}

View File

@ -0,0 +1,65 @@
use crate::beacon_chain::BeaconChainTypes;
use slog::{debug, info, Logger};
use std::sync::Arc;
use store::{get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp};
pub fn upgrade_to_v19<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
let mut hot_delete_ops = vec![];
let mut blob_keys = vec![];
let column = DBColumn::BeaconBlob;
debug!(log, "Migrating from v18 to v19");
// Iterate through the blobs on disk.
for res in db.hot_db.iter_column_keys::<Vec<u8>>(column) {
let key = res?;
let key_col = get_key_for_col(column.as_str(), &key);
hot_delete_ops.push(KeyValueStoreOp::DeleteKey(key_col));
blob_keys.push(key);
}
let num_blobs = blob_keys.len();
debug!(log, "Collected {} blob lists to migrate", num_blobs);
let batch_size = 500;
let mut batch = Vec::with_capacity(batch_size);
for key in blob_keys {
let next_blob = db.hot_db.get_bytes(column.as_str(), &key)?;
if let Some(next_blob) = next_blob {
let key_col = get_key_for_col(column.as_str(), &key);
batch.push(KeyValueStoreOp::PutKeyValue(key_col, next_blob));
if batch.len() >= batch_size {
db.blobs_db.do_atomically(batch.clone())?;
batch.clear();
}
}
}
// Process the remaining batch if it's not empty
if !batch.is_empty() {
db.blobs_db.do_atomically(batch)?;
}
debug!(log, "Wrote {} blobs to the blobs db", num_blobs);
// Return the ops that delete all the blobs from the hot database.
info!(log, "Upgrading to v19 schema");
Ok(hot_delete_ops)
}
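// Editor's sketch of the batched copy performed by `upgrade_to_v19`: gather the
// keys, then move values to the destination in fixed-size batches so that no
// single write grows unboundedly. Plain `HashMap`s stand in for the hot and
// blobs databases; the real code applies `KeyValueStoreOp`s via `do_atomically`.
use std::collections::HashMap;
type Store = HashMap<Vec<u8>, Vec<u8>>;
fn migrate_in_batches(src: &mut Store, dst: &mut Store, batch_size: usize) {
    let keys: Vec<Vec<u8>> = src.keys().cloned().collect();
    let mut batch = Vec::with_capacity(batch_size);
    for key in keys {
        if let Some(value) = src.get(&key) {
            batch.push((key.clone(), value.clone()));
            if batch.len() >= batch_size {
                // Stand-in for an atomic write of the batch to the blobs db.
                dst.extend(batch.drain(..));
            }
        }
    }
    // Flush the final partial batch.
    dst.extend(batch.drain(..));
    // Stand-in for the returned delete ops against the source column.
    src.clear();
}
fn main() {
    let mut src: Store = (0u8..7).map(|i| (vec![i], vec![i; 4])).collect();
    let mut dst = Store::new();
    migrate_in_batches(&mut src, &mut dst, 3);
    assert_eq!(dst.len(), 7);
    assert!(src.is_empty());
}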
pub fn downgrade_from_v19<T: BeaconChainTypes>(
_db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// No-op
info!(
log,
"Downgrading to v18 schema";
);
Ok(vec![])
}

View File

@ -45,6 +45,9 @@ const MAX_ADVANCE_DISTANCE: u64 = 4;
/// impact whilst having 8 epochs without a block is a comfortable grace period.
const MAX_FORK_CHOICE_DISTANCE: u64 = 256;
/// Drop any unused block production state cache after this many slots.
const MAX_BLOCK_PRODUCTION_CACHE_DISTANCE: u64 = 4;
#[derive(Debug)]
enum Error {
BeaconChain(BeaconChainError),
@ -113,14 +116,11 @@ async fn state_advance_timer<T: BeaconChainTypes>(
let slot_duration = slot_clock.slot_duration();
loop {
let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() {
Some(duration) => duration,
None => {
error!(log, "Failed to read slot clock");
// If we can't read the slot clock, just wait another slot.
sleep(slot_duration).await;
continue;
}
let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() else {
error!(log, "Failed to read slot clock");
// If we can't read the slot clock, just wait another slot.
sleep(slot_duration).await;
continue;
};
// Run the state advance 3/4 of the way through the slot (9s on mainnet).
@ -230,19 +230,73 @@ async fn state_advance_timer<T: BeaconChainTypes>(
// Prepare proposers so that the node can send payload attributes in the case where
// it decides to abandon a proposer boost re-org.
if let Err(e) = beacon_chain.prepare_beacon_proposer(current_slot).await {
warn!(
log,
"Unable to prepare proposer with lookahead";
"error" => ?e,
"slot" => next_slot,
);
}
let proposer_head = beacon_chain
.prepare_beacon_proposer(current_slot)
.await
.unwrap_or_else(|e| {
warn!(
log,
"Unable to prepare proposer with lookahead";
"error" => ?e,
"slot" => next_slot,
);
None
});
// Use a blocking task to avoid blocking the core executor whilst waiting for locks
// in `ForkChoiceSignalTx`.
beacon_chain.task_executor.clone().spawn_blocking(
move || {
// If we're proposing, clone the head state preemptively so that it isn't on
// the hot path of proposing. We can delete this once we have tree-states.
if let Some(proposer_head) = proposer_head {
let mut cache = beacon_chain.block_production_state.lock();
// Avoid holding two states in memory. It's OK to hold the lock because
// we always lock the block production cache before the snapshot cache
// and we prefer for block production to wait for the block production
// cache if a clone is in-progress.
if cache
.as_ref()
.map_or(false, |(cached_head, _)| *cached_head != proposer_head)
{
drop(cache.take());
}
if let Some(proposer_state) = beacon_chain
.snapshot_cache
.try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
.and_then(|snapshot_cache| {
snapshot_cache.get_state_for_block_production(proposer_head)
})
{
*cache = Some((proposer_head, proposer_state));
debug!(
log,
"Cloned state ready for block production";
"head_block_root" => ?proposer_head,
"slot" => next_slot
);
} else {
warn!(
log,
"Block production state missing from snapshot cache";
"head_block_root" => ?proposer_head,
"slot" => next_slot
);
}
} else {
// If we aren't proposing, drop any old block production cache to save
// memory.
let mut cache = beacon_chain.block_production_state.lock();
if let Some((_, state)) = &*cache {
if state.pre_state.slot() + MAX_BLOCK_PRODUCTION_CACHE_DISTANCE
<= current_slot
{
drop(cache.take());
}
}
}
// Signal block proposal for the next slot (if it happens to be waiting).
if let Some(tx) = &beacon_chain.fork_choice_signal_tx {
if let Err(e) = tx.notify_fork_choice_complete(next_slot) {

View File

@ -1,10 +1,12 @@
use crate::block_verification_types::{AsBlock, RpcBlock};
use crate::observed_operations::ObservationOutcome;
pub use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::BeaconBlockResponseWrapper;
pub use crate::{
beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
migrate::MigratorConfig,
sync_committee_verification::Error as SyncCommitteeError,
validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig},
BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification,
};
use crate::{
@ -14,17 +16,21 @@ use crate::{
StateSkipConfig,
};
use bls::get_withdrawal_credentials;
use eth2::types::SignedBlockContentsTuple;
use eth2_network_config::TRUSTED_SETUP_BYTES;
use execution_layer::test_utils::generate_genesis_header;
use execution_layer::{
auth::JwtKey,
test_utils::{
ExecutionBlockGenerator, MockBuilder, MockBuilderServer, MockExecutionLayer,
DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK,
ExecutionBlockGenerator, MockBuilder, MockExecutionLayer, DEFAULT_JWT_SECRET,
DEFAULT_TERMINAL_BLOCK,
},
ExecutionLayer,
};
use futures::channel::mpsc::Receiver;
pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH};
use int_to_bytes::int_to_bytes32;
use kzg::{Kzg, TrustedSetup};
use merkle_proof::MerkleTree;
use operation_pool::ReceivedPreCapella;
use parking_lot::Mutex;
@ -34,7 +40,9 @@ use rand::Rng;
use rand::SeedableRng;
use rayon::prelude::*;
use sensitive_url::SensitiveUrl;
use slog::Logger;
use slog::{o, Drain, Logger};
use slog_async::Async;
use slog_term::{FullFormat, TermDecorator};
use slot_clock::{SlotClock, TestingSlotClock};
use state_processing::per_block_processing::compute_timestamp_at_slot;
use state_processing::{
@ -45,19 +53,23 @@ use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore};
use task_executor::TaskExecutor;
use task_executor::{test_utils::TestRuntime, ShutdownReason};
use tree_hash::TreeHash;
use types::payload::BlockProductionVersion;
use types::sync_selection_proof::SyncSelectionProof;
pub use types::test_utils::generate_deterministic_keypairs;
use types::test_utils::TestRandom;
use types::{typenum::U4294967296, *};
// 4th September 2019
pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
// Environment variable to read if `fork_from_env` feature is enabled.
const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
// Default target aggregators to set during testing; this ensures an aggregator at each slot.
//
@ -168,6 +180,7 @@ pub struct Builder<T: BeaconChainTypes> {
execution_layer: Option<ExecutionLayer<T::EthSpec>>,
mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>,
testing_slot_clock: Option<TestingSlotClock>,
validator_monitor_config: Option<ValidatorMonitorConfig>,
runtime: TestRuntime,
log: Logger,
}
@ -189,11 +202,12 @@ impl<E: EthSpec> Builder<EphemeralHarnessType<E>> {
.unwrap(),
);
let mutator = move |builder: BeaconChainBuilder<_>| {
let header = generate_genesis_header::<E>(builder.get_spec(), false);
let genesis_state = interop_genesis_state_with_eth1::<E>(
&validator_keypairs,
HARNESS_GENESIS_TIME,
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
None,
header,
builder.get_spec(),
)
.expect("should generate interop state");
@ -250,11 +264,12 @@ impl<E: EthSpec> Builder<DiskHarnessType<E>> {
.expect("cannot build without validator keypairs");
let mutator = move |builder: BeaconChainBuilder<_>| {
let header = generate_genesis_header::<E>(builder.get_spec(), false);
let genesis_state = interop_genesis_state_with_eth1::<E>(
&validator_keypairs,
HARNESS_GENESIS_TIME,
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
None,
header,
builder.get_spec(),
)
.expect("should generate interop state");
@ -301,6 +316,7 @@ where
execution_layer: None,
mock_execution_layer: None,
testing_slot_clock: None,
validator_monitor_config: None,
runtime,
log,
}
@ -317,6 +333,11 @@ where
self
}
pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec<Option<Keypair>>) -> Self {
self.withdrawal_keypairs = withdrawal_keypairs;
self
}
/// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to
/// the "determistic" values, regardless of wether or not the validator has
/// a BLS or execution address in the genesis deposits.
@ -332,11 +353,6 @@ where
)
}
pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec<Option<Keypair>>) -> Self {
self.withdrawal_keypairs = withdrawal_keypairs;
self
}
pub fn default_spec(self) -> Self {
self.spec_or_default(None)
}
@ -373,6 +389,14 @@ where
self
}
pub fn validator_monitor_config(
mut self,
validator_monitor_config: ValidatorMonitorConfig,
) -> Self {
self.validator_monitor_config = Some(validator_monitor_config);
self
}
/// Purposefully replace the `store_mutator`.
pub fn override_store_mutator(mut self, mutator: BoxedMutator<E, Hot, Cold>) -> Self {
assert!(self.store_mutator.is_some(), "store mutator not set");
@ -385,7 +409,7 @@ where
self
}
pub fn execution_layer(mut self, urls: &[&str]) -> Self {
pub fn execution_layer_from_urls(mut self, urls: &[&str]) -> Self {
assert!(
self.execution_layer.is_none(),
"execution layer already defined"
@ -414,6 +438,11 @@ where
self
}
pub fn execution_layer(mut self, el: Option<ExecutionLayer<E>>) -> Self {
self.execution_layer = el;
self
}
pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self {
let mock = self
.mock_execution_layer
@ -427,26 +456,21 @@ where
spec.capella_fork_epoch.map(|epoch| {
genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
mock.server.execution_block_generator().cancun_time = spec.deneb_fork_epoch.map(|epoch| {
genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
self
}
pub fn mock_execution_layer(self) -> Self {
self.mock_execution_layer_with_config(None)
self.mock_execution_layer_with_config()
}
pub fn mock_execution_layer_with_config(mut self, builder_threshold: Option<u128>) -> Self {
let spec = self.spec.clone().expect("cannot build without spec");
let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let mock = MockExecutionLayer::new(
pub fn mock_execution_layer_with_config(mut self) -> Self {
let mock = mock_execution_layer_from_parts::<E>(
self.spec.as_ref().expect("cannot build without spec"),
self.runtime.task_executor.clone(),
DEFAULT_TERMINAL_BLOCK,
shanghai_time,
builder_threshold,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
spec,
);
self.execution_layer = Some(mock.el.clone());
self.mock_execution_layer = Some(mock);
@ -478,8 +502,13 @@ where
let validator_keypairs = self
.validator_keypairs
.expect("cannot build without validator keypairs");
let chain_config = self.chain_config.unwrap_or_default();
let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES)
.map_err(|e| format!("Unable to read trusted setup file: {}", e))
.unwrap();
let validator_monitor_config = self.validator_monitor_config.unwrap_or_default();
let chain_config = self.chain_config.unwrap_or_default();
let mut builder = BeaconChainBuilder::new(self.eth_spec_instance)
.logger(log.clone())
.custom_spec(spec)
@ -499,7 +528,8 @@ where
log.clone(),
5,
)))
.monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log);
.validator_monitor_config(validator_monitor_config)
.trusted_setup(trusted_setup);
builder = if let Some(mutator) = self.initial_mutator {
mutator(builder)
@ -540,6 +570,33 @@ where
}
}
pub fn mock_execution_layer_from_parts<T: EthSpec>(
spec: &ChainSpec,
task_executor: TaskExecutor,
) -> MockExecutionLayer<T> {
let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64()
});
let cancun_time = spec.deneb_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64()
});
let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES)
.map_err(|e| format!("Unable to read trusted setup file: {}", e))
.expect("should have trusted setup");
let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg");
MockExecutionLayer::new(
task_executor,
DEFAULT_TERMINAL_BLOCK,
shanghai_time,
cancun_time,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
spec.clone(),
Some(kzg),
)
}
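// Editor's sketch of the fork-timestamp arithmetic above: an execution-layer
// fork activates at genesis_time + seconds_per_slot * slots_per_epoch * epoch.
// The concrete numbers below (mainnet slot timing, Capella at epoch 194_048)
// are illustrative values, not read from this codebase.
fn fork_activation_time(genesis_time: u64, seconds_per_slot: u64, slots_per_epoch: u64, fork_epoch: Option<u64>) -> Option<u64> {
    fork_epoch.map(|epoch| genesis_time + seconds_per_slot * slots_per_epoch * epoch)
}
fn main() {
    let t = fork_activation_time(1_606_824_023, 12, 32, Some(194_048));
    assert_eq!(t, Some(1_606_824_023 + 12 * 32 * 194_048));
}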
/// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and
/// attestations.
///
@ -595,7 +652,10 @@ where
.execution_block_generator()
}
pub fn set_mock_builder(&mut self, beacon_url: SensitiveUrl) -> MockBuilderServer {
pub fn set_mock_builder(
&mut self,
beacon_url: SensitiveUrl,
) -> impl futures::Future<Output = ()> {
let mock_el = self
.mock_execution_layer
.as_ref()
@ -604,7 +664,7 @@ where
let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap();
// Create the builder, listening on a free port.
let (mock_builder, mock_builder_server) = MockBuilder::new_for_testing(
let (mock_builder, (addr, mock_builder_server)) = MockBuilder::new_for_testing(
mock_el_url,
beacon_url,
self.spec.clone(),
@ -612,8 +672,7 @@ where
);
// Set the builder URL in the execution layer now that its port is known.
let builder_listen_addr = mock_builder_server.local_addr();
let port = builder_listen_addr.port();
let port = addr.port();
mock_el
.el
.set_builder_url(
@ -638,6 +697,20 @@ where
mock_builder_server
}
pub fn get_head_block(&self) -> RpcBlock<E> {
let block = self.chain.head_beacon_block();
let block_root = block.canonical_root();
let blobs = self.chain.get_blobs(&block_root).unwrap();
RpcBlock::new(Some(block_root), block, Some(blobs)).unwrap()
}
pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock<E> {
let block = self.chain.get_blinded_block(block_root).unwrap().unwrap();
let full_block = self.chain.store.make_full_block(block_root, block).unwrap();
let blobs = self.chain.get_blobs(block_root).unwrap();
RpcBlock::new(Some(*block_root), Arc::new(full_block), Some(blobs)).unwrap()
}
pub fn get_all_validators(&self) -> Vec<usize> {
(0..self.validator_keypairs.len()).collect()
}
@ -749,7 +822,7 @@ where
slot: Slot,
) -> (SignedBlindedBeaconBlock<E>, BeaconState<E>) {
let (unblinded, new_state) = self.make_block(state, slot).await;
(unblinded.into(), new_state)
((*unblinded.0).clone().into(), new_state)
}
/// Returns a newly created block, signed by the proposer for the given slot.
@ -757,7 +830,7 @@ where
&self,
mut state: BeaconState<E>,
slot: Slot,
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
) -> (SignedBlockContentsTuple<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
@ -775,7 +848,7 @@ where
let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot);
let (block, state) = self
let BeaconBlockResponseWrapper::Full(block_response) = self
.chain
.produce_block_on_state(
state,
@ -784,18 +857,31 @@ where
randao_reveal,
Some(graffiti),
ProduceBlockVerification::VerifyRandao,
None,
BlockProductionVersion::FullV2,
)
.await
.unwrap();
.unwrap()
else {
panic!("Should always be a full payload response");
};
let signed_block = block.sign(
let signed_block = Arc::new(block_response.block.sign(
&self.validator_keypairs[proposer_index].sk,
&state.fork(),
state.genesis_validators_root(),
&block_response.state.fork(),
block_response.state.genesis_validators_root(),
&self.spec,
);
));
(signed_block, state)
let block_contents: SignedBlockContentsTuple<E> = match *signed_block {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => (signed_block, None),
SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items),
};
(block_contents, block_response.state)
}
/// Useful for the `per_block_processing` tests. Creates a block, and returns the state after
@ -804,7 +890,7 @@ where
&self,
mut state: BeaconState<E>,
slot: Slot,
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
) -> (SignedBlockContentsTuple<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
@ -824,7 +910,7 @@ where
let pre_state = state.clone();
let (block, state) = self
let BeaconBlockResponseWrapper::Full(block_response) = self
.chain
.produce_block_on_state(
state,
@ -833,18 +919,30 @@ where
randao_reveal,
Some(graffiti),
ProduceBlockVerification::VerifyRandao,
None,
BlockProductionVersion::FullV2,
)
.await
.unwrap();
.unwrap()
else {
panic!("Should always be a full payload response");
};
let signed_block = block.sign(
let signed_block = Arc::new(block_response.block.sign(
&self.validator_keypairs[proposer_index].sk,
&state.fork(),
state.genesis_validators_root(),
&block_response.state.fork(),
block_response.state.genesis_validators_root(),
&self.spec,
);
));
(signed_block, pre_state)
let block_contents: SignedBlockContentsTuple<E> = match *signed_block {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => (signed_block, None),
SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items),
};
(block_contents, pre_state)
}
/// Create a randao reveal for a block at `slot`.
@ -980,9 +1078,9 @@ where
) -> (Vec<CommitteeAttestations<E>>, Vec<usize>) {
let MakeAttestationOptions { limit, fork } = opts;
let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap();
let attesters = Mutex::new(vec![]);
let num_attesters = AtomicUsize::new(0);
let attestations = state
let (attestations, split_attesters) = state
.get_beacon_committees_at_slot(attestation_slot)
.expect("should get committees")
.iter()
@ -995,13 +1093,14 @@ where
return None;
}
let mut attesters = attesters.lock();
if let Some(limit) = limit {
if attesters.len() >= limit {
// This atomics stuff is necessary because we're under a par_iter,
// and Rayon will deadlock if we use a mutex.
if num_attesters.fetch_add(1, Ordering::Relaxed) >= limit {
num_attesters.fetch_sub(1, Ordering::Relaxed);
return None;
}
}
attesters.push(*validator_index);
let mut attestation = self
.produce_unaggregated_attestation_for_block(
@ -1041,14 +1140,17 @@ where
)
.unwrap();
Some((attestation, subnet_id))
Some(((attestation, subnet_id), validator_index))
})
.collect::<Vec<_>>()
.unzip::<_, _, Vec<_>, Vec<_>>()
})
.collect::<Vec<_>>();
.unzip::<_, _, Vec<_>, Vec<_>>();
// Flatten attesters.
let attesters = split_attesters.into_iter().flatten().collect::<Vec<_>>();
let attesters = attesters.into_inner();
if let Some(limit) = limit {
assert_eq!(limit, num_attesters.load(Ordering::Relaxed));
assert_eq!(
limit,
attesters.len(),
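// Editor's sketch of the lock-free limit used in the closure above: under a
// Rayon `par_iter` a mutex risks deadlock, so a slot is reserved with
// `fetch_add` and handed back with `fetch_sub` on overshoot. Assumes the
// `rayon` crate; the item type is a stand-in.
use rayon::prelude::*;
use std::sync::atomic::{AtomicUsize, Ordering};
fn take_at_most(items: &[u64], limit: usize) -> Vec<u64> {
    let taken = AtomicUsize::new(0);
    items
        .par_iter()
        .filter_map(|&item| {
            // Reserve a slot; give it back if the limit was already reached.
            if taken.fetch_add(1, Ordering::Relaxed) >= limit {
                taken.fetch_sub(1, Ordering::Relaxed);
                return None;
            }
            Some(item)
        })
        .collect()
}
fn main() {
    assert_eq!(take_at_most(&[1, 2, 3, 4, 5], 3).len(), 3);
}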
@ -1519,14 +1621,13 @@ where
pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit {
let sk = &self.validator_keypairs[validator_index as usize].sk;
let fork = self.chain.canonical_head.cached_head().head_fork();
let genesis_validators_root = self.chain.genesis_validators_root;
VoluntaryExit {
epoch,
validator_index,
}
.sign(sk, &fork, genesis_validators_root, &self.chain.spec)
.sign(sk, genesis_validators_root, &self.chain.spec)
}
pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> {
@ -1635,12 +1736,13 @@ where
state: BeaconState<E>,
slot: Slot,
block_modifier: impl FnOnce(&mut BeaconBlock<E>),
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
) -> (SignedBlockContentsTuple<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
let (block, state) = self.make_block_return_pre_state(state, slot).await;
let (mut block, _) = block.deconstruct();
let ((block, blobs), state) = self.make_block_return_pre_state(state, slot).await;
let (mut block, _) = (*block).clone().deconstruct();
block_modifier(&mut block);
@ -1652,7 +1754,33 @@ where
state.genesis_validators_root(),
&self.spec,
);
(signed_block, state)
((Arc::new(signed_block), blobs), state)
}
pub async fn make_blob_with_modifier(
&self,
state: BeaconState<E>,
slot: Slot,
blob_modifier: impl FnOnce(&mut BlobsList<E>),
) -> (SignedBlockContentsTuple<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
let ((block, mut blobs), state) = self.make_block_return_pre_state(state, slot).await;
let (block, _) = (*block).clone().deconstruct();
blob_modifier(&mut blobs.as_mut().unwrap().1);
let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
let signed_block = block.sign(
&self.validator_keypairs[proposer_index].sk,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
);
((Arc::new(signed_block), blobs), state)
}
pub fn make_deposits<'a>(
@ -1732,37 +1860,52 @@ where
&self,
slot: Slot,
block_root: Hash256,
block: SignedBeaconBlock<E>,
block_contents: SignedBlockContentsTuple<E>,
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
self.set_current_slot(slot);
let (block, blob_items) = block_contents;
let sidecars = blob_items
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs))
.transpose()
.unwrap();
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block_root,
Arc::new(block),
RpcBlock::new(Some(block_root), block, sidecars).unwrap(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await?
.into();
.try_into()
.unwrap();
self.chain.recompute_head_at_current_slot().await;
Ok(block_hash)
}
pub async fn process_block_result(
&self,
block: SignedBeaconBlock<E>,
block_contents: SignedBlockContentsTuple<E>,
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
let (block, blob_items) = block_contents;
let sidecars = blob_items
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs))
.transpose()
.unwrap();
let block_root = block.canonical_root();
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block.canonical_root(),
Arc::new(block),
block_root,
RpcBlock::new(Some(block_root), block, sidecars).unwrap(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await?
.into();
.try_into()
.expect("block blobs are available");
self.chain.recompute_head_at_current_slot().await;
Ok(block_hash)
}
@ -1822,13 +1965,25 @@ where
&self,
slot: Slot,
state: BeaconState<E>,
) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock<E>, BeaconState<E>), BlockError<E>> {
) -> Result<
(
SignedBeaconBlockHash,
SignedBlockContentsTuple<E>,
BeaconState<E>,
),
BlockError<E>,
> {
self.set_current_slot(slot);
let (block, new_state) = self.make_block(state, slot).await;
let (block_contents, new_state) = self.make_block(state, slot).await;
let block_hash = self
.process_block(slot, block.canonical_root(), block.clone())
.process_block(
slot,
block_contents.0.canonical_root(),
block_contents.clone(),
)
.await?;
Ok((block_hash, block, new_state))
Ok((block_hash, block_contents, new_state))
}
pub fn attest_block(
@ -1882,7 +2037,7 @@ where
sync_committee_strategy: SyncCommitteeStrategy,
) -> Result<(SignedBeaconBlockHash, BeaconState<E>), BlockError<E>> {
let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?;
self.attest_block(&state, state_root, block_hash, &block, validators);
self.attest_block(&state, state_root, block_hash, &block.0, validators);
if sync_committee_strategy == SyncCommitteeStrategy::AllValidators
&& state.current_sync_committee().is_ok()
@ -2080,8 +2235,9 @@ where
chain_dump
.iter()
.cloned()
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into())
.filter(|block_hash| *block_hash != Hash256::zero().into())
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root)
.filter(|block_hash| *block_hash != Hash256::zero())
.map(|hash| hash.into())
.collect()
}
@ -2133,6 +2289,29 @@ where
.await
}
/// Uses `Self::extend_chain` to extend the chain by `num_slots` blocks.
///
/// Utilizes:
///
/// - BlockStrategy::OnCanonicalHead,
/// - AttestationStrategy::SomeValidators(validators),
pub async fn extend_slots_some_validators(
&self,
num_slots: usize,
validators: Vec<usize>,
) -> Hash256 {
if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() {
self.advance_slot();
}
self.extend_chain(
num_slots,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::SomeValidators(validators),
)
.await
}
/// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the
/// last-produced block (the head of the chain).
///
@ -2293,3 +2472,74 @@ pub struct MakeAttestationOptions {
/// Fork to use for signing attestations.
pub fork: Fork,
}
pub fn build_log(level: slog::Level, enabled: bool) -> Logger {
let decorator = TermDecorator::new().build();
let drain = FullFormat::new(decorator).build().fuse();
let drain = Async::new(drain).build().fuse();
if enabled {
Logger::root(drain.filter_level(level).fuse(), o!())
} else {
Logger::root(drain.filter(|_| false).fuse(), o!())
}
}
pub enum NumBlobs {
Random,
None,
}
pub fn generate_rand_block_and_blobs<E: EthSpec>(
fork_name: ForkName,
num_blobs: NumBlobs,
rng: &mut impl Rng,
) -> (SignedBeaconBlock<E, FullPayload<E>>, Vec<BlobSidecar<E>>) {
let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng));
let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng));
let mut blob_sidecars = vec![];
if let Ok(message) = block.message_deneb_mut() {
// Get either zero blobs or a random number of blobs between 1 and Max Blobs.
let payload: &mut FullPayloadDeneb<E> = &mut message.body.execution_payload;
let num_blobs = match num_blobs {
NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()),
NumBlobs::None => 0,
};
let (bundle, transactions) =
execution_layer::test_utils::generate_blobs::<E>(num_blobs).unwrap();
payload.execution_payload.transactions = <_>::default();
for tx in Vec::from(transactions) {
payload.execution_payload.transactions.push(tx).unwrap();
}
message.body.blob_kzg_commitments = bundle.commitments.clone();
let eth2::types::BlobsBundle {
commitments,
proofs,
blobs,
} = bundle;
for (index, ((blob, kzg_commitment), kzg_proof)) in blobs
.into_iter()
.zip(commitments.into_iter())
.zip(proofs.into_iter())
.enumerate()
{
blob_sidecars.push(BlobSidecar {
index: index as u64,
blob: blob.clone(),
kzg_commitment,
kzg_proof,
signed_block_header: block.signed_block_header(),
kzg_commitment_inclusion_proof: block
.message()
.body()
.kzg_commitment_merkle_proof(index)
.unwrap(),
});
}
}
(block, blob_sidecars)
}

View File

@ -2,10 +2,15 @@
//!
//! This component should not affect consensus.
use crate::beacon_proposer_cache::{BeaconProposerCache, TYPICAL_SLOTS_PER_EPOCH};
use crate::metrics;
use parking_lot::RwLock;
use slog::{crit, debug, info, Logger};
use itertools::Itertools;
use parking_lot::{Mutex, RwLock};
use serde::{Deserialize, Serialize};
use slog::{crit, debug, error, info, warn, Logger};
use slot_clock::SlotClock;
use smallvec::SmallVec;
use state_processing::common::get_attestation_participation_flag_indices;
use state_processing::per_epoch_processing::{
errors::EpochProcessingError, EpochProcessingSummary,
};
@ -14,12 +19,16 @@ use std::convert::TryFrom;
use std::io;
use std::marker::PhantomData;
use std::str::Utf8Error;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::AbstractExecPayload;
use types::consts::altair::{
TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX,
};
use types::{
AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec, Hash256,
IndexedAttestation, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof,
SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit,
Attestation, AttestationData, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError,
ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes,
SignedAggregateAndProof, SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit,
};
/// Used for Prometheus labels.
@ -35,7 +44,43 @@ pub const HISTORIC_EPOCHS: usize = 10;
/// Once the validator monitor reaches this number of validators it will stop
/// tracking their metrics/logging individually in an effort to reduce
/// Prometheus cardinality and log volume.
pub const DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD: usize = 64;
const DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD: usize = 64;
/// Lag slots used in detecting missed blocks for the monitored validators
pub const MISSED_BLOCK_LAG_SLOTS: usize = 4;
/// The number of epochs to look back when determining if a validator has missed a block. This value is used
/// with the beacon_proposer_cache.
/// Setting it to anything higher than 1 is likely to be problematic, because the beacon_proposer_cache
/// is only populated for the current and the previous epoch.
pub const MISSED_BLOCK_LOOKBACK_EPOCHS: u64 = 1;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
// Initial configuration values for the `ValidatorMonitor`.
pub struct ValidatorMonitorConfig {
pub auto_register: bool,
pub validators: Vec<PublicKeyBytes>,
pub individual_tracking_threshold: usize,
}
impl Default for ValidatorMonitorConfig {
fn default() -> Self {
Self {
auto_register: false,
validators: vec![],
individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
}
}
}
/// The goal is to check the behaviour of the BN if it pretends to attest at each slot.
/// Check the head/target/source once `state.slot` is some slots beyond `attestation.data.slot`,
/// to defend against re-orgs. 16 slots is the minimum needed to defend against re-orgs of up to 16 slots.
pub const UNAGGREGATED_ATTESTATION_LAG_SLOTS: usize = 16;
/// Bound the storage size of simulated attestations. The head state can only verify attestations
/// from the current and previous epoch.
pub const MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH: usize = 64;
#[derive(Debug)]
pub enum Error {
@ -323,6 +368,13 @@ impl MonitoredValidator {
}
}
#[derive(PartialEq, Hash, Eq)]
struct MissedBlock {
slot: Slot,
parent_root: Hash256,
validator_index: u64,
}
/// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P
/// network, HTTP API and `BeaconChain`.
///
@ -331,7 +383,7 @@ impl MonitoredValidator {
///
/// The intention of this struct is to provide users with more logging and Prometheus metrics around
/// validators that they are interested in.
pub struct ValidatorMonitor<T> {
pub struct ValidatorMonitor<T: EthSpec> {
/// The validators that require additional monitoring.
validators: HashMap<PublicKeyBytes, MonitoredValidator>,
/// A map of validator index (state.validators) to a validator public key.
@ -343,26 +395,40 @@ pub struct ValidatorMonitor<T> {
/// large validator counts causing infeasibly high cardinality for
/// Prometheus and high log volumes.
individual_tracking_threshold: usize,
/// A Map representing the (non-finalized) missed blocks by epoch, validator_index(state.validators) and slot
missed_blocks: HashSet<MissedBlock>,
// A beacon proposer cache
beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>,
// Unaggregated attestations generated by the committee index at each slot.
unaggregated_attestations: HashMap<Slot, Attestation<T>>,
log: Logger,
_phantom: PhantomData<T>,
}
impl<T: EthSpec> ValidatorMonitor<T> {
pub fn new(
pubkeys: Vec<PublicKeyBytes>,
auto_register: bool,
individual_tracking_threshold: usize,
config: ValidatorMonitorConfig,
beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>,
log: Logger,
) -> Self {
let ValidatorMonitorConfig {
auto_register,
validators,
individual_tracking_threshold,
} = config;
let mut s = Self {
validators: <_>::default(),
indices: <_>::default(),
auto_register,
individual_tracking_threshold,
missed_blocks: <_>::default(),
beacon_proposer_cache,
unaggregated_attestations: <_>::default(),
log,
_phantom: PhantomData,
};
for pubkey in pubkeys {
for pubkey in validators {
s.add_validator_pubkey(pubkey)
}
s
@ -376,7 +442,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
}
/// Add some validators to `self` for additional monitoring.
fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) {
pub fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) {
let index_opt = self
.indices
.iter()
@ -394,9 +460,32 @@ impl<T: EthSpec> ValidatorMonitor<T> {
});
}
/// Add an unaggregated attestation
pub fn set_unaggregated_attestation(&mut self, attestation: Attestation<T>) {
let unaggregated_attestations = &mut self.unaggregated_attestations;
// Pruning: remove the oldest key/value pair from the hashmap once its length reaches MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH
if unaggregated_attestations.len() >= MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH {
if let Some(oldest_slot) = unaggregated_attestations.keys().min().copied() {
unaggregated_attestations.remove(&oldest_slot);
}
}
let slot = attestation.data.slot;
self.unaggregated_attestations.insert(slot, attestation);
}
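// Editor's sketch of the bounded-map insert above: once the map has reached its
// capacity bound, evict the entry with the smallest key (the oldest slot)
// before inserting. Simplified types; the real map stores `Attestation`s keyed
// by `Slot`.
use std::collections::HashMap;
fn insert_bounded(map: &mut HashMap<u64, String>, max_len: usize, slot: u64, value: String) {
    if map.len() >= max_len {
        if let Some(oldest_slot) = map.keys().min().copied() {
            map.remove(&oldest_slot);
        }
    }
    map.insert(slot, value);
}
fn main() {
    let mut map = HashMap::new();
    for slot in 0..70u64 {
        insert_bounded(&mut map, 64, slot, format!("attestation at {slot}"));
    }
    assert_eq!(map.len(), 64);
    assert!(!map.contains_key(&0)); // the oldest slots were evicted
}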
pub fn get_unaggregated_attestation(&self, slot: Slot) -> Option<&Attestation<T>> {
self.unaggregated_attestations.get(&slot)
}
/// Reads information from the given `state`. The `state` *must* be valid (i.e, able to be
/// imported).
pub fn process_valid_state(&mut self, current_epoch: Epoch, state: &BeaconState<T>) {
pub fn process_valid_state(
&mut self,
current_epoch: Epoch,
state: &BeaconState<T>,
spec: &ChainSpec,
) {
// Add any new validator indices.
state
.validators()
@ -411,6 +500,10 @@ impl<T: EthSpec> ValidatorMonitor<T> {
self.indices.insert(i, validator.pubkey);
});
// Add missed non-finalized blocks for the monitored validators
self.add_validators_missed_blocks(state);
self.process_unaggregated_attestations(state, spec);
// Update metrics for individual validators.
for monitored_validator in self.validators.values() {
if let Some(i) = monitored_validator.index {
@ -489,6 +582,192 @@ impl<T: EthSpec> ValidatorMonitor<T> {
}
}
}
// Prune missed blocks that are prior to the finalized epoch minus MISSED_BLOCK_LOOKBACK_EPOCHS
let finalized_epoch = state.finalized_checkpoint().epoch;
self.missed_blocks.retain(|missed_block| {
let epoch = missed_block.slot.epoch(T::slots_per_epoch());
epoch + Epoch::new(MISSED_BLOCK_LOOKBACK_EPOCHS) >= finalized_epoch
});
}
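// Editor's sketch of the retain-based pruning above: a missed block is kept
// only while its epoch is within MISSED_BLOCK_LOOKBACK_EPOCHS of the finalized
// epoch. Plain integers stand in for `Slot`/`Epoch`.
use std::collections::HashSet;
fn prune(missed_block_epochs: &mut HashSet<u64>, finalized_epoch: u64, lookback: u64) {
    missed_block_epochs.retain(|&epoch| epoch + lookback >= finalized_epoch);
}
fn main() {
    let mut epochs: HashSet<u64> = [7, 8, 9, 10].into_iter().collect();
    prune(&mut epochs, 10, 1);
    // Epochs 7 and 8 fall outside the window; 9 and 10 remain.
    assert_eq!(epochs, [9, 10].into_iter().collect::<HashSet<u64>>());
}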
/// Add missed non-finalized blocks for the monitored validators
fn add_validators_missed_blocks(&mut self, state: &BeaconState<T>) {
// Define range variables
let current_slot = state.slot();
let current_epoch = current_slot.epoch(T::slots_per_epoch());
// start_slot needs to be consistent with what can be retrieved from the beacon_proposer_cache
let start_slot = current_epoch.start_slot(T::slots_per_epoch())
- Slot::new(MISSED_BLOCK_LOOKBACK_EPOCHS * T::slots_per_epoch());
let end_slot = current_slot.saturating_sub(MISSED_BLOCK_LAG_SLOTS).as_u64();
// List of proposers per epoch from the beacon_proposer_cache, and the epoch at which the
// cache is valid.
let mut proposers_per_epoch: Option<(SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>, Epoch)> =
None;
for (prev_slot, slot) in (start_slot.as_u64()..=end_slot)
.map(Slot::new)
.tuple_windows()
{
// The condition for a missed block is block_root(slot) == block_root(slot - 1),
// where the proposer who missed the block is the proposer of the block at block_root(slot).
if let (Ok(block_root), Ok(prev_block_root)) =
(state.get_block_root(slot), state.get_block_root(prev_slot))
{
// Found missed block
if block_root == prev_block_root {
let slot_epoch = slot.epoch(T::slots_per_epoch());
if let Ok(shuffling_decision_block) =
state.proposer_shuffling_decision_root_at_epoch(slot_epoch, *block_root)
{
// Update the cache if it has not yet been initialised, or if it is
// initialised for a prior epoch. This is an optimisation to avoid bouncing
// the proposer shuffling cache lock when there are lots of missed blocks.
if proposers_per_epoch
.as_ref()
.map_or(true, |(_, cached_epoch)| *cached_epoch != slot_epoch)
{
proposers_per_epoch = self
.get_proposers_by_epoch_from_cache(
slot_epoch,
shuffling_decision_block,
)
.map(|cache| (cache, slot_epoch));
}
// Only add a missed block for the proposer if it is in the list of monitored validators
let slot_in_epoch = slot % T::slots_per_epoch();
if let Some(proposer_index) = proposers_per_epoch
.as_ref()
.and_then(|(proposers, _)| proposers.get(slot_in_epoch.as_usize()))
{
let i = *proposer_index as u64;
if let Some(pub_key) = self.indices.get(&i) {
if let Some(validator) = self.validators.get(pub_key) {
let missed_block = MissedBlock {
slot,
parent_root: *prev_block_root,
validator_index: i,
};
// Increment the missed block counter for the validator only if this block doesn't already exist in the hashset
if self.missed_blocks.insert(missed_block) {
self.aggregatable_metric(&validator.id, |label| {
metrics::inc_counter_vec(
&metrics::VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL,
&[label],
);
});
error!(
self.log,
"Validator missed a block";
"index" => i,
"slot" => slot,
"parent block root" => ?prev_block_root,
);
}
}
} else {
warn!(
self.log,
"Missing validator index";
"info" => "potentially inconsistency in the validator manager",
"index" => i,
)
}
} else {
debug!(
self.log,
"Could not get proposers from cache";
"epoch" => ?slot_epoch,
"decision_root" => ?shuffling_decision_block,
);
}
}
}
}
}
}
fn get_proposers_by_epoch_from_cache(
&mut self,
epoch: Epoch,
shuffling_decision_block: Hash256,
) -> Option<SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>> {
let mut cache = self.beacon_proposer_cache.lock();
cache
.get_epoch::<T>(shuffling_decision_block, epoch)
.cloned()
}
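// Editor's sketch of the per-epoch memoisation in `add_validators_missed_blocks`:
// recompute the proposer list only when the epoch changes, so consecutive missed
// slots in one epoch don't bounce the proposer-cache lock. `lookup` is a
// hypothetical stand-in for the beacon proposer cache.
fn proposer_at<F>(slot: u64, slots_per_epoch: u64, memo: &mut Option<(Vec<usize>, u64)>, lookup: F) -> Option<usize>
where
    F: Fn(u64) -> Option<Vec<usize>>,
{
    let epoch = slot / slots_per_epoch;
    // Refresh the memo if it is unset or was built for a different epoch.
    if memo.as_ref().map_or(true, |(_, cached)| *cached != epoch) {
        *memo = lookup(epoch).map(|proposers| (proposers, epoch));
    }
    let slot_in_epoch = (slot % slots_per_epoch) as usize;
    memo.as_ref()
        .and_then(|(proposers, _)| proposers.get(slot_in_epoch).copied())
}
fn main() {
    let mut memo = None;
    // Hypothetical shuffling: proposer i takes slot i of each epoch.
    let lookup = |_epoch: u64| Some((0..32).collect::<Vec<usize>>());
    assert_eq!(proposer_at(33, 32, &mut memo, lookup), Some(1));
}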
/// Process the unaggregated attestations generated by the service `attestation_simulator_service`
/// and check if the attestation qualifies for a reward matching the flags source/target/head
fn process_unaggregated_attestations(&mut self, state: &BeaconState<T>, spec: &ChainSpec) {
let current_slot = state.slot();
// Ensures that we process attestations when there have been skipped slots between blocks
let attested_slots: Vec<_> = self
.unaggregated_attestations
.keys()
.filter(|&&attestation_slot| {
attestation_slot
< current_slot - Slot::new(UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64)
})
.cloned()
.collect();
let unaggregated_attestations = &mut self.unaggregated_attestations;
for slot in attested_slots {
if let Some(unaggregated_attestation) = unaggregated_attestations.remove(&slot) {
// Don't process this attestation, it's too old to be processed by this state.
if slot.epoch(T::slots_per_epoch()) < state.previous_epoch() {
continue;
}
// We are simulating this unaggregated attestation in a service that produces unaggregated attestations
// every slot. The inclusion_delay shouldn't matter here, as long as the minimum value
// that qualifies the committee index for a reward is included
let inclusion_delay = spec.min_attestation_inclusion_delay;
let data = &unaggregated_attestation.data;
// Get the reward indices for the unaggregated attestation or log an error
match get_attestation_participation_flag_indices(
state,
&unaggregated_attestation.data,
inclusion_delay,
spec,
) {
Ok(flag_indices) => {
let head_hit = flag_indices.contains(&TIMELY_HEAD_FLAG_INDEX);
let target_hit = flag_indices.contains(&TIMELY_TARGET_FLAG_INDEX);
let source_hit = flag_indices.contains(&TIMELY_SOURCE_FLAG_INDEX);
register_simulated_attestation(
data, head_hit, target_hit, source_hit, &self.log,
)
}
Err(BeaconStateError::IncorrectAttestationSource) => {
register_simulated_attestation(data, false, false, false, &self.log)
}
Err(err) => {
error!(
self.log,
"Failed to get attestation participation flag indices";
"error" => ?err,
"unaggregated_attestation" => ?unaggregated_attestation,
);
}
}
} else {
error!(
self.log,
"Failed to remove unaggregated attestation from the hashmap";
"slot" => ?slot,
);
}
}
}
/// Run `func` with the `TOTAL_LABEL` and optionally the
@ -822,6 +1101,17 @@ impl<T: EthSpec> ValidatorMonitor<T> {
}
}
pub fn get_monitored_validator_missed_block_count(&self, validator_index: u64) -> u64 {
self.missed_blocks
.iter()
.filter(|missed_block| missed_block.validator_index == validator_index)
.count() as u64
}
pub fn get_beacon_proposer_cache(&self) -> Arc<Mutex<BeaconProposerCache>> {
self.beacon_proposer_cache.clone()
}
/// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`.
/// Otherwise, do nothing.
pub fn auto_register_local_validator(&mut self, validator_index: u64) {
@ -1731,6 +2021,46 @@ impl<T: EthSpec> ValidatorMonitor<T> {
}
}
fn register_simulated_attestation(
data: &AttestationData,
head_hit: bool,
target_hit: bool,
source_hit: bool,
log: &Logger,
) {
if head_hit {
metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT);
} else {
metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS);
}
if target_hit {
metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT);
} else {
metrics::inc_counter(
&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS,
);
}
if source_hit {
metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT);
} else {
metrics::inc_counter(
&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS,
);
}
debug!(
log,
"Simulated attestation evaluated";
"attestation_source" => ?data.source.root,
"attestation_target" => ?data.target.root,
"attestation_head" => ?data.beacon_block_root,
"attestation_slot" => ?data.slot,
"source_hit" => source_hit,
"target_hit" => target_hit,
"head_hit" => head_hit,
);
}
/// Returns the duration since the unix epoch.
pub fn timestamp_now() -> Duration {
SystemTime::now()

View File

@ -38,7 +38,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
};
let store_ops = cache.import_new_pubkeys(state)?;
store.do_atomically(store_ops)?;
store.do_atomically_with_block_and_blobs_cache(store_ops)?;
Ok(cache)
}
@ -299,7 +299,7 @@ mod test {
let ops = cache
.import_new_pubkeys(&state)
.expect("should import pubkeys");
store.do_atomically(ops).unwrap();
store.do_atomically_with_block_and_blobs_cache(ops).unwrap();
check_cache_get(&cache, &keypairs[..]);
drop(cache);

View File

@ -1,7 +1,10 @@
#![cfg(not(debug_assertions))]
use beacon_chain::attestation_simulator::produce_unaggregated_attestation;
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS;
use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped};
use lazy_static::lazy_static;
use std::sync::Arc;
use tree_hash::TreeHash;
@ -14,6 +17,91 @@ lazy_static! {
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
/// This test builds a chain to test the performance of the unaggregated attestations
/// produced by the attestation simulator service.
#[tokio::test]
async fn produces_attestations_from_attestation_simulator_service() {
// Produce 2 epochs, or 64 blocks
let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 2;
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.default_spec()
.keypairs(KEYPAIRS[..].to_vec())
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
let chain = &harness.chain;
// Test all valid committee indices and their rewards for all slots in the chain
// using validator monitor
for slot in 0..=num_blocks_produced {
// We do not produce at slot=0, and there's no committee cache available anyway
if slot > 0 && slot <= num_blocks_produced {
harness.advance_slot();
harness
.extend_chain(
1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
}
// Set the state to the current slot
let slot = Slot::from(slot);
let mut state = chain
.state_at_slot(slot, StateSkipConfig::WithStateRoots)
.expect("should get state");
// Prebuild the committee cache for the current epoch
state
.build_committee_cache(RelativeEpoch::Current, &harness.chain.spec)
.unwrap();
// Produce an unaggregated attestation
produce_unaggregated_attestation(chain.clone(), chain.slot().unwrap());
// Verify that the unaggregated attestation is stored in the validator monitor
let validator_monitor = chain.validator_monitor.read();
validator_monitor
.get_unaggregated_attestation(slot)
.expect("should get unaggregated attestation");
}
// Compare the prometheus metrics that evaluate the performance of the unaggregated attestations
let hit_prometheus_metrics = vec![
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL,
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL,
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL,
];
let miss_prometheus_metrics = vec![
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL,
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL,
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL,
];
// The expected metrics count only applies to hit metrics, as miss metrics are never set and cannot be found
// when gathering prometheus metrics. If they are found, which they should not be, the value will differ from 0 and fail the test.
let expected_miss_metrics_count = 0;
let expected_hit_metrics_count =
num_blocks_produced - UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64;
lighthouse_metrics::gather().iter().for_each(|mf| {
if hit_prometheus_metrics.contains(&mf.get_name()) {
assert_eq!(
mf.get_metric()[0].get_counter().get_value() as u64,
expected_hit_metrics_count
);
}
if miss_prometheus_metrics.contains(&mf.get_name()) {
assert_eq!(
mf.get_metric()[0].get_counter().get_value() as u64,
expected_miss_metrics_count
);
}
});
}
/// This test builds a chain that is just long enough to finalize an epoch then it produces an
/// attestation at each slot from genesis through to three epochs past the head.
///
@ -67,6 +155,7 @@ async fn produces_attestations() {
.store
.make_full_block(&block_root, blinded_block)
.unwrap();
let blobs = chain.get_blobs(&block_root).unwrap();
let epoch_boundary_slot = state
.current_epoch()
@ -131,6 +220,19 @@ async fn produces_attestations() {
assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
assert_eq!(data.target.root, target_root, "bad target root");
let rpc_block =
RpcBlock::<MainnetEthSpec>::new(None, Arc::new(block.clone()), Some(blobs.clone()))
.unwrap();
let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(
available_block,
) = chain
.data_availability_checker
.verify_kzg_for_rpc_block(rpc_block)
.unwrap()
else {
panic!("block should be available")
};
let early_attestation = {
let proto_block = chain
.canonical_head
@ -141,7 +243,7 @@ async fn produces_attestations() {
.early_attester_cache
.add_head_block(
block_root,
Arc::new(block.clone()),
available_block,
proto_block,
&state,
&chain.spec,
@ -192,12 +294,29 @@ async fn early_attester_cache_old_request() {
.get_block(&head.beacon_block_root)
.unwrap();
let head_blobs = harness
.chain
.get_blobs(&head.beacon_block_root)
.expect("should get blobs");
let rpc_block =
RpcBlock::<MainnetEthSpec>::new(None, head.beacon_block.clone(), Some(head_blobs)).unwrap();
let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) =
harness
.chain
.data_availability_checker
.verify_kzg_for_rpc_block(rpc_block)
.unwrap()
else {
panic!("block should be available")
};
harness
.chain
.early_attester_cache
.add_head_block(
head.beacon_block_root,
head.beacon_block.clone(),
available_block,
head_proto_block,
&head.beacon_state,
&harness.chain.spec,

View File

@ -334,10 +334,28 @@ impl GossipTester {
self.harness.chain.epoch().unwrap()
}
pub fn two_epochs_ago(&self) -> Slot {
pub fn earliest_valid_attestation_slot(&self) -> Slot {
let offset = match self.harness.spec.fork_name_at_epoch(self.epoch()) {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
// Subtract an additional slot since the harness will be exactly on the start of the
// slot and the propagation tolerance will allow an extra slot.
E::slots_per_epoch() + 1
}
// EIP-7045
ForkName::Deneb => {
let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64();
if epoch_slot_offset != 0 {
E::slots_per_epoch() + epoch_slot_offset
} else {
// Here the propagation tolerance will cause the cutoff to be an entire epoch earlier
2 * E::slots_per_epoch()
}
}
};
self.slot()
.as_u64()
.checked_sub(E::slots_per_epoch() + 2)
.checked_sub(offset)
.expect("chain is not sufficiently deep for test")
.into()
}
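// Editor's sketch of the cutoff computed by `earliest_valid_attestation_slot`
// above. Pre-Deneb the gossip window is roughly one epoch of slots plus one
// slot of propagation tolerance; after EIP-7045 (Deneb) attestations from the
// current and previous epoch are valid, so the window stretches back to the
// start of the previous epoch. Self-contained, with plain integers for slots.
fn earliest_valid_slot(current_slot: u64, slots_per_epoch: u64, deneb_active: bool) -> u64 {
    let offset = if !deneb_active {
        slots_per_epoch + 1
    } else {
        let epoch_slot_offset = current_slot % slots_per_epoch;
        if epoch_slot_offset != 0 {
            slots_per_epoch + epoch_slot_offset
        } else {
            // On an epoch boundary the propagation tolerance pushes the cutoff
            // a whole epoch earlier.
            2 * slots_per_epoch
        }
    };
    current_slot - offset
}
fn main() {
    // Mid-epoch under Deneb: slot 70 (epoch 2, offset into the epoch 6) reaches
    // back to slot 32, the start of the previous epoch.
    assert_eq!(earliest_valid_slot(70, 32, true), 32);
    // Pre-Deneb: a flat one-epoch-plus-one-slot window.
    assert_eq!(earliest_valid_slot(70, 32, false), 37);
}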
@ -484,18 +502,21 @@ async fn aggregated_gossip_verification() {
)
.inspect_aggregate_err(
"aggregate from past slot",
|tester, a| a.message.aggregate.data.slot = tester.two_epochs_ago(),
|tester, a| {
let too_early_slot = tester.earliest_valid_attestation_slot() - 1;
a.message.aggregate.data.slot = too_early_slot;
a.message.aggregate.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch());
},
|tester, err| {
let valid_early_slot = tester.earliest_valid_attestation_slot();
assert!(matches!(
err,
AttnError::PastSlot {
attestation_slot,
// Subtract an additional slot since the harness will be exactly on the start of the
// slot and the propagation tolerance will allow an extra slot.
earliest_permissible_slot
}
if attestation_slot == tester.two_epochs_ago()
&& earliest_permissible_slot == tester.slot() - E::slots_per_epoch() - 1
if attestation_slot == valid_early_slot - 1
&& earliest_permissible_slot == valid_early_slot
))
},
)
@ -800,22 +821,20 @@ async fn unaggregated_gossip_verification() {
.inspect_unaggregate_err(
"attestation from past slot",
|tester, a, _| {
let early_slot = tester.two_epochs_ago();
a.data.slot = early_slot;
a.data.target.epoch = early_slot.epoch(E::slots_per_epoch());
let too_early_slot = tester.earliest_valid_attestation_slot() - 1;
a.data.slot = too_early_slot;
a.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch());
},
|tester, err| {
dbg!(&err);
let valid_early_slot = tester.earliest_valid_attestation_slot();
assert!(matches!(
err,
AttnError::PastSlot {
attestation_slot,
// Subtract an additional slot since the harness will be exactly on the start of the
// slot and the propagation tolerance will allow an extra slot.
earliest_permissible_slot,
}
if attestation_slot == tester.two_epochs_ago()
&& earliest_permissible_slot == tester.slot() - E::slots_per_epoch() - 1
if attestation_slot == valid_early_slot - 1
&& earliest_permissible_slot == valid_early_slot
))
},
)

View File

@ -1,7 +1,9 @@
#![cfg(not(debug_assertions))]
// #![cfg(not(debug_assertions))]
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock};
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock,
};
use beacon_chain::{
BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock,
@ -33,7 +35,7 @@ lazy_static! {
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
async fn get_chain_segment() -> (Vec<BeaconSnapshot<E>>, Vec<Option<BlobSidecarList<E>>>) {
let harness = get_harness(VALIDATOR_COUNT);
harness
@ -45,6 +47,7 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
.await;
let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
for snapshot in harness
.chain
.chain_dump()
@ -63,8 +66,55 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
beacon_block: Arc::new(full_block),
beacon_state: snapshot.beacon_state,
});
segment_blobs.push(Some(
harness
.chain
.get_blobs(&snapshot.beacon_block_root)
.unwrap(),
))
}
segment
(segment, segment_blobs)
}
async fn get_chain_segment_with_blob_sidecars(
) -> (Vec<BeaconSnapshot<E>>, Vec<Option<BlobSidecarList<E>>>) {
let harness = get_harness(VALIDATOR_COUNT);
harness
.extend_chain(
CHAIN_SEGMENT_LENGTH,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
for snapshot in harness
.chain
.chain_dump()
.expect("should dump chain")
.into_iter()
.skip(1)
{
let full_block = harness
.chain
.get_block(&snapshot.beacon_block_root)
.await
.unwrap()
.unwrap();
segment.push(BeaconSnapshot {
beacon_block_root: snapshot.beacon_block_root,
beacon_block: Arc::new(full_block),
beacon_state: snapshot.beacon_state,
});
let blob_sidecars = harness
.chain
.get_blobs(&snapshot.beacon_block_root)
.unwrap();
segment_blobs.push(Some(blob_sidecars))
}
(segment, segment_blobs)
}
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
@ -84,10 +134,16 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp
harness
}
fn chain_segment_blocks(chain_segment: &[BeaconSnapshot<E>]) -> Vec<Arc<SignedBeaconBlock<E>>> {
fn chain_segment_blocks(
chain_segment: &[BeaconSnapshot<E>],
blobs: &[Option<BlobSidecarList<E>>],
) -> Vec<RpcBlock<E>> {
chain_segment
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.zip(blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect()
}
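// `RpcBlock` pairs each block with its (optional) blob sidecars so segment
// processing can perform data-availability checks; blocks without blobs
// (e.g. pre-Deneb) simply carry `None` (inferred from the constructors used in this file).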
@ -129,22 +185,52 @@ fn update_proposal_signatures(
}
}
fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
fn update_parent_roots(
snapshots: &mut [BeaconSnapshot<E>],
blobs: &mut [Option<BlobSidecarList<E>>],
) {
for i in 0..snapshots.len() {
let root = snapshots[i].beacon_block.canonical_root();
if let Some(child) = snapshots.get_mut(i + 1) {
if let (Some(child), Some(child_blobs)) = (snapshots.get_mut(i + 1), blobs.get_mut(i + 1)) {
let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct();
*block.parent_root_mut() = root;
child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature))
let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature));
if let Some(blobs) = child_blobs {
update_blob_signed_header(&new_child, blobs);
}
child.beacon_block = new_child;
}
}
}
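// Rebuilding and re-signing a child block invalidates any blob sidecars that
// reference it: both `signed_block_header` and `kzg_commitment_inclusion_proof`
// are derived from the block, so the helper below recomputes them for every sidecar.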
fn update_blob_signed_header<E: EthSpec>(
signed_block: &SignedBeaconBlock<E>,
blobs: &mut BlobSidecarList<E>,
) {
for old_blob_sidecar in blobs.iter_mut() {
let new_blob = Arc::new(BlobSidecar::<E> {
index: old_blob_sidecar.index,
blob: old_blob_sidecar.blob.clone(),
kzg_commitment: old_blob_sidecar.kzg_commitment,
kzg_proof: old_blob_sidecar.kzg_proof,
signed_block_header: signed_block.signed_block_header(),
kzg_commitment_inclusion_proof: signed_block
.message()
.body()
.kzg_commitment_merkle_proof(old_blob_sidecar.index as usize)
.unwrap(),
});
*old_blob_sidecar = new_blob;
}
}
#[tokio::test]
async fn chain_segment_full_segment() {
let harness = get_harness(VALIDATOR_COUNT);
let chain_segment = get_chain_segment().await;
let blocks = chain_segment_blocks(&chain_segment);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.collect();
harness
.chain
@ -179,8 +265,10 @@ async fn chain_segment_full_segment() {
async fn chain_segment_varying_chunk_size() {
for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] {
let harness = get_harness(VALIDATOR_COUNT);
let chain_segment = get_chain_segment().await;
let blocks = chain_segment_blocks(&chain_segment);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.collect();
harness
.chain
@ -209,7 +297,7 @@ async fn chain_segment_varying_chunk_size() {
#[tokio::test]
async fn chain_segment_non_linear_parent_roots() {
let harness = get_harness(VALIDATOR_COUNT);
let chain_segment = get_chain_segment().await;
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
harness
.chain
@ -219,7 +307,9 @@ async fn chain_segment_non_linear_parent_roots() {
/*
* Test with a block removed.
*/
let mut blocks = chain_segment_blocks(&chain_segment);
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.collect();
blocks.remove(2);
assert!(
@ -237,10 +327,16 @@ async fn chain_segment_non_linear_parent_roots() {
/*
* Test with a modified parent root.
*/
let mut blocks = chain_segment_blocks(&chain_segment);
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.parent_root_mut() = Hash256::zero();
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@ -258,7 +354,7 @@ async fn chain_segment_non_linear_parent_roots() {
#[tokio::test]
async fn chain_segment_non_linear_slots() {
let harness = get_harness(VALIDATOR_COUNT);
let chain_segment = get_chain_segment().await;
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
harness
.chain
.slot_clock
@ -268,10 +364,15 @@ async fn chain_segment_non_linear_slots() {
* Test where a child is lower than the parent.
*/
let mut blocks = chain_segment_blocks(&chain_segment);
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = Slot::new(0);
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@ -289,10 +390,15 @@ async fn chain_segment_non_linear_slots() {
* Test where a child is equal to the parent.
*/
let mut blocks = chain_segment_blocks(&chain_segment);
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = blocks[2].slot();
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@ -309,14 +415,18 @@ async fn chain_segment_non_linear_slots() {
async fn assert_invalid_signature(
chain_segment: &[BeaconSnapshot<E>],
chain_segment_blobs: &[Option<BlobSidecarList<E>>],
harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
block_index: usize,
snapshots: &[BeaconSnapshot<E>],
item: &str,
) {
let blocks = snapshots
let blocks: Vec<RpcBlock<E>> = snapshots
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
// Ensure the block will be rejected if imported in a chain segment.
@ -340,7 +450,10 @@ async fn assert_invalid_signature(
let ancestor_blocks = chain_segment
.iter()
.take(block_index)
.map(|snapshot| snapshot.beacon_block.clone())
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
// We don't care if this fails, we just call this to ensure that all prior blocks have been
// imported prior to this test.
@ -354,7 +467,12 @@ async fn assert_invalid_signature(
.chain
.process_block(
snapshots[block_index].beacon_block.canonical_root(),
snapshots[block_index].beacon_block.clone(),
RpcBlock::new(
None,
snapshots[block_index].beacon_block.clone(),
chain_segment_blobs[block_index].clone(),
)
.unwrap(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
@ -386,7 +504,7 @@ async fn get_invalid_sigs_harness(
}
#[tokio::test]
async fn invalid_signature_gossip_block() {
let chain_segment = get_chain_segment().await;
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
// Ensure the block will be rejected if imported on its own (without gossip checking).
let harness = get_invalid_sigs_harness(&chain_segment).await;
@ -404,7 +522,10 @@ async fn invalid_signature_gossip_block() {
let ancestor_blocks = chain_segment
.iter()
.take(block_index)
.map(|snapshot| snapshot.beacon_block.clone())
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
harness
.chain
@ -433,7 +554,7 @@ async fn invalid_signature_gossip_block() {
#[tokio::test]
async fn invalid_signature_block_proposal() {
let chain_segment = get_chain_segment().await;
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -446,9 +567,12 @@ async fn invalid_signature_block_proposal() {
block.clone(),
junk_signature(),
));
let blocks = snapshots
let blocks: Vec<RpcBlock<E>> = snapshots
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect::<Vec<_>>();
// Ensure the block will be rejected if imported in a chain segment.
assert!(
@ -467,7 +591,7 @@ async fn invalid_signature_block_proposal() {
#[tokio::test]
async fn invalid_signature_randao_reveal() {
let chain_segment = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -479,15 +603,23 @@ async fn invalid_signature_randao_reveal() {
*block.body_mut().randao_reveal_mut() = junk_signature();
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await;
assert_invalid_signature(
&chain_segment,
&chain_segment_blobs,
&harness,
block_index,
&snapshots,
"randao",
)
.await;
}
}
#[tokio::test]
async fn invalid_signature_proposer_slashing() {
let chain_segment = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -513,10 +645,11 @@ async fn invalid_signature_proposer_slashing() {
.expect("should update proposer slashing");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
&chain_segment_blobs,
&harness,
block_index,
&snapshots,
@ -528,7 +661,7 @@ async fn invalid_signature_proposer_slashing() {
#[tokio::test]
async fn invalid_signature_attester_slashing() {
let chain_segment = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -565,10 +698,11 @@ async fn invalid_signature_attester_slashing() {
.expect("should update attester slashing");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
&chain_segment_blobs,
&harness,
block_index,
&snapshots,
@ -580,7 +714,7 @@ async fn invalid_signature_attester_slashing() {
#[tokio::test]
async fn invalid_signature_attestation() {
let chain_segment = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
let mut checked_attestation = false;
for &block_index in BLOCK_INDICES {
@ -595,10 +729,11 @@ async fn invalid_signature_attestation() {
attestation.signature = junk_aggregate_signature();
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
&chain_segment_blobs,
&harness,
block_index,
&snapshots,
@ -617,7 +752,7 @@ async fn invalid_signature_attestation() {
#[tokio::test]
async fn invalid_signature_deposit() {
let chain_segment = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
// Note: an invalid deposit signature is permitted!
let harness = get_invalid_sigs_harness(&chain_segment).await;
@ -643,11 +778,14 @@ async fn invalid_signature_deposit() {
.expect("should update deposit");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
let blocks = snapshots
let blocks: Vec<RpcBlock<E>> = snapshots
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
assert!(
!matches!(
@ -665,7 +803,7 @@ async fn invalid_signature_deposit() {
#[tokio::test]
async fn invalid_signature_exit() {
let chain_segment = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -688,10 +826,11 @@ async fn invalid_signature_exit() {
.expect("should update deposit");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
&chain_segment_blobs,
&harness,
block_index,
&snapshots,
@ -711,7 +850,7 @@ fn unwrap_err<T, E>(result: Result<T, E>) -> E {
#[tokio::test]
async fn block_gossip_verification() {
let harness = get_harness(VALIDATOR_COUNT);
let chain_segment = get_chain_segment().await;
let (chain_segment, chain_segment_blobs) = get_chain_segment_with_blob_sidecars().await;
let block_index = CHAIN_SEGMENT_LENGTH - 2;
@ -721,7 +860,10 @@ async fn block_gossip_verification() {
.set_slot(chain_segment[block_index].beacon_block.slot().as_u64());
// Import the ancestors prior to the block we're testing.
for snapshot in &chain_segment[0..block_index] {
for (snapshot, blobs_opt) in chain_segment[0..block_index]
.iter()
.zip(chain_segment_blobs.iter())
{
let gossip_verified = harness
.chain
.verify_block_for_gossip(snapshot.beacon_block.clone())
@ -738,6 +880,21 @@ async fn block_gossip_verification() {
)
.await
.expect("should import valid gossip verified block");
if let Some(blob_sidecars) = blobs_opt {
for blob_sidecar in blob_sidecars {
let blob_index = blob_sidecar.index;
let gossip_verified = harness
.chain
.verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index)
.expect("should obtain gossip verified blob");
harness
.chain
.process_gossip_blob(gossip_verified)
.await
.expect("should import valid gossip verified blob");
}
}
}
// Recompute the head to ensure we cache the latest view of fork choice.
@ -906,7 +1063,6 @@ async fn block_gossip_verification() {
.0;
let expected_proposer = block.proposer_index();
let other_proposer = (0..VALIDATOR_COUNT as u64)
.into_iter()
.find(|i| *i != block.proposer_index())
.expect("there must be more than one validator in this test");
*block.proposer_index_mut() = other_proposer;
@ -958,8 +1114,7 @@ async fn block_gossip_verification() {
.chain
.verify_block_for_gossip(block.clone())
.await
.err()
.expect("should error when processing known block"),
.expect_err("should error when processing known block"),
BlockError::BlockIsAlreadyKnown
),
"the second proposal by this validator should be rejected"
@ -984,14 +1139,27 @@ async fn verify_block_for_gossip_slashing_detection() {
harness.advance_slot();
let state = harness.get_current_state();
let (block1, _) = harness.make_block(state.clone(), Slot::new(1)).await;
let (block2, _) = harness.make_block(state, Slot::new(1)).await;
let ((block1, blobs1), _) = harness.make_block(state.clone(), Slot::new(1)).await;
let ((block2, _blobs2), _) = harness.make_block(state, Slot::new(1)).await;
let verified_block = harness
.chain
.verify_block_for_gossip(Arc::new(block1))
.await
.unwrap();
let verified_block = harness.chain.verify_block_for_gossip(block1).await.unwrap();
if let Some((kzg_proofs, blobs)) = blobs1 {
let sidecars =
BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs).unwrap();
for sidecar in sidecars {
let blob_index = sidecar.index;
let verified_blob = harness
.chain
.verify_blob_sidecar_for_gossip(sidecar, blob_index)
.unwrap();
harness
.chain
.process_gossip_blob(verified_blob)
.await
.unwrap();
}
}
harness
.chain
.process_block(
@ -1002,12 +1170,7 @@ async fn verify_block_for_gossip_slashing_detection() {
)
.await
.unwrap();
unwrap_err(
harness
.chain
.verify_block_for_gossip(Arc::new(block2))
.await,
);
unwrap_err(harness.chain.verify_block_for_gossip(block2).await);
// Slasher should have been handed the two conflicting blocks and crafted a slashing.
slasher.process_queued(Epoch::new(0)).unwrap();
@ -1024,13 +1187,9 @@ async fn verify_block_for_gossip_doppelganger_detection() {
let harness = get_harness(VALIDATOR_COUNT);
let state = harness.get_current_state();
let (block, _) = harness.make_block(state.clone(), Slot::new(1)).await;
let ((block, _), _) = harness.make_block(state.clone(), Slot::new(1)).await;
let verified_block = harness
.chain
.verify_block_for_gossip(Arc::new(block))
.await
.unwrap();
let verified_block = harness.chain.verify_block_for_gossip(block).await.unwrap();
let attestations = verified_block.block.message().body().attestations().clone();
harness
.chain
@ -1111,7 +1270,7 @@ async fn add_base_block_to_altair_chain() {
// Produce an Altair block.
let state = harness.get_current_state();
let slot = harness.get_current_slot();
let (altair_signed_block, _) = harness.make_block(state.clone(), slot).await;
let ((altair_signed_block, _), _) = harness.make_block(state.clone(), slot).await;
let altair_block = &altair_signed_block
.as_altair()
.expect("test expects an altair block")
@ -1170,8 +1329,7 @@ async fn add_base_block_to_altair_chain() {
.chain
.verify_block_for_gossip(Arc::new(base_block.clone()))
.await
.err()
.expect("should error when processing base block"),
.expect_err("should error when processing base block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Altair,
object_fork: ForkName::Base,
@ -1189,8 +1347,7 @@ async fn add_base_block_to_altair_chain() {
|| Ok(()),
)
.await
.err()
.expect("should error when processing base block"),
.expect_err("should error when processing base block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Altair,
object_fork: ForkName::Base,
@ -1201,7 +1358,10 @@ async fn add_base_block_to_altair_chain() {
assert!(matches!(
harness
.chain
.process_chain_segment(vec![Arc::new(base_block)], NotifyExecutionLayer::Yes,)
.process_chain_segment(
vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))],
NotifyExecutionLayer::Yes,
)
.await,
ChainSegmentResult::Failed {
imported_blocks: 0,
@ -1245,7 +1405,7 @@ async fn add_altair_block_to_base_chain() {
// Produce an altair block.
let state = harness.get_current_state();
let slot = harness.get_current_slot();
let (base_signed_block, _) = harness.make_block(state.clone(), slot).await;
let ((base_signed_block, _), _) = harness.make_block(state.clone(), slot).await;
let base_block = &base_signed_block
.as_base()
.expect("test expects a base block")
@ -1305,8 +1465,7 @@ async fn add_altair_block_to_base_chain() {
.chain
.verify_block_for_gossip(Arc::new(altair_block.clone()))
.await
.err()
.expect("should error when processing altair block"),
.expect_err("should error when processing altair block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Base,
object_fork: ForkName::Altair,
@ -1324,8 +1483,7 @@ async fn add_altair_block_to_base_chain() {
|| Ok(()),
)
.await
.err()
.expect("should error when processing altair block"),
.expect_err("should error when processing altair block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Base,
object_fork: ForkName::Altair,
@ -1336,7 +1494,10 @@ async fn add_altair_block_to_base_chain() {
assert!(matches!(
harness
.chain
.process_chain_segment(vec![Arc::new(altair_block)], NotifyExecutionLayer::Yes)
.process_chain_segment(
vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))],
NotifyExecutionLayer::Yes
)
.await,
ChainSegmentResult::Failed {
imported_blocks: 0,
@ -1378,16 +1539,18 @@ async fn import_duplicate_block_unrealized_justification() {
// The store's justified checkpoint must still be at epoch 0, while unrealized justification
// must be at epoch 1.
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1);
drop(fc);
{
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1);
drop(fc);
}
// Produce a block to justify epoch 2.
let state = harness.get_current_state();
let slot = harness.get_current_slot();
let (block, _) = harness.make_block(state.clone(), slot).await;
let block = Arc::new(block);
let (block_contents, _) = harness.make_block(state.clone(), slot).await;
let (block, _) = block_contents;
let block_root = block.canonical_root();
// Create two verified variants of the block, representing the same block being processed in
@ -1395,52 +1558,71 @@ async fn import_duplicate_block_unrealized_justification() {
let notify_execution_layer = NotifyExecutionLayer::Yes;
let verified_block1 = block
.clone()
.into_execution_pending_block(block_root, &chain, notify_execution_layer)
.into_execution_pending_block(block_root, chain, notify_execution_layer)
.unwrap();
let verified_block2 = block
.into_execution_pending_block(block_root, &chain, notify_execution_layer)
.into_execution_pending_block(block_root, chain, notify_execution_layer)
.unwrap();
// Import the first block, simulating a block processed via a finalized chain segment.
chain
.clone()
.import_execution_pending_block(verified_block1)
import_execution_pending_block(chain.clone(), verified_block1)
.await
.unwrap();
// The justified checkpoint should NOT have updated, while unrealized justification moves to epoch 2.
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
let unrealized_justification = fc.unrealized_justified_checkpoint();
assert_eq!(unrealized_justification.epoch, 2);
// The fork choice node for the block should have unrealized justification.
let fc_block = fc.get_block(&block_root).unwrap();
assert_eq!(
fc_block.unrealized_justified_checkpoint,
Some(unrealized_justification)
);
drop(fc);
let unrealized_justification = {
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
let unrealized_justification = fc.unrealized_justified_checkpoint();
assert_eq!(unrealized_justification.epoch, 2);
// The fork choice node for the block should have unrealized justification.
let fc_block = fc.get_block(&block_root).unwrap();
assert_eq!(
fc_block.unrealized_justified_checkpoint,
Some(unrealized_justification)
);
drop(fc);
unrealized_justification
};
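// The read guards above are confined to scopes (and dropped) before the next
// import call: importing a block takes the fork-choice write lock, so holding a
// read guard across it could deadlock.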
// Import the second verified block, simulating a block processed via RPC.
chain
.clone()
.import_execution_pending_block(verified_block2)
import_execution_pending_block(chain.clone(), verified_block2)
.await
.unwrap();
// Unrealized justification should remain at the previously updated value.
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
let fc3 = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc3.justified_checkpoint().epoch, 0);
assert_eq!(
fc.unrealized_justified_checkpoint(),
fc3.unrealized_justified_checkpoint(),
unrealized_justification
);
// The fork choice node for the block should still have the unrealized justified checkpoint.
let fc_block = fc.get_block(&block_root).unwrap();
let fc_block = fc3.get_block(&block_root).unwrap();
drop(fc3);
assert_eq!(
fc_block.unrealized_justified_checkpoint,
Some(unrealized_justification)
);
}
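// Test helper: drives an `ExecutionPendingBlock` through execution and imports the
// resulting block. `AvailabilityPending` is treated as an error because these tests
// supply all blobs up front, so every executed block should be `Available`.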
async fn import_execution_pending_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
execution_pending_block: ExecutionPendingBlock<T>,
) -> Result<AvailabilityProcessingStatus, String> {
match chain
.clone()
.into_executed_block(execution_pending_block)
.await
.unwrap()
{
ExecutedBlock::Available(block) => chain
.import_available_block(Box::from(block))
.await
.map_err(|e| format!("{e:?}")),
ExecutedBlock::AvailabilityPending(_) => {
Err("AvailabilityPending not expected in this test. Block not imported.".to_string())
}
}
}


@ -0,0 +1,93 @@
use beacon_chain::blob_verification::GossipVerifiedBlob;
use beacon_chain::test_utils::BeaconChainHarness;
use eth2::types::{EventKind, SseBlobSidecar};
use rand::rngs::StdRng;
use rand::SeedableRng;
use std::sync::Arc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec};
type E = MinimalEthSpec;
/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API.
#[tokio::test]
async fn blob_sidecar_event_on_process_gossip_blob() {
let spec = ForkName::Deneb.make_genesis_spec(E::default_spec());
let harness = BeaconChainHarness::builder(E::default())
.spec(spec)
.deterministic_keypairs(8)
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
// subscribe to blob sidecar events
let event_handler = harness.chain.event_handler.as_ref().unwrap();
let mut blob_event_receiver = event_handler.subscribe_blob_sidecar();
// build and process a gossip verified blob
let kzg = harness.chain.kzg.as_ref().unwrap();
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
let sidecar = BlobSidecar::random_valid(&mut rng, kzg)
.map(Arc::new)
.unwrap();
let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(sidecar);
let expected_sse_blobs = SseBlobSidecar::from_blob_sidecar(gossip_verified_blob.as_blob());
let _ = harness
.chain
.process_gossip_blob(gossip_verified_blob)
.await
.unwrap();
let sidecar_event = blob_event_receiver.try_recv().unwrap();
assert_eq!(sidecar_event, EventKind::BlobSidecar(expected_sse_blobs));
}
/// Verifies that a blob event is emitted when blobs are received via RPC.
#[tokio::test]
async fn blob_sidecar_event_on_process_rpc_blobs() {
let spec = ForkName::Deneb.make_genesis_spec(E::default_spec());
let harness = BeaconChainHarness::builder(E::default())
.spec(spec)
.deterministic_keypairs(8)
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
// subscribe to blob sidecar events
let event_handler = harness.chain.event_handler.as_ref().unwrap();
let mut blob_event_receiver = event_handler.subscribe_blob_sidecar();
// build and process multiple rpc blobs
let kzg = harness.chain.kzg.as_ref().unwrap();
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
let blob_1 = BlobSidecar::random_valid(&mut rng, kzg)
.map(Arc::new)
.unwrap();
let blob_2 = Arc::new(BlobSidecar {
index: 1,
..BlobSidecar::random_valid(&mut rng, kzg).unwrap()
});
let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]);
let expected_sse_blobs = vec![
SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()),
SseBlobSidecar::from_blob_sidecar(blob_2.as_ref()),
];
let _ = harness
.chain
.process_rpc_blobs(blob_1.slot(), blob_1.block_root(), blobs)
.await
.unwrap();
let mut sse_blobs: Vec<SseBlobSidecar> = vec![];
while let Ok(sidecar_event) = blob_event_receiver.try_recv() {
if let EventKind::BlobSidecar(sse_blob_sidecar) = sidecar_event {
sse_blobs.push(sse_blob_sidecar);
} else {
panic!("`BlobSidecar` event kind expected.");
}
}
assert_eq!(sse_blobs, expected_sse_blobs);
}


@ -2,6 +2,7 @@ mod attestation_production;
mod attestation_verification;
mod block_verification;
mod capella;
mod events;
mod merge;
mod op_verification;
mod payload_invalidation;
@ -9,3 +10,4 @@ mod rewards;
mod store_tests;
mod sync_committee_verification;
mod tests;
mod validator_monitor;


@ -29,10 +29,19 @@ fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
let spec = test_spec::<E>();
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
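// Blobs are stored in a dedicated database; its path is now a required argument
// to `HotColdDB::open`, alongside the hot and cold paths.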
let blobs_path = db_path.path().join("blobs_db");
let config = StoreConfig::default();
let log = NullLoggerBuilder.build().expect("logger should build");
HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
.expect("disk store should initialize")
HotColdDB::open(
&hot_path,
&cold_path,
&blobs_path,
|_, _, _| Ok(()),
config,
spec,
log,
)
.expect("disk store should initialize")
}
fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {


@ -171,7 +171,7 @@ impl InvalidPayloadRig {
async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec<Hash256> {
let mut roots = Vec::with_capacity(num_blocks as usize);
for _ in 0..num_blocks {
roots.push(self.import_block(is_valid.clone()).await);
roots.push(self.import_block(is_valid).await);
}
roots
}
@ -225,7 +225,7 @@ impl InvalidPayloadRig {
let head = self.harness.chain.head_snapshot();
let state = head.beacon_state.clone_with_only_committee_caches();
let slot = slot_override.unwrap_or(state.slot() + 1);
let (block, post_state) = self.harness.make_block(state, slot).await;
let ((block, blobs), post_state) = self.harness.make_block(state, slot).await;
let block_root = block.canonical_root();
let set_new_payload = |payload: Payload| match payload {
@ -289,7 +289,7 @@ impl InvalidPayloadRig {
}
let root = self
.harness
.process_block(slot, block.canonical_root(), block.clone())
.process_block(slot, block.canonical_root(), (block.clone(), blobs.clone()))
.await
.unwrap();
@ -319,7 +319,7 @@ impl InvalidPayloadRig {
.get_full_block(&block_root)
.unwrap()
.unwrap(),
block,
*block,
"block from db must match block imported"
);
}
@ -330,7 +330,7 @@ impl InvalidPayloadRig {
match self
.harness
.process_block(slot, block.canonical_root(), block)
.process_block(slot, block.canonical_root(), (block, blobs))
.await
{
Err(error) if evaluate_error(&error) => (),
@ -693,17 +693,20 @@ async fn invalidates_all_descendants() {
.state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots)
.unwrap();
assert_eq!(fork_parent_state.slot(), fork_parent_slot);
let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await;
let ((fork_block, _), _fork_post_state) =
rig.harness.make_block(fork_parent_state, fork_slot).await;
let fork_block_root = rig
.harness
.chain
.process_block(
fork_block.canonical_root(),
Arc::new(fork_block),
fork_block,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap()
.try_into()
.unwrap();
rig.recompute_head().await;
@ -789,18 +792,21 @@ async fn switches_heads() {
.state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots)
.unwrap();
assert_eq!(fork_parent_state.slot(), fork_parent_slot);
let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await;
let ((fork_block, _), _fork_post_state) =
rig.harness.make_block(fork_parent_state, fork_slot).await;
let fork_parent_root = fork_block.parent_root();
let fork_block_root = rig
.harness
.chain
.process_block(
fork_block.canonical_root(),
Arc::new(fork_block),
fork_block,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap()
.try_into()
.unwrap();
rig.recompute_head().await;
@ -815,13 +821,16 @@ async fn switches_heads() {
})
.await;
// The fork block should become the head.
assert_eq!(rig.harness.head_block_root(), fork_block_root);
// NOTE: The `import_block` method above will cause the `ExecutionStatus` of the
// `fork_block_root`'s payload to switch from `Optimistic` to `Invalid`. This means it *won't*
// be set as head, its parent block will be instead. This is an issue with the mock EL and/or
// the payload invalidation rig.
assert_eq!(rig.harness.head_block_root(), fork_parent_root);
// The fork block has not yet been validated.
assert!(rig
.execution_status(fork_block_root)
.is_strictly_optimistic());
.is_optimistic_or_invalid());
for root in blocks {
let slot = rig
@ -1012,6 +1021,7 @@ async fn payload_preparation() {
.unwrap(),
fee_recipient,
None,
None,
);
assert_eq!(rig.previous_payload_attributes(), payload_attributes);
}
@ -1034,8 +1044,7 @@ async fn invalid_parent() {
// Produce another block atop the parent, but don't import yet.
let slot = parent_block.slot() + 1;
rig.harness.set_current_slot(slot);
let (block, state) = rig.harness.make_block(parent_state, slot).await;
let block = Arc::new(block);
let ((block, _), state) = rig.harness.make_block(parent_state, slot).await;
let block_root = block.canonical_root();
assert_eq!(block.parent_root(), parent_root);
@ -1045,7 +1054,7 @@ async fn invalid_parent() {
// Ensure the block built atop an invalid payload is invalid for gossip.
assert!(matches!(
rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await,
rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await,
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
if invalid_root == parent_root
));
@ -1428,13 +1437,13 @@ async fn build_optimistic_chain(
.server
.all_get_block_by_hash_requests_return_natural_value();
return rig;
rig
}
#[tokio::test]
async fn optimistic_transition_block_valid_unfinalized() {
let ttd = 42;
let num_blocks = 16 as usize;
let num_blocks = 16_usize;
let rig = build_optimistic_chain(ttd, ttd, num_blocks).await;
let post_transition_block_root = rig
@ -1488,7 +1497,7 @@ async fn optimistic_transition_block_valid_unfinalized() {
#[tokio::test]
async fn optimistic_transition_block_valid_finalized() {
let ttd = 42;
let num_blocks = 130 as usize;
let num_blocks = 130_usize;
let rig = build_optimistic_chain(ttd, ttd, num_blocks).await;
let post_transition_block_root = rig
@ -1543,7 +1552,7 @@ async fn optimistic_transition_block_valid_finalized() {
async fn optimistic_transition_block_invalid_unfinalized() {
let block_ttd = 42;
let rig_ttd = 1337;
let num_blocks = 22 as usize;
let num_blocks = 22_usize;
let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await;
let post_transition_block_root = rig
@ -1619,7 +1628,7 @@ async fn optimistic_transition_block_invalid_unfinalized() {
async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() {
let block_ttd = 42;
let rig_ttd = 1337;
let num_blocks = 22 as usize;
let num_blocks = 22_usize;
let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await;
let post_transition_block_root = rig
@ -1732,7 +1741,7 @@ async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() {
async fn optimistic_transition_block_invalid_finalized() {
let block_ttd = 42;
let rig_ttd = 1337;
let num_blocks = 130 as usize;
let num_blocks = 130_usize;
let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await;
let post_transition_block_root = rig
@ -1854,8 +1863,8 @@ impl InvalidHeadSetup {
.chain
.state_at_slot(slot - 1, StateSkipConfig::WithStateRoots)
.unwrap();
let (fork_block, _) = rig.harness.make_block(parent_state, slot).await;
opt_fork_block = Some(Arc::new(fork_block));
let (fork_block_tuple, _) = rig.harness.make_block(parent_state, slot).await;
opt_fork_block = Some(fork_block_tuple.0);
} else {
// Skipped slot.
};


@ -14,7 +14,7 @@ use eth2::lighthouse::StandardAttestationRewards;
use eth2::types::ValidatorId;
use lazy_static::lazy_static;
use types::beacon_state::Error as BeaconStateError;
use types::{BeaconState, ChainSpec};
use types::{BeaconState, ChainSpec, ForkName, Slot};
pub const VALIDATOR_COUNT: usize = 64;
@ -219,6 +219,359 @@ async fn test_verify_attestation_rewards_base_inactivity_leak() {
assert_eq!(expected_balances, balances);
}
#[tokio::test]
async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoch() {
let spec = E::default_spec();
let harness = get_harness(spec.clone());
let half = VALIDATOR_COUNT / 2;
let half_validators: Vec<usize> = (0..half).collect();
// target epoch is the epoch where the chain enters inactivity leak
let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 2;
// advance until beginning of epoch N + 2
harness
.extend_chain(
(E::slots_per_epoch() * (target_epoch + 1)) as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::SomeValidators(half_validators.clone()),
)
.await;
// advance to create first justification epoch and get initial balances
harness.extend_slots(E::slots_per_epoch() as usize).await;
target_epoch += 1;
let initial_balances: Vec<u64> = harness.get_current_state().balances().clone().into();
// assert previous_justified_checkpoint is 0, since the chain has been in an inactivity leak from the beginning
assert_eq!(
0,
harness
.get_current_state()
.previous_justified_checkpoint()
.epoch
.as_u64()
);
// extend slots to beginning of epoch N + 1
harness.extend_slots(E::slots_per_epoch() as usize).await;
// assert the target epoch and previous_justified_checkpoint match
assert_eq!(
target_epoch,
harness
.get_current_state()
.previous_justified_checkpoint()
.epoch
.as_u64()
);
// compute reward deltas for all validators in epoch N
let StandardAttestationRewards {
ideal_rewards,
total_rewards,
} = harness
.chain
.compute_attestation_rewards(Epoch::new(target_epoch), vec![])
.unwrap();
// assert we successfully get ideal rewards for justified epoch out of inactivity leak
assert!(ideal_rewards
.iter()
.all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0));
// apply attestation rewards to initial balances
let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards);
// verify expected balances against actual balances
let balances: Vec<u64> = harness.get_current_state().balances().clone().into();
assert_eq!(expected_balances, balances);
}
#[tokio::test]
async fn test_verify_attestation_rewards_altair() {
let spec = ForkName::Altair.make_genesis_spec(E::default_spec());
let harness = get_harness(spec.clone());
let target_epoch = 0;
// advance until epoch N + 1 and get initial balances
harness
.extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize)
.await;
let initial_balances: Vec<u64> = harness.get_current_state().balances().clone().into();
// advance until epoch N + 2 and build proposal rewards map
let mut proposal_rewards_map: HashMap<u64, u64> = HashMap::new();
let mut sync_committee_rewards_map: HashMap<u64, i64> = HashMap::new();
for _ in 0..E::slots_per_epoch() {
let state = harness.get_current_state();
let slot = state.slot() + Slot::new(1);
// calculate beacon block rewards / penalties
let ((signed_block, _maybe_blob_sidecars), mut state) =
harness.make_block_return_pre_state(state, slot).await;
let beacon_block_reward = harness
.chain
.compute_beacon_block_reward(
signed_block.message(),
signed_block.canonical_root(),
&mut state,
)
.unwrap();
let total_proposer_reward = proposal_rewards_map
.get(&beacon_block_reward.proposer_index)
.unwrap_or(&0u64)
+ beacon_block_reward.total;
proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward);
// calculate sync committee rewards / penalties
let reward_payload = harness
.chain
.compute_sync_committee_rewards(signed_block.message(), &mut state)
.unwrap();
reward_payload.iter().for_each(|reward| {
let mut amount = *sync_committee_rewards_map
.get(&reward.validator_index)
.unwrap_or(&0);
amount += reward.reward;
sync_committee_rewards_map.insert(reward.validator_index, amount);
});
harness.extend_slots(1).await;
}
// compute reward deltas for all validators in epoch N
let StandardAttestationRewards {
ideal_rewards,
total_rewards,
} = harness
.chain
.compute_attestation_rewards(Epoch::new(target_epoch), vec![])
.unwrap();
// assert ideal rewards are greater than 0
assert!(ideal_rewards
.iter()
.all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0));
// apply attestation, proposal, and sync committee rewards and penalties to initial balances
let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards);
let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances);
let expected_balances =
apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances);
// verify expected balances against actual balances
let balances: Vec<u64> = harness.get_current_state().balances().clone().into();
assert_eq!(expected_balances, balances);
}
#[tokio::test]
async fn test_verify_attestation_rewards_altair_inactivity_leak() {
let spec = ForkName::Altair.make_genesis_spec(E::default_spec());
let harness = get_harness(spec.clone());
let half = VALIDATOR_COUNT / 2;
let half_validators: Vec<usize> = (0..half).collect();
// target epoch is the epoch where the chain enters inactivity leak
let target_epoch = &spec.min_epochs_to_inactivity_penalty + 1;
// advance until beginning of epoch N + 1 and get balances
harness
.extend_slots_some_validators(
(E::slots_per_epoch() * (target_epoch + 1)) as usize,
half_validators.clone(),
)
.await;
let initial_balances: Vec<u64> = harness.get_current_state().balances().clone().into();
// advance until epoch N + 2 and build proposal rewards map
let mut proposal_rewards_map: HashMap<u64, u64> = HashMap::new();
let mut sync_committee_rewards_map: HashMap<u64, i64> = HashMap::new();
for _ in 0..E::slots_per_epoch() {
let state = harness.get_current_state();
let slot = state.slot() + Slot::new(1);
// calculate beacon block rewards / penalties
let ((signed_block, _maybe_blob_sidecars), mut state) =
harness.make_block_return_pre_state(state, slot).await;
let beacon_block_reward = harness
.chain
.compute_beacon_block_reward(
signed_block.message(),
signed_block.canonical_root(),
&mut state,
)
.unwrap();
let total_proposer_reward = proposal_rewards_map
.get(&beacon_block_reward.proposer_index)
.unwrap_or(&0u64)
+ beacon_block_reward.total;
proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward);
// calculate sync committee rewards / penalties
let reward_payload = harness
.chain
.compute_sync_committee_rewards(signed_block.message(), &mut state)
.unwrap();
reward_payload.iter().for_each(|reward| {
let mut amount = *sync_committee_rewards_map
.get(&reward.validator_index)
.unwrap_or(&0);
amount += reward.reward;
sync_committee_rewards_map.insert(reward.validator_index, amount);
});
harness
.extend_slots_some_validators(1, half_validators.clone())
.await;
}
// compute reward deltas for all validators in epoch N
let StandardAttestationRewards {
ideal_rewards,
total_rewards,
} = harness
.chain
.compute_attestation_rewards(Epoch::new(target_epoch), vec![])
.unwrap();
// assert inactivity penalty for both ideal rewards and individual validators
assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0));
assert!(total_rewards[..half]
.iter()
.all(|reward| reward.inactivity == 0));
assert!(total_rewards[half..]
.iter()
.all(|reward| reward.inactivity < 0));
// apply attestation, proposal, and sync committee rewards and penalties to initial balances
let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards);
let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances);
let expected_balances =
apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances);
// verify expected balances against actual balances
let balances: Vec<u64> = harness.get_current_state().balances().clone().into();
assert_eq!(expected_balances, balances);
}
#[tokio::test]
async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_epoch() {
let spec = ForkName::Altair.make_genesis_spec(E::default_spec());
let harness = get_harness(spec.clone());
let half = VALIDATOR_COUNT / 2;
let half_validators: Vec<usize> = (0..half).collect();
// target epoch is the epoch where the chain enters inactivity leak + 1
let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 2;
// advance until beginning of epoch N + 1
harness
.extend_slots_some_validators(
(E::slots_per_epoch() * (target_epoch + 1)) as usize,
half_validators.clone(),
)
.await;
let validator_inactivity_score = harness
.get_current_state()
.get_inactivity_score(VALIDATOR_COUNT - 1)
.unwrap();
// assert that we are in an inactivity leak
assert_eq!(4, validator_inactivity_score);
// advance for first justification epoch and get balances
harness.extend_slots(E::slots_per_epoch() as usize).await;
target_epoch += 1;
let initial_balances: Vec<u64> = harness.get_current_state().balances().clone().into();
// advance until epoch N + 2 and build proposal rewards map
let mut proposal_rewards_map: HashMap<u64, u64> = HashMap::new();
let mut sync_committee_rewards_map: HashMap<u64, i64> = HashMap::new();
for _ in 0..E::slots_per_epoch() {
let state = harness.get_current_state();
let slot = state.slot() + Slot::new(1);
// calculate beacon block rewards / penalties
let ((signed_block, _maybe_blob_sidecars), mut state) =
harness.make_block_return_pre_state(state, slot).await;
let beacon_block_reward = harness
.chain
.compute_beacon_block_reward(
signed_block.message(),
signed_block.canonical_root(),
&mut state,
)
.unwrap();
let total_proposer_reward = proposal_rewards_map
.get(&beacon_block_reward.proposer_index)
.unwrap_or(&0u64)
+ beacon_block_reward.total;
proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward);
// calculate sync committee rewards / penalties
let reward_payload = harness
.chain
.compute_sync_committee_rewards(signed_block.message(), &mut state)
.unwrap();
reward_payload.iter().for_each(|reward| {
let mut amount = *sync_committee_rewards_map
.get(&reward.validator_index)
.unwrap_or(&0);
amount += reward.reward;
sync_committee_rewards_map.insert(reward.validator_index, amount);
});
harness.extend_slots(1).await;
}
// assert the target epoch and previous_justified_checkpoint match
assert_eq!(
target_epoch,
harness
.get_current_state()
.previous_justified_checkpoint()
.epoch
.as_u64()
);
// compute reward deltas for all validators in epoch N
let StandardAttestationRewards {
ideal_rewards,
total_rewards,
} = harness
.chain
.compute_attestation_rewards(Epoch::new(target_epoch), vec![])
.unwrap();
// assert ideal rewards are greater than 0
assert!(ideal_rewards
.iter()
.all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0));
// apply attestation, proposal, and sync committee rewards and penalties to initial balances
let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards);
let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances);
let expected_balances =
apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances);
// verify expected balances against actual balances
let balances: Vec<u64> = harness.get_current_state().balances().clone().into();
assert_eq!(expected_balances, balances);
}
#[tokio::test]
async fn test_verify_attestation_rewards_base_subset_only() {
let harness = get_harness(E::default_spec());
@ -297,3 +650,32 @@ fn get_validator_balances(state: BeaconState<E>, validators: &[usize]) -> Vec<u6
})
.collect()
}
fn apply_beacon_block_rewards(
proposal_rewards_map: &HashMap<u64, u64>,
expected_balances: Vec<u64>,
) -> Vec<u64> {
let calculated_balances = expected_balances
.iter()
.enumerate()
.map(|(i, balance)| balance + proposal_rewards_map.get(&(i as u64)).unwrap_or(&0u64))
.collect();
calculated_balances
}
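// Note: sync committee rewards can be negative (penalties). The i64 sum below is
// mapped back to u64 with `unsigned_abs`, which assumes a penalty never exceeds
// the balance; a larger penalty would silently flip sign instead of saturating.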
fn apply_sync_committee_rewards(
sync_committee_rewards_map: &HashMap<u64, i64>,
expected_balances: Vec<u64>,
) -> Vec<u64> {
let calculated_balances = expected_balances
.iter()
.enumerate()
.map(|(i, balance)| {
(*balance as i64 + sync_committee_rewards_map.get(&(i as u64)).unwrap_or(&0i64))
.unsigned_abs()
})
.collect();
calculated_balances
}

File diff suppressed because it is too large


@ -684,19 +684,20 @@ async fn run_skip_slot_test(skip_slots: u64) {
Slot::new(0)
);
assert_eq!(
harness_b
.chain
.process_block(
harness_a.chain.head_snapshot().beacon_block_root,
harness_a.chain.head_snapshot().beacon_block.clone(),
NotifyExecutionLayer::Yes,
|| Ok(())
)
.await
.unwrap(),
harness_a.chain.head_snapshot().beacon_block_root
);
let status = harness_b
.chain
.process_block(
harness_a.chain.head_snapshot().beacon_block_root,
harness_a.get_head_block(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap();
let root: Hash256 = status.try_into().unwrap();
assert_eq!(root, harness_a.chain.head_snapshot().beacon_block_root);
harness_b.chain.recompute_head_at_current_slot().await;
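// `process_block` now returns an availability status rather than the root
// directly; once the block is fully available and imported, the status converts
// into the block root via `TryInto`, which is what the assertion checks.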


@ -0,0 +1,377 @@
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
};
use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS};
use lazy_static::lazy_static;
use logging::test_logger;
use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot};
// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 48;
lazy_static! {
/// A cached set of keys.
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
type E = MainnetEthSpec;
fn get_harness(
validator_count: usize,
validator_indexes_to_monitor: Vec<usize>,
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.default_spec()
.keypairs(KEYPAIRS[0..validator_count].to_vec())
.logger(test_logger())
.fresh_ephemeral_store()
.mock_execution_layer()
.validator_monitor_config(ValidatorMonitorConfig {
validators: validator_indexes_to_monitor
.iter()
.map(|i| PublicKeyBytes::from(KEYPAIRS[*i].pk.clone()))
.collect(),
..<_>::default()
})
.build();
harness.advance_slot();
harness
}
// Regression test for off-by-one caching issue in missed block detection.
#[tokio::test]
async fn missed_blocks_across_epochs() {
let slots_per_epoch = E::slots_per_epoch();
let all_validators = (0..VALIDATOR_COUNT).collect::<Vec<_>>();
let harness = get_harness(VALIDATOR_COUNT, vec![]);
let validator_monitor = &harness.chain.validator_monitor;
let mut genesis_state = harness.get_current_state();
let genesis_state_root = genesis_state.update_tree_hash_cache().unwrap();
let genesis_block_root = harness.head_block_root();
// Skip a slot in the first epoch (to prime the cache inside the missed block function) and then
// at a different offset in the 2nd epoch. The missed block in the 2nd epoch MUST NOT reuse
// the cache from the first epoch.
let first_skip_offset = 3;
let second_skip_offset = slots_per_epoch / 2;
assert_ne!(first_skip_offset, second_skip_offset);
let first_skip_slot = Slot::new(first_skip_offset);
let second_skip_slot = Slot::new(slots_per_epoch + second_skip_offset);
let slots = (1..2 * slots_per_epoch)
.map(Slot::new)
.filter(|slot| *slot != first_skip_slot && *slot != second_skip_slot)
.collect::<Vec<_>>();
let (block_roots_by_slot, state_roots_by_slot, _, head_state) = harness
.add_attested_blocks_at_slots(genesis_state, genesis_state_root, &slots, &all_validators)
.await;
// Prime the proposer shuffling cache.
let mut proposer_shuffling_cache = harness.chain.beacon_proposer_cache.lock();
for epoch in [0, 1].into_iter().map(Epoch::new) {
let start_slot = epoch.start_slot(slots_per_epoch) + 1;
let state = harness
.get_hot_state(state_roots_by_slot[&start_slot])
.unwrap();
let decision_root = state
.proposer_shuffling_decision_root(genesis_block_root)
.unwrap();
proposer_shuffling_cache
.insert(
epoch,
decision_root,
state
.get_beacon_proposer_indices(&harness.chain.spec)
.unwrap(),
state.fork(),
)
.unwrap();
}
drop(proposer_shuffling_cache);
// Monitor the validator that proposed the block at the same offset in the 0th epoch as the skip
// in the 1st epoch.
let innocent_proposer_slot = Slot::new(second_skip_offset);
let innocent_proposer = harness
.get_block(block_roots_by_slot[&innocent_proposer_slot])
.unwrap()
.message()
.proposer_index();
let mut vm_write = validator_monitor.write();
// Call `process_` once to update validator indices.
vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec);
// Start monitoring the innocent validator.
vm_write.add_validator_pubkey(KEYPAIRS[innocent_proposer as usize].pk.compress());
// Check for missed blocks.
vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec);
// My client is innocent, your honour!
assert_eq!(
vm_write.get_monitored_validator_missed_block_count(innocent_proposer),
0
);
}
#[tokio::test]
async fn produces_missed_blocks() {
let validator_count = 16;
let slots_per_epoch = E::slots_per_epoch();
let nb_epoch_to_simulate = Epoch::new(2);
// Generate 63 slots (2 epochs * 32 slots per epoch - 1)
let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1;
// The validator index of the validator that is 'supposed' to miss a block
let mut validator_index_to_monitor = 1;
// 1st scenario //
//
// Missed block happens when slot and prev_slot are in the same epoch
let harness1 = get_harness(validator_count, vec![validator_index_to_monitor]);
harness1
.extend_chain(
initial_blocks as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
let mut _state = &mut harness1.get_current_state();
let mut epoch = _state.current_epoch();
// We have a total of 63 slots and we want slot 57 to be a missed block
// and this is slot=25 in epoch=1
let mut idx = initial_blocks - 6;
let mut slot = Slot::new(idx);
let mut slot_in_epoch = slot % slots_per_epoch;
let mut prev_slot = Slot::new(idx - 1);
let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap();
let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap();
let mut validator_index = validator_indexes[slot_in_epoch.as_usize()];
let mut proposer_shuffling_decision_root = _state
.proposer_shuffling_decision_root(duplicate_block_root)
.unwrap();
let beacon_proposer_cache = harness1
.chain
.validator_monitor
.read()
.get_beacon_proposer_cache();
// Let's fill the cache with the proposers for the current epoch
// and push the duplicate_block_root to the block_roots vector
assert_eq!(
beacon_proposer_cache.lock().insert(
epoch,
proposer_shuffling_decision_root,
validator_indexes.into_iter().collect::<Vec<usize>>(),
_state.fork()
),
Ok(())
);
// Modify the block root of the previous slot to be the same as the block root of the current slot
// in order to simulate a missed block
assert_eq!(
_state.set_block_root(prev_slot, duplicate_block_root),
Ok(())
);
{
// Let's validate the state which will call the function responsible for
// adding the missed blocks to the validator monitor
let mut validator_monitor = harness1.chain.validator_monitor.write();
validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec);
// We should have one entry in the missed blocks map
assert_eq!(
validator_monitor.get_monitored_validator_missed_block_count(validator_index as u64),
1
);
}
// 2nd scenario //
//
// Missed block happens when slot and prev_slot are not in the same epoch,
// making sure that the cache reloads when the epoch changes.
// In that scenario the slot that missed a block is the first slot of the epoch.
validator_index_to_monitor = 7;
// We are adding other validators to monitor, as those will miss a block depending on
// the fork name specified when running the test, since the proposer cache differs per fork (cf. seed)
let validator_index_to_monitor_altair = 2;
// Same as above but for the merge upgrade
let validator_index_to_monitor_merge = 4;
// Same as above but for the capella upgrade
let validator_index_to_monitor_capella = 11;
// Same as above but for the deneb upgrade
let validator_index_to_monitor_deneb = 3;
let harness2 = get_harness(
validator_count,
vec![
validator_index_to_monitor,
validator_index_to_monitor_altair,
validator_index_to_monitor_merge,
validator_index_to_monitor_capella,
validator_index_to_monitor_deneb,
],
);
let advance_slot_by = 9;
harness2
.extend_chain(
(initial_blocks + advance_slot_by) as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
let mut _state2 = &mut harness2.get_current_state();
epoch = _state2.current_epoch();
// We have a total of 72 slots and we want slot 64 to be the missed block;
// that is the first slot of epoch 2 (slot 0 within the epoch)
idx = initial_blocks + (advance_slot_by) - 8;
slot = Slot::new(idx);
prev_slot = Slot::new(idx - 1);
slot_in_epoch = slot % slots_per_epoch;
duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap();
validator_index = validator_indexes[slot_in_epoch.as_usize()];
let beacon_proposer_cache = harness2
.chain
.validator_monitor
.read()
.get_beacon_proposer_cache();
// Let's fill the cache with the proposers for the current epoch
// and push the duplicate_block_root to the block_roots vector
assert_eq!(
beacon_proposer_cache.lock().insert(
epoch,
duplicate_block_root,
validator_indexes.into_iter().collect::<Vec<usize>>(),
_state2.fork()
),
Ok(())
);
assert_eq!(
_state2.set_block_root(prev_slot, duplicate_block_root),
Ok(())
);
{
// Let's validate the state which will call the function responsible for
// adding the missed blocks to the validator monitor
let mut validator_monitor2 = harness2.chain.validator_monitor.write();
validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec);
// We should have one entry in the missed blocks map
assert_eq!(
validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64),
1
);
// 3rd scenario //
//
// A missed block happens, but the validator is not monitored,
// so it should not be flagged as a missed block
idx = initial_blocks + (advance_slot_by) - 7;
slot = Slot::new(idx);
prev_slot = Slot::new(idx - 1);
slot_in_epoch = slot % slots_per_epoch;
duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap();
let not_monitored_validator_index = validator_indexes[slot_in_epoch.as_usize()];
assert_eq!(
_state2.set_block_root(prev_slot, duplicate_block_root),
Ok(())
);
// Let's validate the state which will call the function responsible for
// adding the missed blocks to the validator monitor
validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec);
// We shouldn't have any entry in the missed blocks map
assert_ne!(validator_index, not_monitored_validator_index);
assert_eq!(
validator_monitor2
.get_monitored_validator_missed_block_count(not_monitored_validator_index as u64),
0
);
}
// 4th scenario //
//
// A missed block happens within MISSED_BLOCK_LAG_SLOTS of state.slot,
// so it shouldn't be flagged as a missed block
let harness3 = get_harness(validator_count, vec![validator_index_to_monitor]);
harness3
.extend_chain(
slots_per_epoch as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
let mut _state3 = &mut harness3.get_current_state();
epoch = _state3.current_epoch();
// We have a total of 32 slots and we want slot 30 to be a missed block;
// that is slot 30 within epoch 0
idx = slots_per_epoch - MISSED_BLOCK_LAG_SLOTS as u64 + 2;
slot = Slot::new(idx);
slot_in_epoch = slot % slots_per_epoch;
prev_slot = Slot::new(idx - 1);
duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap();
validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap();
validator_index = validator_indexes[slot_in_epoch.as_usize()];
proposer_shuffling_decision_root = _state3
.proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root)
.unwrap();
let beacon_proposer_cache = harness3
.chain
.validator_monitor
.read()
.get_beacon_proposer_cache();
// Let's fill the cache with the proposers for the current epoch
// and push the duplicate_block_root to the block_roots vector
assert_eq!(
beacon_proposer_cache.lock().insert(
epoch,
proposer_shuffling_decision_root,
validator_indexes.into_iter().collect::<Vec<usize>>(),
_state3.fork()
),
Ok(())
);
// Modify the block root of the previous slot to be the same as the block root of the current slot
// in order to simulate a missed block
assert_eq!(
_state3.set_block_root(prev_slot, duplicate_block_root),
Ok(())
);
{
// Let's validate the state which will call the function responsible for
// adding the missed blocks to the validator monitor
let mut validator_monitor3 = harness3.chain.validator_monitor.write();
validator_monitor3.process_valid_state(epoch, _state3, &harness3.chain.spec);
// We shouldn't have any entries in the missed blocks map
assert_eq!(
validator_monitor3.get_monitored_validator_missed_block_count(validator_index as u64),
0
);
}
}
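
The scenarios above simulate a missed block by writing the same block root into two consecutive slots, since a beacon state's block_roots vector repeats the previous root for a slot that received no block. A minimal sketch of that detection idea, assuming a Hash256 slice indexed by slot (an illustrative helper, not the validator monitor's actual code):

use types::Hash256;

/// Illustrative only: a slot counts as missed when its entry in
/// `block_roots` equals the previous slot's entry, i.e. no new block
/// landed at that slot.
fn is_missed_slot(block_roots: &[Hash256], slot: usize) -> bool {
    slot > 0 && block_roots[slot] == block_roots[slot - 1]
}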

View File

@ -39,13 +39,11 @@
//! task.
use crate::work_reprocessing_queue::{
spawn_reprocess_scheduler, QueuedAggregate, QueuedBackfillBatch, QueuedGossipBlock,
QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, ReprocessQueueMessage,
QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage,
};
use futures::stream::{Stream, StreamExt};
use futures::task::Poll;
use lighthouse_network::NetworkGlobals;
use lighthouse_network::{MessageId, PeerId};
use lighthouse_network::{MessageId, NetworkGlobals, PeerId};
use logging::TimeLatch;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
@ -62,8 +60,13 @@ use std::time::Duration;
use task_executor::TaskExecutor;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TrySendError;
use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, Slot, SubnetId};
use types::{Attestation, Hash256, SignedAggregateAndProof, SubnetId};
use types::{EthSpec, Slot};
use work_reprocessing_queue::IgnoredRpcBlock;
use work_reprocessing_queue::{
spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock,
QueuedUnaggregate, ReadyWork,
};
mod metrics;
pub mod work_reprocessing_queue;
@ -102,6 +105,10 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024;
/// before we start dropping them.
const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `BlobSidecar` objects received on gossip that
/// will be stored before we start dropping them.
const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but
/// within acceptable clock disparity) that will be queued before we start dropping them.
const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024;
@ -142,6 +149,10 @@ const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024;
/// will be stored before we start dropping them.
const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `BlobSidecar` objects received from the network RPC that
/// will be stored before we start dropping them.
const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `Vec<SignedBeaconBlock>` objects received during syncing that will
/// be stored before we start dropping them.
const MAX_CHAIN_SEGMENT_QUEUE_LEN: usize = 64;
@ -154,10 +165,18 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024;
/// will be stored before we start dropping them.
const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `BlobsByRangeRequest` objects received from the network RPC that
/// will be stored before we start dropping them.
const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1024;
/// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that
/// will be stored before we start dropping them.
const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `BlobsByRootRequest` objects received from the network RPC that
/// will be stored before we start dropping them.
const MAX_BLOBS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
/// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them.
///
/// This value is set high to accommodate the large spike that is expected immediately after Capella
@ -204,6 +223,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch";
pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate";
pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch";
pub const GOSSIP_BLOCK: &str = "gossip_block";
pub const GOSSIP_BLOBS_SIDECAR: &str = "gossip_blobs_sidecar";
pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block";
pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit";
pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing";
@ -214,11 +234,14 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd
pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";
pub const RPC_BLOCK: &str = "rpc_block";
pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block";
pub const RPC_BLOBS: &str = "rpc_blob";
pub const CHAIN_SEGMENT: &str = "chain_segment";
pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill";
pub const STATUS_PROCESSING: &str = "status_processing";
pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request";
pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request";
pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request";
pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request";
pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap";
pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation";
pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate";
@ -566,6 +589,7 @@ pub enum Work<E: EthSpec> {
process_batch: Box<dyn FnOnce(Vec<GossipAggregatePackage<E>>) + Send + Sync>,
},
GossipBlock(AsyncFn),
GossipBlobSidecar(AsyncFn),
DelayedImportBlock {
beacon_block_slot: Slot,
beacon_block_root: Hash256,
@ -581,6 +605,9 @@ pub enum Work<E: EthSpec> {
RpcBlock {
process_fn: AsyncFn,
},
RpcBlobs {
process_fn: AsyncFn,
},
IgnoredRpcBlock {
process_fn: BlockingFn,
},
@ -589,6 +616,8 @@ pub enum Work<E: EthSpec> {
Status(BlockingFn),
BlocksByRangeRequest(BlockingFnWithManualSendOnIdle),
BlocksByRootsRequest(BlockingFnWithManualSendOnIdle),
BlobsByRangeRequest(BlockingFn),
BlobsByRootsRequest(BlockingFn),
GossipBlsToExecutionChange(BlockingFn),
LightClientBootstrapRequest(BlockingFn),
ApiRequestP0(BlockingOrAsync),
@ -610,6 +639,7 @@ impl<E: EthSpec> Work<E> {
Work::GossipAggregate { .. } => GOSSIP_AGGREGATE,
Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH,
Work::GossipBlock(_) => GOSSIP_BLOCK,
Work::GossipBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR,
Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK,
Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT,
Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING,
@ -619,12 +649,15 @@ impl<E: EthSpec> Work<E> {
Work::GossipLightClientFinalityUpdate(_) => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE,
Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE,
Work::RpcBlock { .. } => RPC_BLOCK,
Work::RpcBlobs { .. } => RPC_BLOBS,
Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK,
Work::ChainSegment { .. } => CHAIN_SEGMENT,
Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL,
Work::Status(_) => STATUS_PROCESSING,
Work::BlocksByRangeRequest(_) => BLOCKS_BY_RANGE_REQUEST,
Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST,
Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST,
Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST,
Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST,
Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION,
Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE,
@ -771,14 +804,18 @@ impl<E: EthSpec> BeaconProcessor<E> {
// Using a FIFO queue since blocks need to be imported sequentially.
let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN);
let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN);
let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN);
let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN);
let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN);
let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN);
let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN);
let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN);
let mut blbroots_queue = FifoQueue::new(MAX_BLOBS_BY_ROOTS_QUEUE_LEN);
let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN);
let mut gossip_bls_to_execution_change_queue =
FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN);
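
All of these queues, including the new blob ones, follow the same bounded-FIFO pattern: push to the back until the configured maximum is reached, then drop incoming work, as the "before we start dropping them" doc comments on the length constants describe. A minimal sketch of that pattern (type, field, and method names assumed, not copied from this commit):

use std::collections::VecDeque;

struct BoundedFifo<T> {
    queue: VecDeque<T>,
    max_length: usize,
}

impl<T> BoundedFifo<T> {
    fn new(max_length: usize) -> Self {
        Self { queue: VecDeque::new(), max_length }
    }

    /// Drop the incoming item once the queue is full.
    fn push(&mut self, item: T) {
        if self.queue.len() < self.max_length {
            self.queue.push_back(item);
        }
    }

    fn pop(&mut self) -> Option<T> {
        self.queue.pop_front()
    }
}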
@ -915,6 +952,8 @@ impl<E: EthSpec> BeaconProcessor<E> {
// requested these blocks.
} else if let Some(item) = rpc_block_queue.pop() {
self.spawn_worker(item, idle_tx);
} else if let Some(item) = rpc_blob_queue.pop() {
self.spawn_worker(item, idle_tx);
// Check delayed blocks before gossip blocks, the gossip blocks might rely
// on the delayed ones.
} else if let Some(item) = delayed_block_queue.pop() {
@ -923,7 +962,9 @@ impl<E: EthSpec> BeaconProcessor<E> {
// required to verify some attestations.
} else if let Some(item) = gossip_block_queue.pop() {
self.spawn_worker(item, idle_tx);
// Check the priority 0 API requests after blocks, but before attestations.
} else if let Some(item) = gossip_blob_queue.pop() {
self.spawn_worker(item, idle_tx);
// Check the priority 0 API requests after blocks and blobs, but before attestations.
} else if let Some(item) = api_request_p0_queue.pop() {
self.spawn_worker(item, idle_tx);
// Check the aggregates, *then* the unaggregates since we assume that
@ -1068,6 +1109,10 @@ impl<E: EthSpec> BeaconProcessor<E> {
self.spawn_worker(item, idle_tx);
} else if let Some(item) = bbroots_queue.pop() {
self.spawn_worker(item, idle_tx);
} else if let Some(item) = blbrange_queue.pop() {
self.spawn_worker(item, idle_tx);
} else if let Some(item) = blbroots_queue.pop() {
self.spawn_worker(item, idle_tx);
// Check slashings after all other consensus messages so we prioritize
// following head.
//
@ -1158,6 +1203,9 @@ impl<E: EthSpec> BeaconProcessor<E> {
Work::GossipBlock { .. } => {
gossip_block_queue.push(work, work_id, &self.log)
}
Work::GossipBlobSidecar { .. } => {
gossip_blob_queue.push(work, work_id, &self.log)
}
Work::DelayedImportBlock { .. } => {
delayed_block_queue.push(work, work_id, &self.log)
}
@ -1183,6 +1231,7 @@ impl<E: EthSpec> BeaconProcessor<E> {
Work::RpcBlock { .. } | Work::IgnoredRpcBlock { .. } => {
rpc_block_queue.push(work, work_id, &self.log)
}
Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log),
Work::ChainSegment { .. } => {
chain_segment_queue.push(work, work_id, &self.log)
}
@ -1196,6 +1245,9 @@ impl<E: EthSpec> BeaconProcessor<E> {
Work::BlocksByRootsRequest { .. } => {
bbroots_queue.push(work, work_id, &self.log)
}
Work::BlobsByRangeRequest { .. } => {
blbrange_queue.push(work, work_id, &self.log)
}
Work::LightClientBootstrapRequest { .. } => {
lcbootstrap_queue.push(work, work_id, &self.log)
}
@ -1208,6 +1260,9 @@ impl<E: EthSpec> BeaconProcessor<E> {
Work::GossipBlsToExecutionChange { .. } => {
gossip_bls_to_execution_change_queue.push(work, work_id, &self.log)
}
Work::BlobsByRootsRequest { .. } => {
blbroots_queue.push(work, work_id, &self.log)
}
Work::UnknownLightClientOptimisticUpdate { .. } => {
unknown_light_client_update_queue.push(work, work_id, &self.log)
}
@ -1245,10 +1300,18 @@ impl<E: EthSpec> BeaconProcessor<E> {
&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL,
gossip_block_queue.len() as i64,
);
metrics::set_gauge(
&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL,
gossip_blob_queue.len() as i64,
);
metrics::set_gauge(
&metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL,
rpc_block_queue.len() as i64,
);
metrics::set_gauge(
&metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL,
rpc_blob_queue.len() as i64,
);
metrics::set_gauge(
&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL,
chain_segment_queue.len() as i64,
@ -1388,11 +1451,18 @@ impl<E: EthSpec> BeaconProcessor<E> {
beacon_block_root: _,
process_fn,
} => task_spawner.spawn_async(process_fn),
Work::RpcBlock { process_fn } => task_spawner.spawn_async(process_fn),
Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } => {
task_spawner.spawn_async(process_fn)
}
Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn),
Work::GossipBlock(work) => task_spawner.spawn_async(async move {
work.await;
}),
Work::GossipBlock(work) | Work::GossipBlobSidecar(work) => {
task_spawner.spawn_async(async move {
work.await;
})
}
Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => {
task_spawner.spawn_blocking(process_fn)
}
Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => {
task_spawner.spawn_blocking_with_manual_send_idle(work)
}

View File

@ -46,6 +46,11 @@ lazy_static::lazy_static! {
"beacon_processor_gossip_block_queue_total",
"Count of blocks from gossip waiting to be verified."
);
// Gossip blobs.
pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_gossip_blob_queue_total",
"Count of blobs from gossip waiting to be verified."
);
// Gossip Exits.
pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_exit_queue_total",
@ -71,6 +76,11 @@ lazy_static::lazy_static! {
"beacon_processor_rpc_block_queue_total",
"Count of blocks from the rpc waiting to be verified."
);
// Rpc blobs.
pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_rpc_blob_queue_total",
"Count of blobs from the rpc waiting to be verified."
);
// Chain segments.
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_chain_segment_queue_total",

View File

@ -1,9 +1,9 @@
use eth2::types::builder_bid::SignedBuilderBid;
use eth2::types::{
AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload,
ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData,
Slot,
EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes,
SignedValidatorRegistrationData, Slot,
};
use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock};
pub use eth2::Error;
use eth2::{ok_or_error, StatusCode};
use reqwest::{IntoUrl, Response};
@ -140,8 +140,8 @@ impl BuilderHttpClient {
/// `POST /eth/v1/builder/blinded_blocks`
pub async fn post_builder_blinded_blocks<E: EthSpec>(
&self,
blinded_block: &SignedBeaconBlock<E, BlindedPayload<E>>,
) -> Result<ForkVersionedResponse<ExecutionPayload<E>>, Error> {
blinded_block: &SignedBlindedBeaconBlock<E>,
) -> Result<ForkVersionedResponse<FullPayloadContents<E>>, Error> {
let mut path = self.server.full.clone();
path.path_segments_mut()
@ -163,12 +163,12 @@ impl BuilderHttpClient {
}
/// `GET /eth/v1/builder/header`
pub async fn get_builder_header<E: EthSpec, Payload: AbstractExecPayload<E>>(
pub async fn get_builder_header<E: EthSpec>(
&self,
slot: Slot,
parent_hash: ExecutionBlockHash,
pubkey: &PublicKeyBytes,
) -> Result<Option<ForkVersionedResponse<SignedBuilderBid<E, Payload>>>, Error> {
) -> Result<Option<ForkVersionedResponse<SignedBuilderBid<E>>>, Error> {
let mut path = self.server.full.clone();
path.path_segments_mut()
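
A hedged call-site sketch for the de-generified signature, with client, slot, parent_hash and pubkey assumed to be in scope:

// The `Payload` type parameter is gone; the bid is fork-versioned instead.
let bid: Option<ForkVersionedResponse<SignedBuilderBid<E>>> = client
    .get_builder_header::<E>(slot, parent_hash, &pubkey)
    .await?;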

View File

@ -22,7 +22,6 @@ types = { workspace = true }
eth2_config = { workspace = true }
slot_clock = { workspace = true }
serde = { workspace = true }
serde_derive = "1.0.116"
error-chain = { workspace = true }
slog = { workspace = true }
tokio = { workspace = true }

View File

@ -99,7 +99,7 @@ pub async fn broadcast_address_changes<T: BeaconChainTypes>(
messages: vec![pubsub_message],
};
// It seems highly unlikely that this unbounded send will fail, but
// we handle the result nontheless.
// we handle the result nonetheless.
if let Err(e) = network_send.send(message) {
debug!(
log,

View File

@ -2,6 +2,8 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella;
use crate::config::{ClientGenesis, Config as ClientConfig};
use crate::notifier::spawn_notifier;
use crate::Client;
use beacon_chain::attestation_simulator::start_attestation_simulator_service;
use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service;
use beacon_chain::otb_verification_service::start_otb_verification_service;
use beacon_chain::proposer_prep_service::start_proposer_prep_service;
use beacon_chain::schema_change::migrate_schema;
@ -33,6 +35,7 @@ use std::net::TcpListener;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};
use timer::spawn_timer;
use tokio::sync::oneshot;
use types::{
@ -43,6 +46,11 @@ use types::{
/// Interval between polling the eth1 node for genesis information.
pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000;
/// Reduces the blob availability period by some epochs. Helps prevent the user
/// from starting a genesis sync so near to the blob pruning window that blobs
/// have been pruned before they can manage to sync the chain.
const BLOB_AVAILABILITY_REDUCTION_EPOCHS: u64 = 2;
/// Builds a `Client` instance.
///
/// ## Notes
@ -67,7 +75,7 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
eth1_service: Option<Eth1Service>,
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
network_senders: Option<NetworkSenders<T::EthSpec>>,
gossipsub_registry: Option<Registry>,
libp2p_registry: Option<Registry>,
db_path: Option<PathBuf>,
freezer_db_path: Option<PathBuf>,
http_api_config: http_api::Config,
@ -101,7 +109,7 @@ where
eth1_service: None,
network_globals: None,
network_senders: None,
gossipsub_registry: None,
libp2p_registry: None,
db_path: None,
freezer_db_path: None,
http_api_config: <_>::default(),
@ -190,15 +198,7 @@ where
.graffiti(graffiti)
.event_handler(event_handler)
.execution_layer(execution_layer)
.monitor_validators(
config.validator_monitor_auto,
config.validator_monitor_pubkeys.clone(),
config.validator_monitor_individual_tracking_threshold,
runtime_context
.service_context("val_mon".to_string())
.log()
.clone(),
);
.validator_monitor_config(config.validator_monitor.clone());
let builder = if let Some(slasher) = self.slasher.clone() {
builder.slasher(slasher)
@ -258,6 +258,45 @@ where
let genesis_state = genesis_state(&runtime_context, &config, log).await?;
// If the user has not explicitly allowed genesis sync, prevent
// them from trying to sync from genesis if we're outside of the
// blob P2P availability window.
//
// It doesn't make sense to try and sync the chain if we can't
// verify blob availability by downloading blobs from the P2P
// network. The user should do a checkpoint sync instead.
if !config.allow_insecure_genesis_sync {
if let Some(deneb_fork_epoch) = spec.deneb_fork_epoch {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.map_err(|e| format!("Unable to read system time: {e:}"))?
.as_secs();
let genesis_time = genesis_state.genesis_time();
let deneb_time =
genesis_time + (deneb_fork_epoch.as_u64() * spec.seconds_per_slot);
// Shrink the blob availability window so users don't start
// a sync right before blobs start to disappear from the P2P
// network.
let reduced_p2p_availability_epochs = spec
.min_epochs_for_blob_sidecars_requests
.saturating_sub(BLOB_AVAILABILITY_REDUCTION_EPOCHS);
let blob_availability_window = reduced_p2p_availability_epochs
* TEthSpec::slots_per_epoch()
* spec.seconds_per_slot;
if now > deneb_time + blob_availability_window {
return Err(
"Syncing from genesis is insecure and incompatible with data availability checks. \
You should instead perform a checkpoint sync from a trusted node using the --checkpoint-sync-url option. \
For a list of public endpoints, see: https://eth-clients.github.io/checkpoint-sync-endpoints/ \
Alternatively, use --allow-insecure-genesis-sync if the risks are understood."
.to_string(),
);
}
}
}
builder.genesis_state(genesis_state).map(|v| (v, None))?
}
ClientGenesis::WeakSubjSszBytes {
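
For a sense of scale, a back-of-the-envelope sketch of the reduced availability window computed above; the 4096/32/12 figures are mainnet-style assumptions, not values taken from this commit:

const BLOB_AVAILABILITY_REDUCTION_EPOCHS: u64 = 2;

/// Seconds of blob availability left after subtracting the safety margin.
fn blob_availability_window_secs(
    min_epochs_for_blob_sidecars_requests: u64,
    slots_per_epoch: u64,
    seconds_per_slot: u64,
) -> u64 {
    let reduced_epochs = min_epochs_for_blob_sidecars_requests
        .saturating_sub(BLOB_AVAILABILITY_REDUCTION_EPOCHS);
    reduced_epochs * slots_per_epoch * seconds_per_slot
}

// (4096 - 2) epochs * 32 slots * 12 s = 1_572_096 s, roughly 18 days.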
@ -508,6 +547,12 @@ where
ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?,
};
let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup {
beacon_chain_builder.trusted_setup(trusted_setup)
} else {
beacon_chain_builder
};
if config.sync_eth1_chain {
self.eth1_service = eth1_service_option;
}
@ -532,7 +577,7 @@ where
.ok_or("network requires beacon_processor_channels")?;
// If gossipsub metrics are required we build a registry to record them
let mut gossipsub_registry = if config.metrics_enabled {
let mut libp2p_registry = if config.metrics_enabled {
Some(Registry::default())
} else {
None
@ -542,9 +587,7 @@ where
beacon_chain,
config,
context.executor,
gossipsub_registry
.as_mut()
.map(|registry| registry.sub_registry_with_prefix("gossipsub")),
libp2p_registry.as_mut(),
beacon_processor_channels.beacon_processor_tx.clone(),
beacon_processor_channels.work_reprocessing_tx.clone(),
)
@ -553,7 +596,7 @@ where
self.network_globals = Some(network_globals);
self.network_senders = Some(network_senders);
self.gossipsub_registry = gossipsub_registry;
self.libp2p_registry = libp2p_registry;
Ok(self)
}
@ -719,7 +762,7 @@ where
chain: self.beacon_chain.clone(),
db_path: self.db_path.clone(),
freezer_db_path: self.freezer_db_path.clone(),
gossipsub_registry: self.gossipsub_registry.take().map(std::sync::Mutex::new),
gossipsub_registry: self.libp2p_registry.take().map(std::sync::Mutex::new),
log: log.clone(),
});
@ -838,6 +881,14 @@ where
start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone());
start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone());
start_availability_cache_maintenance_service(
runtime_context.executor.clone(),
beacon_chain.clone(),
);
start_attestation_simulator_service(
beacon_chain.task_executor.clone(),
beacon_chain.clone(),
);
}
Ok(Client {
@ -898,6 +949,7 @@ where
mut self,
hot_path: &Path,
cold_path: &Path,
blobs_path: &Path,
config: StoreConfig,
log: Logger,
) -> Result<Self, String> {
@ -935,6 +987,7 @@ where
let store = HotColdDB::open(
hot_path,
cold_path,
blobs_path,
schema_upgrade,
config,
spec,

View File

@ -1,16 +1,20 @@
use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
use beacon_chain::validator_monitor::ValidatorMonitorConfig;
use beacon_chain::TrustedSetup;
use beacon_processor::BeaconProcessorConfig;
use directory::DEFAULT_ROOT_DIR;
use environment::LoggerConfig;
use network::NetworkConfig;
use sensitive_url::SensitiveUrl;
use serde_derive::{Deserialize, Serialize};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
use std::time::Duration;
use types::{Graffiti, PublicKeyBytes};
use types::Graffiti;
/// Default directory name for the freezer database under the top-level data dir.
const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db";
/// Default directory name for the blobs database under the top-level data dir.
const DEFAULT_BLOBS_DB_DIR: &str = "blobs_db";
/// Defines how the client should initialize the `BeaconChain` and other components.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
@ -45,6 +49,8 @@ pub struct Config {
pub db_name: String,
/// Path where the freezer database will be located.
pub freezer_db_path: Option<PathBuf>,
/// Path where the blobs database will be located if blobs should be in a separate database.
pub blobs_db_path: Option<PathBuf>,
pub log_file: PathBuf,
/// If true, the node will use co-ordinated junk for eth1 values.
///
@ -53,15 +59,7 @@ pub struct Config {
pub sync_eth1_chain: bool,
/// Graffiti to be inserted everytime we create a block.
pub graffiti: Graffiti,
/// When true, automatically monitor validators using the HTTP API.
pub validator_monitor_auto: bool,
/// A list of validator pubkeys to monitor.
pub validator_monitor_pubkeys: Vec<PublicKeyBytes>,
/// Once the number of monitored validators goes above this threshold, we
/// will stop tracking metrics on a per-validator basis. This prevents large
/// validator counts causing infeasibly high cardinailty for Prometheus and
/// high log volumes.
pub validator_monitor_individual_tracking_threshold: usize,
pub validator_monitor: ValidatorMonitorConfig,
#[serde(skip)]
/// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined
/// via the CLI at runtime, instead of from a configuration file saved to disk.
@ -71,6 +69,7 @@ pub struct Config {
pub chain: beacon_chain::ChainConfig,
pub eth1: eth1::Config,
pub execution_layer: Option<execution_layer::Config>,
pub trusted_setup: Option<TrustedSetup>,
pub http_api: http_api::Config,
pub http_metrics: http_metrics::Config,
pub monitoring_api: Option<monitoring_api::Config>,
@ -79,6 +78,7 @@ pub struct Config {
pub beacon_processor: BeaconProcessorConfig,
pub genesis_state_url: Option<String>,
pub genesis_state_url_timeout: Duration,
pub allow_insecure_genesis_sync: bool,
}
impl Default for Config {
@ -87,6 +87,7 @@ impl Default for Config {
data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
db_name: "chain_db".to_string(),
freezer_db_path: None,
blobs_db_path: None,
log_file: PathBuf::from(""),
genesis: <_>::default(),
store: <_>::default(),
@ -96,19 +97,19 @@ impl Default for Config {
sync_eth1_chain: false,
eth1: <_>::default(),
execution_layer: None,
trusted_setup: None,
graffiti: Graffiti::default(),
http_api: <_>::default(),
http_metrics: <_>::default(),
monitoring_api: None,
slasher: None,
validator_monitor_auto: false,
validator_monitor_pubkeys: vec![],
validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
validator_monitor: <_>::default(),
logger_config: LoggerConfig::default(),
beacon_processor: <_>::default(),
genesis_state_url: <_>::default(),
// This default value should always be overwritten by the CLI default value.
genesis_state_url_timeout: Duration::from_secs(60),
allow_insecure_genesis_sync: false,
}
}
}
@ -150,11 +151,31 @@ impl Config {
.unwrap_or_else(|| self.default_freezer_db_path())
}
/// Fetch default path to use for the blobs database.
fn default_blobs_db_path(&self) -> PathBuf {
self.get_data_dir().join(DEFAULT_BLOBS_DB_DIR)
}
/// Returns the path to which the client may initialize the on-disk blobs database.
///
/// Will attempt to use the user-supplied path from e.g. the CLI, or will default
/// to the blobs directory under the data dir.
pub fn get_blobs_db_path(&self) -> PathBuf {
self.blobs_db_path
.clone()
.unwrap_or_else(|| self.default_blobs_db_path())
}
/// Get the freezer DB path, creating it if necessary.
pub fn create_freezer_db_path(&self) -> Result<PathBuf, String> {
ensure_dir_exists(self.get_freezer_db_path())
}
/// Get the blobs DB path, creating it if necessary.
pub fn create_blobs_db_path(&self) -> Result<PathBuf, String> {
ensure_dir_exists(self.get_blobs_db_path())
}
/// Returns the "modern" path to the data_dir.
///
/// See `Self::get_data_dir` documentation for more info.

View File

@ -1,6 +1,7 @@
use crate::metrics;
use beacon_chain::{
capella_readiness::CapellaReadiness,
deneb_readiness::DenebReadiness,
merge_readiness::{GenesisExecutionPayloadStatus, MergeConfig, MergeReadiness},
BeaconChain, BeaconChainTypes, ExecutionStatus,
};
@ -319,6 +320,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
eth1_logging(&beacon_chain, &log);
merge_readiness_logging(current_slot, &beacon_chain, &log).await;
capella_readiness_logging(current_slot, &beacon_chain, &log).await;
deneb_readiness_logging(current_slot, &beacon_chain, &log).await;
}
};
@ -356,8 +358,8 @@ async fn merge_readiness_logging<T: BeaconChainTypes>(
}
if merge_completed && !has_execution_layer {
// Logging of the EE being offline is handled in the other readiness logging functions.
if !beacon_chain.is_time_to_prepare_for_capella(current_slot) {
// logging of the EE being offline is handled in `capella_readiness_logging()`
error!(
log,
"Execution endpoint required";
@ -445,12 +447,15 @@ async fn capella_readiness_logging<T: BeaconChainTypes>(
}
if capella_completed && !has_execution_layer {
error!(
log,
"Execution endpoint required";
"info" => "you need a Capella enabled execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html"
);
// Logging of the EE being offline is handled in the other readiness logging functions.
if !beacon_chain.is_time_to_prepare_for_deneb(current_slot) {
error!(
log,
"Execution endpoint required";
"info" => "you need a Capella enabled execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html"
);
}
return;
}
@ -479,6 +484,65 @@ async fn capella_readiness_logging<T: BeaconChainTypes>(
}
}
/// Provides some helpful logging to users to indicate if their node is ready for Deneb
async fn deneb_readiness_logging<T: BeaconChainTypes>(
current_slot: Slot,
beacon_chain: &BeaconChain<T>,
log: &Logger,
) {
let deneb_completed = beacon_chain
.canonical_head
.cached_head()
.snapshot
.beacon_block
.message()
.body()
.execution_payload()
.map_or(false, |payload| payload.blob_gas_used().is_ok());
let has_execution_layer = beacon_chain.execution_layer.is_some();
if deneb_completed && has_execution_layer
|| !beacon_chain.is_time_to_prepare_for_deneb(current_slot)
{
return;
}
if deneb_completed && !has_execution_layer {
error!(
log,
"Execution endpoint required";
"info" => "you need a Deneb enabled execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html"
);
return;
}
match beacon_chain.check_deneb_readiness().await {
DenebReadiness::Ready => {
info!(
log,
"Ready for Deneb";
"info" => "ensure the execution endpoint is updated to the latest Deneb/Cancun release"
)
}
readiness @ DenebReadiness::ExchangeCapabilitiesFailed { error: _ } => {
error!(
log,
"Not ready for Deneb";
"hint" => "the execution endpoint may be offline",
"info" => %readiness,
)
}
readiness => warn!(
log,
"Not ready for Deneb";
"hint" => "try updating the execution endpoint",
"info" => %readiness,
),
}
}
async fn genesis_execution_payload_logging<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
log: &Logger,

View File

@ -13,7 +13,7 @@ pub enum Error {
/// Some `Eth1Block` was provided with the same block number but different data. The source
/// of eth1 data is inconsistent.
Conflicting(u64),
/// The given block was not one block number higher than the higest known block number.
/// The given block was not one block number higher than the highest known block number.
NonConsecutive { given: u64, expected: u64 },
/// Some invariant was violated, there is a likely bug in the code.
Internal(String),

View File

@ -25,6 +25,7 @@ hex = { workspace = true }
ethereum_ssz = { workspace = true }
ssz_types = { workspace = true }
eth2 = { workspace = true }
kzg = { workspace = true }
state_processing = { workspace = true }
superstruct = { workspace = true }
lru = { workspace = true }
@ -41,11 +42,6 @@ lazy_static = { workspace = true }
ethers-core = { workspace = true }
builder_client = { path = "../builder_client" }
fork_choice = { workspace = true }
mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" }
axum = "0.6"
hyper = "0.14"
ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" }
ssz_rs = "0.9.0"
tokio-stream = { workspace = true }
strum = { workspace = true }
keccak-hash = "0.10.0"
@ -54,3 +50,4 @@ triehash = "0.8.4"
hash-db = "0.15.2"
pretty_reqwest_error = { workspace = true }
arc-swap = "1.6.0"
eth2_network_config = { workspace = true }

View File

@ -7,7 +7,7 @@ use ethers_core::utils::rlp::RlpStream;
use keccak_hash::KECCAK_EMPTY_LIST_RLP;
use triehash::ordered_trie_root;
use types::{
map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash,
map_execution_block_header_fields_base, Address, BeaconBlockRef, EthSpec, ExecutionBlockHash,
ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256,
};
@ -18,6 +18,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
/// transactions.
pub fn calculate_execution_block_hash(
payload: ExecutionPayloadRef<T>,
parent_beacon_block_root: Hash256,
) -> (ExecutionBlockHash, Hash256) {
// Calculate the transactions root.
// We're currently using a deprecated Parity library for this. We should move to a
@ -37,12 +38,23 @@ impl<T: EthSpec> ExecutionLayer<T> {
None
};
let rlp_blob_gas_used = payload.blob_gas_used().ok();
let rlp_excess_blob_gas = payload.excess_blob_gas().ok();
// Calculate parent beacon block root (post-Deneb).
let rlp_parent_beacon_block_root = rlp_excess_blob_gas
.as_ref()
.map(|_| parent_beacon_block_root);
// Construct the block header.
let exec_block_header = ExecutionBlockHeader::from_payload(
payload,
KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(),
rlp_transactions_root,
rlp_withdrawals_root,
rlp_blob_gas_used,
rlp_excess_blob_gas,
rlp_parent_beacon_block_root,
);
// Hash the RLP encoding of the block header.
@ -56,10 +68,14 @@ impl<T: EthSpec> ExecutionLayer<T> {
/// Verify `payload.block_hash` locally within Lighthouse.
///
/// No remote calls to the execution client will be made, so this is quite a cheap check.
pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef<T>) -> Result<(), Error> {
pub fn verify_payload_block_hash(&self, block: BeaconBlockRef<T>) -> Result<(), Error> {
let payload = block.execution_payload()?.execution_payload_ref();
let parent_beacon_block_root = block.parent_root();
let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH);
let (header_hash, rlp_transactions_root) = Self::calculate_execution_block_hash(payload);
let (header_hash, rlp_transactions_root) =
Self::calculate_execution_block_hash(payload, parent_beacon_block_root);
if header_hash != payload.block_hash() {
return Err(Error::BlockHashMismatch {
@ -88,12 +104,21 @@ pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec<u8> {
pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec<u8> {
let mut rlp_header_stream = RlpStream::new();
rlp_header_stream.begin_unbounded_list();
map_execution_block_header_fields_except_withdrawals!(&header, |_, field| {
map_execution_block_header_fields_base!(&header, |_, field| {
rlp_header_stream.append(field);
});
if let Some(withdrawals_root) = &header.withdrawals_root {
rlp_header_stream.append(withdrawals_root);
}
if let Some(blob_gas_used) = &header.blob_gas_used {
rlp_header_stream.append(blob_gas_used);
}
if let Some(excess_blob_gas) = &header.excess_blob_gas {
rlp_header_stream.append(excess_blob_gas);
}
if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root {
rlp_header_stream.append(parent_beacon_block_root);
}
rlp_header_stream.finalize_unbounded_list();
rlp_header_stream.out().into()
}
@ -140,6 +165,9 @@ mod test {
nonce: Hash64::zero(),
base_fee_per_gas: 0x036b_u64.into(),
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
};
let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b";
let expected_hash =
@ -168,6 +196,9 @@ mod test {
nonce: Hash64::zero(),
base_fee_per_gas: 0x036b_u64.into(),
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
};
let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b";
let expected_hash =
@ -197,10 +228,43 @@ mod test {
nonce: Hash64::zero(),
base_fee_per_gas: 0x34187b238_u64.into(),
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
};
let expected_hash =
Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351")
.unwrap();
test_rlp_encoding(&header, None, expected_hash);
}
#[test]
fn test_rlp_encode_block_deneb() {
let header = ExecutionBlockHeader {
parent_hash: Hash256::from_str("172864416698b842f4c92f7b476be294b4ef720202779df194cd225f531053ab").unwrap(),
ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(),
beneficiary: Address::from_str("878705ba3f8bc32fcf7f4caa1a35e72af65cf766").unwrap(),
state_root: Hash256::from_str("c6457d0df85c84c62d1c68f68138b6e796e8a44fb44de221386fb2d5611c41e0").unwrap(),
transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(),
receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(),
logs_bloom:<[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(),
difficulty: 0.into(),
number: 97.into(),
gas_limit: 27482534.into(),
gas_used: 0.into(),
timestamp: 1692132829u64,
extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(),
mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(),
nonce: Hash64::zero(),
base_fee_per_gas: 2374u64.into(),
withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()),
blob_gas_used: Some(0x0u64),
excess_blob_gas: Some(0x0u64),
parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()),
};
let expected_hash =
Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408")
.unwrap();
test_rlp_encoding(&header, None, expected_hash);
}
}

View File

@ -1,26 +1,35 @@
use crate::engines::ForkchoiceState;
use crate::http::{
ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2,
ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3,
ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1,
ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2,
ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1,
ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3,
};
use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2};
pub use ethers_core::types::Transaction;
use ethers_core::utils::rlp::{self, Decodable, Rlp};
use eth2::types::{
BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2,
SsePayloadAttributesV3,
};
use ethers_core::types::Transaction;
use ethers_core::utils::rlp;
use ethers_core::utils::rlp::{Decodable, Rlp};
use http::deposit_methods::RpcError;
pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1};
use pretty_reqwest_error::PrettyReqwestError;
use reqwest::StatusCode;
use serde::{Deserialize, Serialize};
use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
use std::convert::TryFrom;
use strum::IntoStaticStr;
use superstruct::superstruct;
pub use types::{
Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader,
Address, BeaconBlockRef, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader,
ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList,
Withdrawal, Withdrawals,
};
use types::{ExecutionPayloadCapella, ExecutionPayloadMerge};
use types::{
BeaconStateError, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge,
KzgProofs, VersionedHash,
};
pub mod auth;
pub mod http;
@ -48,14 +57,12 @@ pub enum Error {
PayloadIdUnavailable,
TransitionConfigurationMismatch,
PayloadConversionLogicFlaw,
DeserializeTransaction(ssz_types::Error),
DeserializeTransactions(ssz_types::Error),
SszError(ssz_types::Error),
DeserializeWithdrawals(ssz_types::Error),
BuilderApi(builder_client::Error),
IncorrectStateVariant,
RequiredMethodUnsupported(&'static str),
UnsupportedForkVariant(String),
BadConversion(String),
RlpDecoderError(rlp::DecoderError),
}
@ -96,6 +103,12 @@ impl From<rlp::DecoderError> for Error {
}
}
impl From<ssz_types::Error> for Error {
fn from(e: ssz_types::Error) -> Self {
Error::SszError(e)
}
}
#[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)]
#[strum(serialize_all = "snake_case")]
pub enum PayloadStatusV1Status {
@ -137,7 +150,7 @@ pub struct ExecutionBlock {
/// Representation of an execution block with enough detail to reconstruct a payload.
#[superstruct(
variants(Merge, Capella),
variants(Merge, Capella, Deneb),
variant_attributes(
derive(Clone, Debug, PartialEq, Serialize, Deserialize,),
serde(bound = "T: EthSpec", rename_all = "camelCase"),
@ -171,8 +184,14 @@ pub struct ExecutionBlockWithTransactions<T: EthSpec> {
#[serde(rename = "hash")]
pub block_hash: ExecutionBlockHash,
pub transactions: Vec<Transaction>,
#[superstruct(only(Capella))]
#[superstruct(only(Capella, Deneb))]
pub withdrawals: Vec<JsonWithdrawal>,
#[superstruct(only(Deneb))]
#[serde(with = "serde_utils::u64_hex_be")]
pub blob_gas_used: u64,
#[superstruct(only(Deneb))]
#[serde(with = "serde_utils::u64_hex_be")]
pub excess_blob_gas: u64,
}
impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T> {
@ -226,13 +245,39 @@ impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for ExecutionBlockWithTransactions
.collect(),
})
}
ExecutionPayload::Deneb(block) => Self::Deneb(ExecutionBlockWithTransactionsDeneb {
parent_hash: block.parent_hash,
fee_recipient: block.fee_recipient,
state_root: block.state_root,
receipts_root: block.receipts_root,
logs_bloom: block.logs_bloom,
prev_randao: block.prev_randao,
block_number: block.block_number,
gas_limit: block.gas_limit,
gas_used: block.gas_used,
timestamp: block.timestamp,
extra_data: block.extra_data,
base_fee_per_gas: block.base_fee_per_gas,
block_hash: block.block_hash,
transactions: block
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()?,
withdrawals: Vec::from(block.withdrawals)
.into_iter()
.map(|withdrawal| withdrawal.into())
.collect(),
blob_gas_used: block.blob_gas_used,
excess_blob_gas: block.excess_blob_gas,
}),
};
Ok(json_payload)
}
}
#[superstruct(
variants(V1, V2),
variants(V1, V2, V3),
variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
@ -245,8 +290,10 @@ pub struct PayloadAttributes {
pub prev_randao: Hash256,
#[superstruct(getter(copy))]
pub suggested_fee_recipient: Address,
#[superstruct(only(V2))]
#[superstruct(only(V2, V3))]
pub withdrawals: Vec<Withdrawal>,
#[superstruct(only(V3), partial_getter(copy))]
pub parent_beacon_block_root: Hash256,
}
impl PayloadAttributes {
@ -255,14 +302,24 @@ impl PayloadAttributes {
prev_randao: Hash256,
suggested_fee_recipient: Address,
withdrawals: Option<Vec<Withdrawal>>,
parent_beacon_block_root: Option<Hash256>,
) -> Self {
match withdrawals {
Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
}),
Some(withdrawals) => match parent_beacon_block_root {
Some(parent_beacon_block_root) => PayloadAttributes::V3(PayloadAttributesV3 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
parent_beacon_block_root,
}),
None => PayloadAttributes::V2(PayloadAttributesV2 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
}),
},
None => PayloadAttributes::V1(PayloadAttributesV1 {
timestamp,
prev_randao,
@ -295,6 +352,19 @@ impl From<PayloadAttributes> for SsePayloadAttributes {
suggested_fee_recipient,
withdrawals,
}),
PayloadAttributes::V3(PayloadAttributesV3 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
parent_beacon_block_root,
}) => Self::V3(SsePayloadAttributesV3 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
parent_beacon_block_root,
}),
}
}
}
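
A hedged usage sketch of the extended constructor; the local variables are assumed, not from this commit. The two optional arguments select the attributes version:

// None withdrawals              -> V1 (pre-Capella)
// Some withdrawals, no root     -> V2 (Capella)
// Some withdrawals + some root  -> V3 (Deneb)
let v1 = PayloadAttributes::new(timestamp, prev_randao, fee_recipient, None, None);
let v2 = PayloadAttributes::new(timestamp, prev_randao, fee_recipient, Some(withdrawals.clone()), None);
let v3 = PayloadAttributes::new(timestamp, prev_randao, fee_recipient, Some(withdrawals), Some(parent_root));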
@ -320,7 +390,7 @@ pub struct ProposeBlindedBlockResponse {
}
#[superstruct(
variants(Merge, Capella),
variants(Merge, Capella, Deneb),
variant_attributes(derive(Clone, Debug, PartialEq),),
map_into(ExecutionPayload),
map_ref_into(ExecutionPayloadRef),
@ -333,7 +403,27 @@ pub struct GetPayloadResponse<T: EthSpec> {
pub execution_payload: ExecutionPayloadMerge<T>,
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
pub execution_payload: ExecutionPayloadCapella<T>,
#[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))]
pub execution_payload: ExecutionPayloadDeneb<T>,
pub block_value: Uint256,
#[superstruct(only(Deneb))]
pub blobs_bundle: BlobsBundle<T>,
#[superstruct(only(Deneb), partial_getter(copy))]
pub should_override_builder: bool,
}
impl<E: EthSpec> GetPayloadResponse<E> {
pub fn fee_recipient(&self) -> Address {
ExecutionPayloadRef::from(self.to_ref()).fee_recipient()
}
pub fn block_hash(&self) -> ExecutionBlockHash {
ExecutionPayloadRef::from(self.to_ref()).block_hash()
}
pub fn block_number(&self) -> u64 {
ExecutionPayloadRef::from(self.to_ref()).block_number()
}
}
impl<'a, T: EthSpec> From<GetPayloadResponseRef<'a, T>> for ExecutionPayloadRef<'a, T> {
@ -352,21 +442,35 @@ impl<T: EthSpec> From<GetPayloadResponse<T>> for ExecutionPayload<T> {
}
}
impl<T: EthSpec> From<GetPayloadResponse<T>> for (ExecutionPayload<T>, Uint256) {
impl<T: EthSpec> From<GetPayloadResponse<T>>
for (ExecutionPayload<T>, Uint256, Option<BlobsBundle<T>>)
{
fn from(response: GetPayloadResponse<T>) -> Self {
match response {
GetPayloadResponse::Merge(inner) => (
ExecutionPayload::Merge(inner.execution_payload),
inner.block_value,
None,
),
GetPayloadResponse::Capella(inner) => (
ExecutionPayload::Capella(inner.execution_payload),
inner.block_value,
None,
),
GetPayloadResponse::Deneb(inner) => (
ExecutionPayload::Deneb(inner.execution_payload),
inner.block_value,
Some(inner.blobs_bundle),
),
}
}
}
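
Since the tuple conversion now carries an optional blobs bundle, a downstream caller might destructure it like this (a sketch; `response` is assumed to be a `GetPayloadResponse<E>`):

let (payload, block_value, maybe_blobs): (ExecutionPayload<E>, Uint256, Option<BlobsBundle<E>>) =
    response.into();
// `maybe_blobs` is `Some` only for Deneb payloads.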
pub enum GetPayloadResponseType<E: EthSpec> {
Full(GetPayloadResponse<E>),
Blinded(GetPayloadResponse<E>),
}
impl<T: EthSpec> GetPayloadResponse<T> {
pub fn execution_payload_ref(&self) -> ExecutionPayloadRef<T> {
self.to_ref().into()
@ -435,6 +539,138 @@ impl<E: EthSpec> ExecutionPayloadBodyV1<E> {
))
}
}
ExecutionPayloadHeader::Deneb(header) => {
if let Some(withdrawals) = self.withdrawals {
Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb {
parent_hash: header.parent_hash,
fee_recipient: header.fee_recipient,
state_root: header.state_root,
receipts_root: header.receipts_root,
logs_bloom: header.logs_bloom,
prev_randao: header.prev_randao,
block_number: header.block_number,
gas_limit: header.gas_limit,
gas_used: header.gas_used,
timestamp: header.timestamp,
extra_data: header.extra_data,
base_fee_per_gas: header.base_fee_per_gas,
block_hash: header.block_hash,
transactions: self.transactions,
withdrawals,
blob_gas_used: header.blob_gas_used,
excess_blob_gas: header.excess_blob_gas,
}))
} else {
Err(format!(
"block {} is post capella but payload body doesn't have withdrawals",
header.block_hash
))
}
}
}
}
}
#[superstruct(
variants(Merge, Capella, Deneb),
variant_attributes(derive(Clone, Debug, PartialEq),),
map_into(ExecutionPayload),
map_ref_into(ExecutionPayloadRef),
cast_error(
ty = "BeaconStateError",
expr = "BeaconStateError::IncorrectStateVariant"
),
partial_getter_error(
ty = "BeaconStateError",
expr = "BeaconStateError::IncorrectStateVariant"
)
)]
#[derive(Clone, Debug, PartialEq)]
pub struct NewPayloadRequest<E: EthSpec> {
#[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))]
pub execution_payload: ExecutionPayloadMerge<E>,
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
pub execution_payload: ExecutionPayloadCapella<E>,
#[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))]
pub execution_payload: ExecutionPayloadDeneb<E>,
#[superstruct(only(Deneb))]
pub versioned_hashes: Vec<VersionedHash>,
#[superstruct(only(Deneb))]
pub parent_beacon_block_root: Hash256,
}
impl<E: EthSpec> NewPayloadRequest<E> {
pub fn parent_hash(&self) -> ExecutionBlockHash {
match self {
Self::Merge(payload) => payload.execution_payload.parent_hash,
Self::Capella(payload) => payload.execution_payload.parent_hash,
Self::Deneb(payload) => payload.execution_payload.parent_hash,
}
}
pub fn block_hash(&self) -> ExecutionBlockHash {
match self {
Self::Merge(payload) => payload.execution_payload.block_hash,
Self::Capella(payload) => payload.execution_payload.block_hash,
Self::Deneb(payload) => payload.execution_payload.block_hash,
}
}
pub fn block_number(&self) -> u64 {
match self {
Self::Merge(payload) => payload.execution_payload.block_number,
Self::Capella(payload) => payload.execution_payload.block_number,
Self::Deneb(payload) => payload.execution_payload.block_number,
}
}
pub fn into_execution_payload(self) -> ExecutionPayload<E> {
map_new_payload_request_into_execution_payload!(self, |request, cons| {
cons(request.execution_payload)
})
}
}
impl<'a, E: EthSpec> TryFrom<BeaconBlockRef<'a, E>> for NewPayloadRequest<E> {
type Error = BeaconStateError;
fn try_from(block: BeaconBlockRef<'a, E>) -> Result<Self, Self::Error> {
match block {
BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => {
Err(Self::Error::IncorrectStateVariant)
}
BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge {
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
})),
BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella {
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
})),
BeaconBlockRef::Deneb(block_ref) => Ok(Self::Deneb(NewPayloadRequestDeneb {
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
versioned_hashes: block_ref
.body
.blob_kzg_commitments
.iter()
.map(kzg_commitment_to_versioned_hash)
.collect(),
parent_beacon_block_root: block_ref.parent_root,
})),
}
}
}
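
The kzg_commitment_to_versioned_hash mapping used above follows EIP-4844: hash the 48-byte commitment with SHA-256, then overwrite the first byte with the version. A self-contained sketch (the real helper lives in state_processing; the sha2 crate is an assumption here):

use sha2::{Digest, Sha256};

const VERSIONED_HASH_VERSION_KZG: u8 = 0x01;

/// EIP-4844 versioned hash: sha256(commitment) with byte 0 set to 0x01.
fn versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
    let mut hash: [u8; 32] = Sha256::digest(commitment).into();
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    hash
}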
impl<E: EthSpec> TryFrom<ExecutionPayload<E>> for NewPayloadRequest<E> {
type Error = BeaconStateError;
fn try_from(payload: ExecutionPayload<E>) -> Result<Self, Self::Error> {
match payload {
ExecutionPayload::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge {
execution_payload: payload,
})),
ExecutionPayload::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella {
execution_payload: payload,
})),
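            // A Deneb payload cannot be converted on its own: the request additionally
            // needs `versioned_hashes` and `parent_beacon_block_root`, which only the
            // beacon block (see `TryFrom<BeaconBlockRef>` above) can supply.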
ExecutionPayload::Deneb(_) => Err(Self::Error::IncorrectStateVariant),
}
}
}
@ -443,12 +679,15 @@ impl<E: EthSpec> ExecutionPayloadBodyV1<E> {
pub struct EngineCapabilities {
pub new_payload_v1: bool,
pub new_payload_v2: bool,
pub new_payload_v3: bool,
pub forkchoice_updated_v1: bool,
pub forkchoice_updated_v2: bool,
pub forkchoice_updated_v3: bool,
pub get_payload_bodies_by_hash_v1: bool,
pub get_payload_bodies_by_range_v1: bool,
pub get_payload_v1: bool,
pub get_payload_v2: bool,
pub get_payload_v3: bool,
}
impl EngineCapabilities {
@ -460,12 +699,18 @@ impl EngineCapabilities {
if self.new_payload_v2 {
response.push(ENGINE_NEW_PAYLOAD_V2);
}
if self.new_payload_v3 {
response.push(ENGINE_NEW_PAYLOAD_V3);
}
if self.forkchoice_updated_v1 {
response.push(ENGINE_FORKCHOICE_UPDATED_V1);
}
if self.forkchoice_updated_v2 {
response.push(ENGINE_FORKCHOICE_UPDATED_V2);
}
if self.forkchoice_updated_v3 {
response.push(ENGINE_FORKCHOICE_UPDATED_V3);
}
if self.get_payload_bodies_by_hash_v1 {
response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1);
}
@ -478,6 +723,9 @@ impl EngineCapabilities {
if self.get_payload_v2 {
response.push(ENGINE_GET_PAYLOAD_V2);
}
if self.get_payload_v3 {
response.push(ENGINE_GET_PAYLOAD_V3);
}
response
}

View File

@ -32,14 +32,17 @@ pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1);
pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1";
pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2";
pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3";
pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8);
pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1";
pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2";
pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3";
pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2);
pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1";
pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2";
pub const ENGINE_FORKCHOICE_UPDATED_V3: &str = "engine_forkchoiceUpdatedV3";
pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8);
pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1";
@ -58,10 +61,13 @@ pub const METHOD_NOT_FOUND_CODE: i64 = -32601;
pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[
ENGINE_NEW_PAYLOAD_V1,
ENGINE_NEW_PAYLOAD_V2,
ENGINE_NEW_PAYLOAD_V3,
ENGINE_GET_PAYLOAD_V1,
ENGINE_GET_PAYLOAD_V2,
ENGINE_GET_PAYLOAD_V3,
ENGINE_FORKCHOICE_UPDATED_V1,
ENGINE_FORKCHOICE_UPDATED_V2,
ENGINE_FORKCHOICE_UPDATED_V3,
ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1,
ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1,
];
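// Sanity sketch (hypothetical test, not part of this diff): the static list above
// should now advertise the V3 variants of newPayload, getPayload and
// forkchoiceUpdated alongside the earlier versions.
#[test]
fn lighthouse_capabilities_include_v3() {
    for method in [
        ENGINE_NEW_PAYLOAD_V3,
        ENGINE_GET_PAYLOAD_V3,
        ENGINE_FORKCHOICE_UPDATED_V3,
    ] {
        assert!(LIGHTHOUSE_CAPABILITIES.contains(&method));
    }
}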
@ -72,12 +78,15 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[
pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
new_payload_v1: true,
new_payload_v2: false,
new_payload_v3: false,
forkchoice_updated_v1: true,
forkchoice_updated_v2: false,
forkchoice_updated_v3: false,
get_payload_bodies_by_hash_v1: false,
get_payload_bodies_by_range_v1: false,
get_payload_v1: true,
get_payload_v2: false,
get_payload_v3: false,
};
/// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object.
@ -741,6 +750,14 @@ impl HttpJsonRpc {
)
.await?,
),
ForkName::Deneb => ExecutionBlockWithTransactions::Deneb(
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Base | ForkName::Altair => {
return Err(Error::UnsupportedForkVariant(format!(
"called get_block_by_hash_with_txns with fork {:?}",
@ -784,6 +801,27 @@ impl HttpJsonRpc {
Ok(response.into())
}
pub async fn new_payload_v3<T: EthSpec>(
&self,
new_payload_request_deneb: NewPayloadRequestDeneb<T>,
) -> Result<PayloadStatusV1, Error> {
let params = json!([
JsonExecutionPayload::V3(new_payload_request_deneb.execution_payload.into()),
new_payload_request_deneb.versioned_hashes,
new_payload_request_deneb.parent_beacon_block_root,
]);
let response: JsonPayloadStatusV1 = self
.rpc_request(
ENGINE_NEW_PAYLOAD_V3,
params,
ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;
Ok(response.into())
}
pub async fn get_payload_v1<T: EthSpec>(
&self,
payload_id: PayloadId,
@ -835,10 +873,33 @@ impl HttpJsonRpc {
.await?;
Ok(JsonGetPayloadResponse::V2(response).into())
}
ForkName::Base | ForkName::Altair | ForkName::Deneb => Err(
Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)),
),
}
}
pub async fn get_payload_v3<T: EthSpec>(
&self,
fork_name: ForkName,
payload_id: PayloadId,
) -> Result<GetPayloadResponse<T>, Error> {
let params = json!([JsonPayloadIdRequest::from(payload_id)]);
match fork_name {
ForkName::Deneb => {
let response: JsonGetPayloadResponseV3<T> = self
.rpc_request(
ENGINE_GET_PAYLOAD_V3,
params,
ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;
Ok(JsonGetPayloadResponse::V3(response).into())
}
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Err(
Error::UnsupportedForkVariant(format!("called get_payload_v3 with {}", fork_name)),
),
}
}
@ -884,6 +945,27 @@ impl HttpJsonRpc {
Ok(response.into())
}
pub async fn forkchoice_updated_v3(
&self,
forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>,
) -> Result<ForkchoiceUpdatedResponse, Error> {
let params = json!([
JsonForkchoiceStateV1::from(forkchoice_state),
payload_attributes.map(JsonPayloadAttributes::from)
]);
let response: JsonForkchoiceUpdatedV1Response = self
.rpc_request(
ENGINE_FORKCHOICE_UPDATED_V3,
params,
ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;
Ok(response.into())
}
pub async fn get_payload_bodies_by_hash_v1<E: EthSpec>(
&self,
block_hashes: Vec<ExecutionBlockHash>,
@ -950,14 +1032,17 @@ impl HttpJsonRpc {
Ok(capabilities) => Ok(EngineCapabilities {
new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1),
new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2),
new_payload_v3: capabilities.contains(ENGINE_NEW_PAYLOAD_V3),
forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1),
forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2),
forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3),
get_payload_bodies_by_hash_v1: capabilities
.contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1),
get_payload_bodies_by_range_v1: capabilities
.contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1),
get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1),
get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2),
get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3),
}),
}
}
@ -994,15 +1079,28 @@ impl HttpJsonRpc {
    // Automatically selects the highest version of new_payload that the execution engine supports.
pub async fn new_payload<T: EthSpec>(
&self,
new_payload_request: NewPayloadRequest<T>,
) -> Result<PayloadStatusV1, Error> {
let engine_capabilities = self.get_engine_capabilities(None).await?;
match new_payload_request {
NewPayloadRequest::Merge(_) | NewPayloadRequest::Capella(_) => {
if engine_capabilities.new_payload_v2 {
self.new_payload_v2(new_payload_request.into_execution_payload())
.await
} else if engine_capabilities.new_payload_v1 {
self.new_payload_v1(new_payload_request.into_execution_payload())
.await
} else {
Err(Error::RequiredMethodUnsupported("engine_newPayload"))
}
}
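            // Deneb requests carry versioned hashes and a parent beacon block root,
            // which only `engine_newPayloadV3` accepts, so there is no V1/V2 fallback.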
NewPayloadRequest::Deneb(new_payload_request_deneb) => {
if engine_capabilities.new_payload_v3 {
self.new_payload_v3(new_payload_request_deneb).await
} else {
Err(Error::RequiredMethodUnsupported("engine_newPayloadV3"))
}
}
}
}
@ -1014,12 +1112,27 @@ impl HttpJsonRpc {
payload_id: PayloadId,
) -> Result<GetPayloadResponse<T>, Error> {
let engine_capabilities = self.get_engine_capabilities(None).await?;
match fork_name {
ForkName::Merge | ForkName::Capella => {
if engine_capabilities.get_payload_v2 {
self.get_payload_v2(fork_name, payload_id).await
                    } else if engine_capabilities.get_payload_v1 {
self.get_payload_v1(payload_id).await
} else {
Err(Error::RequiredMethodUnsupported("engine_getPayload"))
}
}
ForkName::Deneb => {
if engine_capabilities.get_payload_v3 {
self.get_payload_v3(fork_name, payload_id).await
} else {
Err(Error::RequiredMethodUnsupported("engine_getPayloadV3"))
}
}
ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!(
"called get_payload with {}",
fork_name
))),
}
}
@ -1028,14 +1141,41 @@ impl HttpJsonRpc {
pub async fn forkchoice_updated(
&self,
forkchoice_state: ForkchoiceState,
maybe_payload_attributes: Option<PayloadAttributes>,
) -> Result<ForkchoiceUpdatedResponse, Error> {
let engine_capabilities = self.get_engine_capabilities(None).await?;
if let Some(payload_attributes) = maybe_payload_attributes.as_ref() {
match payload_attributes {
PayloadAttributes::V1(_) | PayloadAttributes::V2(_) => {
if engine_capabilities.forkchoice_updated_v2 {
self.forkchoice_updated_v2(forkchoice_state, maybe_payload_attributes)
.await
} else if engine_capabilities.forkchoice_updated_v1 {
self.forkchoice_updated_v1(forkchoice_state, maybe_payload_attributes)
.await
} else {
Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated"))
}
}
PayloadAttributes::V3(_) => {
if engine_capabilities.forkchoice_updated_v3 {
self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes)
.await
} else {
Err(Error::RequiredMethodUnsupported(
"engine_forkchoiceUpdatedV3",
))
}
}
}
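            // Without payload attributes the call is version-agnostic, so prefer the
            // newest method the engine advertises.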
} else if engine_capabilities.forkchoice_updated_v3 {
self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes)
.await
} else if engine_capabilities.forkchoice_updated_v2 {
self.forkchoice_updated_v2(forkchoice_state, maybe_payload_attributes)
.await
} else if engine_capabilities.forkchoice_updated_v1 {
self.forkchoice_updated_v1(forkchoice_state, maybe_payload_attributes)
.await
} else {
Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated"))

View File

@ -2,10 +2,12 @@ use super::*;
use serde::{Deserialize, Serialize};
use strum::EnumString;
use superstruct::superstruct;
use types::beacon_block_body::KzgCommitments;
use types::blob_sidecar::BlobsList;
use types::{
EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb,
ExecutionPayloadMerge, FixedVector, Transactions, Unsigned, VariableList, Withdrawal,
};
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@ -61,7 +63,7 @@ pub struct JsonPayloadIdResponse {
}
#[superstruct(
variants(V1, V2, V3),
variant_attributes(
derive(Debug, PartialEq, Default, Serialize, Deserialize,),
serde(bound = "T: EthSpec", rename_all = "camelCase"),
@ -94,8 +96,14 @@ pub struct JsonExecutionPayload<T: EthSpec> {
pub block_hash: ExecutionBlockHash,
#[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")]
pub transactions: Transactions<T>,
#[superstruct(only(V2, V3))]
pub withdrawals: VariableList<JsonWithdrawal, T::MaxWithdrawalsPerPayload>,
#[superstruct(only(V3))]
#[serde(with = "serde_utils::u64_hex_be")]
pub blob_gas_used: u64,
#[superstruct(only(V3))]
#[serde(with = "serde_utils::u64_hex_be")]
pub excess_blob_gas: u64,
}
impl<T: EthSpec> From<ExecutionPayloadMerge<T>> for JsonExecutionPayloadV1<T> {
@ -144,12 +152,41 @@ impl<T: EthSpec> From<ExecutionPayloadCapella<T>> for JsonExecutionPayloadV2<T>
}
}
}
impl<T: EthSpec> From<ExecutionPayloadDeneb<T>> for JsonExecutionPayloadV3<T> {
fn from(payload: ExecutionPayloadDeneb<T>) -> Self {
JsonExecutionPayloadV3 {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas,
}
}
}
impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayload<T> {
fn from(execution_payload: ExecutionPayload<T>) -> Self {
match execution_payload {
ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()),
ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()),
ExecutionPayload::Deneb(payload) => JsonExecutionPayload::V3(payload.into()),
}
}
}
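// Serialization sketch (hypothetical; assumes `serde_json` and a `Default` impl on
// the payload struct, consistent with their use elsewhere in this codebase): a
// Deneb payload rendered through the V3 struct uses camelCase keys, and the new
// u64 fields serialize as 0x-prefixed hex via `serde_utils::u64_hex_be`.
fn example_v3_payload_json() -> Result<(), serde_json::Error> {
    use types::MainnetEthSpec;
    let payload = ExecutionPayloadDeneb::<MainnetEthSpec>::default();
    let json = JsonExecutionPayloadV3::from(payload);
    let text = serde_json::to_string(&json)?;
    // A default payload has `blob_gas_used == 0`, which serializes as "0x0".
    assert!(text.contains("\"blobGasUsed\":\"0x0\""));
    Ok(())
}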
@ -200,18 +237,47 @@ impl<T: EthSpec> From<JsonExecutionPayloadV2<T>> for ExecutionPayloadCapella<T>
}
}
}
impl<T: EthSpec> From<JsonExecutionPayloadV3<T>> for ExecutionPayloadDeneb<T> {
fn from(payload: JsonExecutionPayloadV3<T>) -> Self {
ExecutionPayloadDeneb {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas,
}
}
}
impl<T: EthSpec> From<JsonExecutionPayload<T>> for ExecutionPayload<T> {
fn from(json_execution_payload: JsonExecutionPayload<T>) -> Self {
match json_execution_payload {
JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()),
JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()),
JsonExecutionPayload::V3(payload) => ExecutionPayload::Deneb(payload.into()),
}
}
}
#[superstruct(
variants(V1, V2, V3),
variant_attributes(
derive(Debug, PartialEq, Serialize, Deserialize),
serde(bound = "T: EthSpec", rename_all = "camelCase")
@ -226,8 +292,14 @@ pub struct JsonGetPayloadResponse<T: EthSpec> {
pub execution_payload: JsonExecutionPayloadV1<T>,
#[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))]
pub execution_payload: JsonExecutionPayloadV2<T>,
#[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))]
pub execution_payload: JsonExecutionPayloadV3<T>,
#[serde(with = "serde_utils::u256_hex_be")]
pub block_value: Uint256,
#[superstruct(only(V3))]
pub blobs_bundle: JsonBlobsBundleV1<T>,
#[superstruct(only(V3))]
pub should_override_builder: bool,
}
impl<T: EthSpec> From<JsonGetPayloadResponse<T>> for GetPayloadResponse<T> {
@ -245,6 +317,14 @@ impl<T: EthSpec> From<JsonGetPayloadResponse<T>> for GetPayloadResponse<T> {
block_value: response.block_value,
})
}
JsonGetPayloadResponse::V3(response) => {
GetPayloadResponse::Deneb(GetPayloadResponseDeneb {
execution_payload: response.execution_payload.into(),
block_value: response.block_value,
blobs_bundle: response.blobs_bundle.into(),
should_override_builder: response.should_override_builder,
})
}
}
}
}
@ -284,7 +364,7 @@ impl From<JsonWithdrawal> for Withdrawal {
}
#[superstruct(
variants(V1, V2, V3),
variant_attributes(
derive(Debug, Clone, PartialEq, Serialize, Deserialize),
serde(rename_all = "camelCase")
@ -299,13 +379,15 @@ pub struct JsonPayloadAttributes {
pub timestamp: u64,
pub prev_randao: Hash256,
pub suggested_fee_recipient: Address,
#[superstruct(only(V2, V3))]
pub withdrawals: Vec<JsonWithdrawal>,
#[superstruct(only(V3))]
pub parent_beacon_block_root: Hash256,
}
impl From<PayloadAttributes> for JsonPayloadAttributes {
fn from(payload_attributes: PayloadAttributes) -> Self {
match payload_attributes {
PayloadAttributes::V1(pa) => Self::V1(JsonPayloadAttributesV1 {
timestamp: pa.timestamp,
prev_randao: pa.prev_randao,
@ -317,6 +399,13 @@ impl From<PayloadAttributes> for JsonPayloadAttributes {
suggested_fee_recipient: pa.suggested_fee_recipient,
withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(),
}),
PayloadAttributes::V3(pa) => Self::V3(JsonPayloadAttributesV3 {
timestamp: pa.timestamp,
prev_randao: pa.prev_randao,
suggested_fee_recipient: pa.suggested_fee_recipient,
withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(),
parent_beacon_block_root: pa.parent_beacon_block_root,
}),
}
}
}
@ -335,6 +424,41 @@ impl From<JsonPayloadAttributes> for PayloadAttributes {
suggested_fee_recipient: jpa.suggested_fee_recipient,
withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(),
}),
JsonPayloadAttributes::V3(jpa) => Self::V3(PayloadAttributesV3 {
timestamp: jpa.timestamp,
prev_randao: jpa.prev_randao,
suggested_fee_recipient: jpa.suggested_fee_recipient,
withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(),
parent_beacon_block_root: jpa.parent_beacon_block_root,
}),
}
}
}
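// Shape sketch (hypothetical): relative to V2, the V3 attributes add exactly one
// field, serialized as `parentBeaconBlockRoot`. Zeroed values are illustrative.
fn example_v3_attributes_json() -> Result<(), serde_json::Error> {
    let attributes = JsonPayloadAttributesV3 {
        timestamp: 0,
        prev_randao: Hash256::zero(),
        suggested_fee_recipient: Address::zero(),
        withdrawals: vec![],
        parent_beacon_block_root: Hash256::zero(),
    };
    let text = serde_json::to_string(&attributes)?;
    assert!(text.contains("\"parentBeaconBlockRoot\""));
    Ok(())
}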
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(bound = "E: EthSpec", rename_all = "camelCase")]
pub struct JsonBlobsBundleV1<E: EthSpec> {
pub commitments: KzgCommitments<E>,
pub proofs: KzgProofs<E>,
#[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
pub blobs: BlobsList<E>,
}
impl<E: EthSpec> From<BlobsBundle<E>> for JsonBlobsBundleV1<E> {
fn from(blobs_bundle: BlobsBundle<E>) -> Self {
Self {
commitments: blobs_bundle.commitments,
proofs: blobs_bundle.proofs,
blobs: blobs_bundle.blobs,
}
}
}
impl<E: EthSpec> From<JsonBlobsBundleV1<E>> for BlobsBundle<E> {
fn from(json_blobs_bundle: JsonBlobsBundleV1<E>) -> Self {
Self {
commitments: json_blobs_bundle.commitments,
proofs: json_blobs_bundle.proofs,
blobs: json_blobs_bundle.blobs,
}
}
}
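// Round-trip sketch (hypothetical; `MainnetEthSpec` assumed): the two `From` impls
// above are exact inverses, since both types carry the same three lists.
fn blobs_bundle_roundtrip() {
    use types::MainnetEthSpec;
    let bundle = BlobsBundle::<MainnetEthSpec>::default();
    let json: JsonBlobsBundleV1<MainnetEthSpec> = bundle.into();
    let roundtripped: BlobsBundle<MainnetEthSpec> = json.into();
    assert!(roundtripped.blobs.is_empty());
}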

View File

@ -8,17 +8,19 @@ use crate::HttpJsonRpc;
use lru::LruCache;
use slog::{debug, error, info, warn, Logger};
use std::future::Future;
use std::num::NonZeroUsize;
use std::sync::Arc;
use std::time::Duration;
use task_executor::TaskExecutor;
use tokio::sync::{watch, Mutex, RwLock};
use tokio_stream::wrappers::WatchStream;
use types::non_zero_usize::new_non_zero_usize;
use types::ExecutionBlockHash;
/// The number of payload IDs that will be stored for each `Engine`.
///
/// Since the size of each value is small (~800 bytes), a large number is used for safety.
const PAYLOAD_ID_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(512);
const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes
/// Stores the remembered state of an engine.

File diff suppressed because it is too large

View File

@ -1,13 +1,16 @@
use eth2::types::FullPayloadContents;
use lru::LruCache;
use parking_lot::Mutex;
use std::num::NonZeroUsize;
use tree_hash::TreeHash;
use types::non_zero_usize::new_non_zero_usize;
use types::{EthSpec, Hash256};
pub const DEFAULT_PAYLOAD_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(10);
/// A cache mapping execution payloads by tree hash roots.
pub struct PayloadCache<T: EthSpec> {
payloads: Mutex<LruCache<PayloadCacheId, FullPayloadContents<T>>>,
}
#[derive(Hash, PartialEq, Eq)]
@ -22,16 +25,16 @@ impl<T: EthSpec> Default for PayloadCache<T> {
}
impl<T: EthSpec> PayloadCache<T> {
pub fn put(&self, payload: FullPayloadContents<T>) -> Option<FullPayloadContents<T>> {
let root = payload.payload_ref().tree_hash_root();
self.payloads.lock().put(PayloadCacheId(root), payload)
}
pub fn pop(&self, root: &Hash256) -> Option<FullPayloadContents<T>> {
self.payloads.lock().pop(&PayloadCacheId(*root))
}
pub fn get(&self, hash: &Hash256) -> Option<FullPayloadContents<T>> {
self.payloads.lock().get(&PayloadCacheId(*hash)).cloned()
}
}
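// Usage sketch (hypothetical; assumes the `FullPayloadContents::Payload` variant
// wraps a bare `ExecutionPayload` and that payload types derive `Default`):
// `get` is a non-destructive lookup, while `pop` removes the entry.
fn payload_cache_example() {
    use types::{ExecutionPayload, ExecutionPayloadMerge, MainnetEthSpec};
    let cache = PayloadCache::<MainnetEthSpec>::default();
    let contents =
        FullPayloadContents::Payload(ExecutionPayload::Merge(ExecutionPayloadMerge::default()));
    let root = contents.payload_ref().tree_hash_root();
    cache.put(contents);
    assert!(cache.get(&root).is_some());
    assert!(cache.pop(&root).is_some());
    assert!(cache.get(&root).is_none());
}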

View File

@ -1,4 +1,5 @@
use crate::engines::ForkchoiceState;
use crate::EthersTransaction;
use crate::{
engine_api::{
json_structures::{
@ -8,15 +9,27 @@ use crate::{
},
ExecutionBlockWithTransactions,
};
use eth2::types::BlobsBundle;
use kzg::{Kzg, KzgCommitment, KzgProof};
use parking_lot::Mutex;
use rand::{rngs::StdRng, Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use ssz::Decode;
use ssz_types::VariableList;
use std::collections::HashMap;
use std::sync::Arc;
use tree_hash::TreeHash;
use tree_hash_derive::TreeHash;
use types::{
Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
ExecutionPayloadDeneb, ExecutionPayloadHeader, ExecutionPayloadMerge, ForkName, Hash256,
Transaction, Transactions, Uint256,
};
use super::DEFAULT_TERMINAL_BLOCK;
const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz");
const GAS_LIMIT: u64 = 16384;
const GAS_USED: u64 = GAS_LIMIT - 1;
@ -118,6 +131,19 @@ pub struct ExecutionBlockGenerator<T: EthSpec> {
* Post-merge fork triggers
*/
pub shanghai_time: Option<u64>, // withdrawals
pub cancun_time: Option<u64>, // deneb
    /*
     * Deneb (blob-related) state
     */
pub blobs_bundles: HashMap<PayloadId, BlobsBundle<T>>,
pub kzg: Option<Arc<Kzg>>,
rng: Arc<Mutex<StdRng>>,
}
fn make_rng() -> Arc<Mutex<StdRng>> {
// Nondeterminism in tests is a highly undesirable thing. Seed the RNG to some arbitrary
// but fixed value for reproducibility.
Arc::new(Mutex::new(StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64)))
}
impl<T: EthSpec> ExecutionBlockGenerator<T> {
@ -126,6 +152,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
terminal_block_number: u64,
terminal_block_hash: ExecutionBlockHash,
shanghai_time: Option<u64>,
cancun_time: Option<u64>,
kzg: Option<Kzg>,
) -> Self {
let mut gen = Self {
head_block: <_>::default(),
@ -139,6 +167,10 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
next_payload_id: 0,
payload_ids: <_>::default(),
shanghai_time,
cancun_time,
blobs_bundles: <_>::default(),
kzg: kzg.map(Arc::new),
rng: make_rng(),
};
gen.insert_pow_block(0).unwrap();
@ -171,9 +203,12 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
}
pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName {
match self.cancun_time {
Some(fork_time) if timestamp >= fork_time => ForkName::Deneb,
_ => match self.shanghai_time {
Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
_ => ForkName::Merge,
},
}
}
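    // Fork selection sketch with hypothetical times: given `shanghai_time == Some(10)`
    // and `cancun_time == Some(20)`, timestamps 5, 10 and 20 map to Merge, Capella and
    // Deneb respectively. `cancun_time` is checked first, so Deneb takes precedence
    // whenever both thresholds are crossed.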
@ -249,10 +284,15 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
finalized_block_hash
));
}
let block = if block_number == 0 {
generate_genesis_block(self.terminal_total_difficulty, self.terminal_block_number)?
} else if let Some(block) = self.block_by_number(block_number - 1) {
generate_pow_block(
self.terminal_total_difficulty,
self.terminal_block_number,
block_number,
block.block_hash(),
)?
} else {
return Err(format!(
"parent with block number {} not found",
@ -260,13 +300,6 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
));
};
// Insert block into block tree
self.insert_block(Block::PoW(block))?;
@ -327,10 +360,10 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
Ok(hash)
}
    // Duplicate blocks are deliberately not rejected: this lets us re-use the same
    // execution block generator for multiple beacon chains, which is useful in testing.
pub fn insert_block(&mut self, block: Block<T>) -> Result<ExecutionBlockHash, String> {
if block.parent_hash() != ExecutionBlockHash::zero()
&& !self.blocks.contains_key(&block.parent_hash())
{
return Err(format!("parent block {:?} is unknown", block.parent_hash()));
@ -343,7 +376,7 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
let block_hash = block.block_hash();
self.block_hashes
.entry(block.block_number())
.or_default()
.push(block_hash);
self.blocks.insert(block_hash, block);
@ -388,10 +421,12 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
self.payload_ids.get(id).cloned()
}
pub fn get_blobs_bundle(&mut self, id: &PayloadId) -> Option<BlobsBundle<T>> {
self.blobs_bundles.get(id).cloned()
}
pub fn new_payload(&mut self, payload: ExecutionPayload<T>) -> PayloadStatusV1 {
let Some(parent) = self.blocks.get(&payload.parent_hash()) else {
return PayloadStatusV1 {
status: PayloadStatusV1Status::Syncing,
latest_valid_hash: None,
@ -424,14 +459,20 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>,
) -> Result<JsonForkchoiceUpdatedV1Response, String> {
        // This covers starting the post-merge transition at genesis, which is useful
        // for testing Capella and later forks.
let head_block_hash = forkchoice_state.head_block_hash;
if let Some(genesis_pow_block) = self.block_by_number(0) {
if genesis_pow_block.block_hash() == head_block_hash {
self.terminal_block_hash = head_block_hash;
}
}
if let Some(payload) = self.pending_payloads.remove(&head_block_hash) {
self.insert_block(Block::PoS(payload))?;
}
let unknown_head_block_hash = !self.blocks.contains_key(&head_block_hash);
let unknown_safe_block_hash = forkchoice_state.safe_block_hash
!= ExecutionBlockHash::zero()
&& !self.blocks.contains_key(&forkchoice_state.safe_block_hash);
@ -464,75 +505,15 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
let parent = self
.blocks
.get(&head_block_hash)
.cloned()
.ok_or_else(|| format!("unknown parent block {head_block_hash:?}"))?;
let id = payload_id_from_u64(self.next_payload_id);
self.next_payload_id += 1;
let execution_payload =
self.build_new_execution_payload(head_block_hash, &parent, id, &attributes)?;
self.payload_ids.insert(id, execution_payload);
Some(id)
@ -559,12 +540,239 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
payload_id: id.map(Into::into),
})
}
pub fn build_new_execution_payload(
&mut self,
head_block_hash: ExecutionBlockHash,
parent: &Block<T>,
id: PayloadId,
attributes: &PayloadAttributes,
) -> Result<ExecutionPayload<T>, String> {
let mut execution_payload = match attributes {
PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge {
parent_hash: head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
}),
PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) {
ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge {
parent_hash: head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
}),
ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella {
parent_hash: head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
withdrawals: pa.withdrawals.clone().into(),
}),
_ => unreachable!(),
},
PayloadAttributes::V3(pa) => ExecutionPayload::Deneb(ExecutionPayloadDeneb {
parent_hash: head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
withdrawals: pa.withdrawals.clone().into(),
blob_gas_used: 0,
excess_blob_gas: 0,
}),
};
match execution_payload.fork_name() {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {}
ForkName::Deneb => {
                // Pick a random number of blobs between 0 and the per-block maximum (inclusive).
let mut rng = self.rng.lock();
let num_blobs = rng.gen::<usize>() % (T::max_blobs_per_block() + 1);
let (bundle, transactions) = generate_blobs(num_blobs)?;
for tx in Vec::from(transactions) {
execution_payload
.transactions_mut()
.push(tx)
.map_err(|_| "transactions are full".to_string())?;
}
self.blobs_bundles.insert(id, bundle);
}
}
*execution_payload.block_hash_mut() =
ExecutionBlockHash::from_root(execution_payload.tree_hash_root());
Ok(execution_payload)
}
}
pub fn load_test_blobs_bundle<E: EthSpec>() -> Result<(KzgCommitment, KzgProof, Blob<E>), String> {
let BlobsBundle::<E> {
commitments,
proofs,
blobs,
} = BlobsBundle::from_ssz_bytes(TEST_BLOB_BUNDLE)
.map_err(|e| format!("Unable to decode ssz: {:?}", e))?;
Ok((
commitments
.first()
.cloned()
.ok_or("commitment missing in test bundle")?,
proofs
.first()
.cloned()
.ok_or("proof missing in test bundle")?,
blobs
.first()
.cloned()
.ok_or("blob missing in test bundle")?,
))
}
pub fn generate_blobs<E: EthSpec>(
n_blobs: usize,
) -> Result<(BlobsBundle<E>, Transactions<E>), String> {
let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::<E>()?;
let mut bundle = BlobsBundle::<E>::default();
let mut transactions = vec![];
for blob_index in 0..n_blobs {
let tx = static_valid_tx::<E>()
.map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?;
transactions.push(tx);
bundle
.blobs
.push(blob.clone())
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
bundle
.commitments
.push(kzg_commitment)
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
bundle
.proofs
.push(kzg_proof)
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
}
Ok((bundle, transactions.into()))
}
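// Usage sketch (hypothetical; `MainnetEthSpec` assumed): each requested blob yields
// one entry in every bundle list plus one matching blob transaction.
fn generate_two_blobs_example() -> Result<(), String> {
    let (bundle, transactions) = generate_blobs::<types::MainnetEthSpec>(2)?;
    assert_eq!(bundle.blobs.len(), 2);
    assert_eq!(bundle.commitments.len(), 2);
    assert_eq!(bundle.proofs.len(), 2);
    assert_eq!(transactions.len(), 2);
    Ok(())
}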
fn static_valid_tx<T: EthSpec>() -> Result<Transaction<T::MaxBytesPerTransaction>, String> {
    // A real, hex-encoded transaction; its contents are irrelevant for these tests.
let transaction: EthersTransaction = serde_json::from_str(
r#"{
"blockHash":"0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2",
"blockNumber":"0x5daf3b",
"from":"0xa7d9ddbe1f17865597fbd27ec712455208b6b76d",
"gas":"0xc350",
"gasPrice":"0x4a817c800",
"hash":"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b",
"input":"0x68656c6c6f21",
"nonce":"0x15",
"to":"0xf02c1c8e6114b1dbe8937a39260b5b0a374432bb",
"transactionIndex":"0x41",
"value":"0xf3dbb76162000",
"v":"0x25",
"r":"0x1b5e176d927f8e9ab405058b2d2457392da3e20f328b16ddabcebc33eaac5fea",
"s":"0x4ba69724e8f69de52f0125ad8b3c5c2cef33019bac3249e2c0a2192766d1721c"
}"#,
)
.unwrap();
VariableList::new(transaction.rlp().to_vec())
.map_err(|e| format!("Failed to convert transaction to SSZ: {:?}", e))
}
fn payload_id_from_u64(n: u64) -> PayloadId {
n.to_le_bytes()
}
pub fn generate_genesis_header<T: EthSpec>(
spec: &ChainSpec,
post_transition_merge: bool,
) -> Option<ExecutionPayloadHeader<T>> {
let genesis_fork = spec.fork_name_at_slot::<T>(spec.genesis_slot);
let genesis_block_hash =
generate_genesis_block(spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK)
.ok()
.map(|block| block.block_hash);
match genesis_fork {
ForkName::Base | ForkName::Altair => None,
ForkName::Merge => {
if post_transition_merge {
let mut header = ExecutionPayloadHeader::Merge(<_>::default());
*header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
Some(header)
} else {
Some(ExecutionPayloadHeader::<T>::Merge(<_>::default()))
}
}
ForkName::Capella => {
let mut header = ExecutionPayloadHeader::Capella(<_>::default());
*header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
Some(header)
}
ForkName::Deneb => {
let mut header = ExecutionPayloadHeader::Deneb(<_>::default());
*header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
Some(header)
}
}
}
pub fn generate_genesis_block(
terminal_total_difficulty: Uint256,
terminal_block_number: u64,
) -> Result<PoWBlock, String> {
generate_pow_block(
terminal_total_difficulty,
terminal_block_number,
0,
ExecutionBlockHash::zero(),
)
}
pub fn generate_pow_block(
terminal_total_difficulty: Uint256,
terminal_block_number: u64,
@ -605,7 +813,9 @@ pub fn generate_pow_block(
#[cfg(test)]
mod test {
use super::*;
use eth2_network_config::TRUSTED_SETUP_BYTES;
use kzg::TrustedSetup;
use types::{MainnetEthSpec, MinimalEthSpec};
#[test]
fn pow_chain_only() {
@ -618,6 +828,8 @@ mod test {
TERMINAL_BLOCK,
ExecutionBlockHash::zero(),
None,
None,
None,
);
for i in 0..=TERMINAL_BLOCK {
@ -665,4 +877,32 @@ mod test {
assert!(generator.block_by_number(next_i).is_none());
}
}
#[test]
fn valid_test_blobs() {
assert!(
validate_blob::<MainnetEthSpec>().is_ok(),
"Mainnet preset test blobs bundle should contain valid proofs"
);
assert!(
validate_blob::<MinimalEthSpec>().is_ok(),
"Minimal preset test blobs bundle should contain valid proofs"
);
}
fn validate_blob<E: EthSpec>() -> Result<(), String> {
let kzg = load_kzg()?;
let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::<E>()?;
let kzg_blob = kzg::Blob::from_bytes(blob.as_ref())
.map_err(|e| format!("Error converting blob to kzg blob: {e:?}"))?;
kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof)
.map_err(|e| format!("Invalid blobs bundle: {e:?}"))
}
fn load_kzg() -> Result<Kzg, String> {
let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES)
.map_err(|e| format!("Unable to read trusted setup file: {e:?}"))?;
Kzg::new_from_trusted_setup(trusted_setup)
.map_err(|e| format!("Failed to load trusted setup: {e:?}"))
}
}

Some files were not shown because too many files have changed in this diff