diff --git a/.config/nextest.toml b/.config/nextest.toml
new file mode 100644
index 000000000..1ef771b3d
--- /dev/null
+++ b/.config/nextest.toml
@@ -0,0 +1,113 @@
+# This is the default config used by nextest. It is embedded in the binary at
+# build time. It may be used as a template for .config/nextest.toml.
+
+[store]
+# The directory under the workspace root at which nextest-related files are
+# written. Profile-specific storage is currently written to dir/<profile-name>.
+dir = "target/nextest"
+
+# This section defines the default nextest profile. Custom profiles are layered
+# on top of the default profile.
+[profile.default]
+# "retries" defines the number of times a test should be retried. If set to a
+# non-zero value, tests that succeed on a subsequent attempt will be marked as
+# flaky. Can be overridden through the `--retries` option.
+# Examples
+# * retries = 3
+# * retries = { backoff = "fixed", count = 2, delay = "1s" }
+# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" }
+retries = 0
+
+# The number of threads to run tests with. Supported values are either an integer or
+# the string "num-cpus". Can be overridden through the `--test-threads` option.
+test-threads = 8
+
+# The number of threads required for each test. This is generally used in overrides to
+# mark certain tests as heavier than others. However, it can also be set as a global parameter.
+threads-required = 1
+
+# Show these test statuses in the output.
+#
+# The possible values this can take are:
+# * none: no output
+# * fail: show failed (including exec-failed) tests
+# * retry: show flaky and retried tests
+# * slow: show slow tests
+# * pass: show passed tests
+# * skip: show skipped tests (most useful for CI)
+# * all: all of the above
+#
+# Each value includes all the values above it; for example, "slow" includes
+# failed and retried tests.
+#
+# Can be overridden through the `--status-level` flag.
+status-level = "pass"
+
+# Similar to status-level, show these test statuses at the end of the run.
+final-status-level = "flaky"
+
+# "failure-output" defines when standard output and standard error for failing tests are produced.
+# Accepted values are
+# * "immediate": output failures as soon as they happen
+# * "final": output failures at the end of the test run
+# * "immediate-final": output failures as soon as they happen and at the end of
+#   the test run; combination of "immediate" and "final"
+# * "never": don't output failures at all
+#
+# For large test suites and CI it is generally useful to use "immediate-final".
+#
+# Can be overridden through the `--failure-output` option.
+failure-output = "immediate"
+
+# "success-output" controls production of standard output and standard error on success. This should
+# generally be set to "never".
+success-output = "never"
+
+# Cancel the test run on the first failure. For CI runs, consider setting this
+# to false.
+fail-fast = true
+
+# Treat a test that takes longer than the configured 'period' as slow, and print a message.
+# See <https://nexte.st/book/slow-tests> for more information.
+#
+# Optional: specify the parameter 'terminate-after' with a non-zero integer,
+# which will cause slow tests to be terminated after the specified number of
+# periods have passed.
+# Example: slow-timeout = { period = "60s", terminate-after = 2 }
+slow-timeout = { period = "120s" }
+
+# Treat a test as leaky if after the process is shut down, standard output and standard error
+# aren't closed within this duration.
+#
+# This usually happens in case of a test that creates a child process and lets it inherit those
+# handles, but doesn't clean the child process up (especially when it fails).
+#
+# See <https://nexte.st/book/leaky-tests> for more information.
+leak-timeout = "100ms"
+
+[profile.default.junit]
+# Output a JUnit report into the given file inside 'store.dir/<profile-name>'.
+# If unspecified, JUnit is not written out.
+
+# path = "junit.xml"
+
+# The name of the top-level "report" element in JUnit report. If aggregating
+# reports across different test runs, it may be useful to provide separate names
+# for each report.
+report-name = "lighthouse-run"
+
+# Whether standard output and standard error for passing tests should be stored in the JUnit report.
+# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
+store-success-output = false
+
+# Whether standard output and standard error for failing tests should be stored in the JUnit report.
+# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
+#
+# Note that if a description can be extracted from the output, it is always stored in the
+# <description> element.
+store-failure-output = true
+
+# This profile is activated if MIRI_SYSROOT is set.
+[profile.default-miri]
+# Miri tests take up a lot of memory, so run only a few tests at a time by default.
+test-threads = 4
diff --git a/.github/mergify.yml b/.github/mergify.yml
new file mode 100644
index 000000000..4c4046cf6
--- /dev/null
+++ b/.github/mergify.yml
@@ -0,0 +1,19 @@
+queue_rules:
+  - name: default
+    batch_size: 8
+    batch_max_wait_time: 60 s
+    checks_timeout: 10800 s
+    merge_method: squash
+    commit_message_template: |
+      {{ title }} (#{{ number }})
+
+      {% for commit in commits %}
+      * {{ commit.commit_message }}
+      {% endfor %}
+    queue_conditions:
+      - "#approved-reviews-by >= 1"
+      - "check-success=license/cla"
+      - "check-success=target-branch-check"
+    merge_conditions:
+      - "check-success=test-suite-success"
+      - "check-success=local-testnet-success"
diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml
index 19236691f..7f5d3e0b6 100644
--- a/.github/workflows/linkcheck.yml
+++ b/.github/workflows/linkcheck.yml
@@ -22,14 +22,15 @@ jobs:
       - name: Checkout code
        uses: actions/checkout@v3
 
-      - name: Create docker network
-        run: docker network create book
-
       - name: Run mdbook server
-        run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:v0.4.20-rust serve --hostname 0.0.0.0
+        run: |
+          docker run -v ${{ github.workspace }}/book:/book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0
+          sleep 5
 
       - name: Print logs
         run: docker logs book
 
       - name: Run linkcheck
-        run: docker run --network book tennox/linkcheck:latest book:3000
+        run: |
+          curl -sL https://github.com/filiph/linkcheck/releases/download/3.0.0/linkcheck-3.0.0-linux-x64.tar.gz | tar xvzf - linkcheck/linkcheck --strip 1
+          ./linkcheck localhost:3000 -d
diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml
index 1269aee62..75a81ce0e 100644
--- a/.github/workflows/local-testnet.yml
+++ b/.github/workflows/local-testnet.yml
@@ -20,6 +20,9 @@ jobs:
           - ubuntu-22.04
           - macos-12
     runs-on: ${{ matrix.os }}
+    env:
+      # Enable portable to prevent issues with caching `blst` for the wrong CPU type
+      FEATURES: portable,jemalloc
     steps:
       - uses: actions/checkout@v3
 
@@ -83,4 +86,15 @@
 
       - name: Stop local testnet with blinded block production
         run: ./stop_local_testnet.sh
-        working-directory: scripts/local_testnet
\ No newline at end of file
+        working-directory: 
scripts/local_testnet + + # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether + # a PR is safe to merge. New jobs should be added here. + local-testnet-success: + name: local-testnet-success + runs-on: ubuntu-latest + needs: ["run-local-testnet"] + steps: + - uses: actions/checkout@v3 + - name: Check that success job is dependent on all others + run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 24ca09ec0..30ac52b6e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -282,9 +282,6 @@ jobs: | | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) | ENDBODY ) - assets=() - for asset in ./lighthouse-*.tar.gz*; do - assets+=("-a" "$asset/$asset") - done + assets=(./lighthouse-*.tar.gz*/lighthouse-*.tar.gz*) tag_name="${{ env.VERSION }}" - echo "$body" | hub release create --draft "${assets[@]}" -F "-" "$tag_name" + echo "$body" | gh release create --draft -F "-" "$tag_name" "${assets[@]}" diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index fd9b77ae2..70fb59424 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -18,14 +18,16 @@ env: # Deny warnings in CI # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) RUSTFLAGS: "-D warnings -C debuginfo=0" - # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2023-04-16 # Prevent Github API rate limiting. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} # Self-hosted runners need to reference a different host for `./watch` tests. WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }} + # Disable incremental compilation + CARGO_INCREMENTAL: 0 + # Enable portable to prevent issues with caching `blst` for the wrong CPU type + TEST_FEATURES: portable jobs: target-branch-check: name: target-branch-check @@ -34,145 +36,191 @@ jobs: steps: - name: Check that the pull request is not targeting the stable branch run: test ${{ github.base_ref }} != "stable" - extract-msrv: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Extract Minimum Supported Rust Version (MSRV) - run: | - metadata=$(cargo metadata --no-deps --format-version 1) - msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - echo "MSRV=$msrv" >> $GITHUB_OUTPUT - id: extract_msrv - outputs: - MSRV: ${{ steps.extract_msrv.outputs.MSRV }} - cargo-fmt: - name: cargo-fmt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Check formatting with cargo fmt - run: make cargo-fmt release-tests-ubuntu: name: release-tests-ubuntu # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in release - run: make test-release + run: make nextest-release + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats release-tests-windows: name: release-tests-windows runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Install make + if: env.SELF_HOSTED_RUNNERS == 'false' run: choco install -y make - - uses: KyleMayes/install-llvm-action@v1 - if: env.SELF_HOSTED_RUNNERS == false - with: - version: "15.0" - directory: ${{ runner.temp }}/llvm +# - uses: KyleMayes/install-llvm-action@v1 +# if: env.SELF_HOSTED_RUNNERS == 'false' +# with: +# version: "15.0" +# directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: Run tests in release - run: make test-release + run: make nextest-release + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats beacon-chain-tests: name: beacon-chain-tests # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run beacon_chain tests for all known forks run: make test-beacon-chain + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats op-pool-tests: name: op-pool-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run operation_pool tests for all known forks run: make test-op-pool + network-tests: + name: network-tests + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v3 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run network tests for all known forks + run: make test-network slasher-tests: name: slasher-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run slasher tests for all supported backends run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + bins: cargo-nextest - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in debug - run: make test-debug + run: make nextest-debug + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats state-transition-vectors-ubuntu: name: state-transition-vectors-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Run state_transition_vectors in release. run: make run-state-transition-tests ef-tests-ubuntu: name: ef-tests-ubuntu # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run consensus-spec-tests with blst, milagro and fake_crypto - run: make test-ef + run: make nextest-ef + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats dockerfile-ubuntu: name: dockerfile-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - name: Build the root Dockerfile run: docker build --build-arg FEATURES=portable -t lighthouse:local . - name: Test the built image @@ -180,11 +228,13 @@ jobs: eth1-simulator-ubuntu: name: eth1-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -194,11 +244,13 @@ jobs: merge-transition-ubuntu: name: merge-transition-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -208,21 +260,25 @@ jobs: no-eth1-simulator-ubuntu: name: no-eth1-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Run the beacon chain sim without an eth1 connection run: cargo run --release --bin simulator no-eth1-sim syncing-simulator-ubuntu: name: syncing-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -231,21 +287,30 @@ jobs: run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: name: doppelganger-protection-test - runs-on: ubuntu-latest - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} + env: + # Enable portable to prevent issues with caching `blst` for the wrong CPU type + FEATURES: jemalloc,portable steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install geth + if: env.SELF_HOSTED_RUNNERS == 'false' run: | sudo add-apt-repository -y ppa:ethereum/ethereum sudo apt-get update sudo apt-get install ethereum - - name: Install lighthouse and lcli + - name: Install lighthouse run: | make - make install-lcli + - name: Install lcli + if: 
env.SELF_HOSTED_RUNNERS == 'false' + run: make install-lcli - name: Run the doppelganger protection failure test script run: | cd scripts/tests @@ -256,90 +321,76 @@ jobs: ./doppelganger_protection.sh success genesis.json execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: '1.20' - - uses: actions/setup-dotnet@v3 - with: - dotnet-version: '6.0.201' - name: Get latest version of stable Rust - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + cache: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Add go compiler to $PATH + if: env.SELF_HOSTED_RUNNERS == 'true' + run: echo "/usr/local/go/bin" >> $GITHUB_PATH - name: Run exec engine integration tests in release run: make test-exec-engine - check-benchmarks: - name: check-benchmarks + check-code: + name: check-code runs-on: ubuntu-latest - needs: cargo-fmt + env: + CARGO_INCREMENTAL: 1 steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable - - name: Typecheck benchmark code without running it - run: make check-benches - clippy: - name: clippy - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + components: rustfmt,clippy + bins: cargo-audit + - name: Check formatting with cargo fmt + run: make cargo-fmt - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock + - name: Typecheck benchmark code without running it + run: make check-benches + - name: Validate state_processing feature arbitrary-fuzz + run: make arbitrary-fuzz + - name: Run cargo audit + run: make audit-CI + - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose + run: CARGO_HOME=$(readlink -f $HOME) make vendor check-msrv: name: check-msrv runs-on: ubuntu-latest - needs: [cargo-fmt, extract-msrv] steps: - uses: actions/checkout@v3 - - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) - run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} + - name: Install Rust at Minimum Supported Rust Version (MSRV) + run: | + metadata=$(cargo metadata --no-deps --format-version 1) + msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') + rustup override set $msrv - name: Run cargo check run: cargo check --workspace - arbitrary-check: - name: arbitrary-check - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Validate state_processing feature arbitrary-fuzz - run: make arbitrary-fuzz - cargo-audit: - name: cargo-audit - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database - run: make audit - cargo-vendor: - name: cargo-vendor - 
runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose - run: CARGO_HOME=$(readlink -f $HOME) make vendor cargo-udeps: name: cargo-udeps runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - name: Install Rust (${{ env.PINNED_NIGHTLY }}) - run: rustup toolchain install $PINNED_NIGHTLY - - name: Install cargo-udeps - run: cargo install cargo-udeps --locked --force + - name: Get latest version of nightly Rust + uses: moonrepo/setup-rust@v1 + with: + channel: nightly + bins: cargo-udeps + cache: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config @@ -360,3 +411,48 @@ jobs: run: rustup override set beta - name: Run make run: make + cli-check: + name: cli-check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + - name: Run Makefile to trigger the bash script + run: make cli + # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether + # a PR is safe to merge. New jobs should be added here. + test-suite-success: + name: test-suite-success + runs-on: ubuntu-latest + needs: [ + 'target-branch-check', + 'release-tests-ubuntu', + 'release-tests-windows', + 'beacon-chain-tests', + 'op-pool-tests', + 'network-tests', + 'slasher-tests', + 'debug-tests-ubuntu', + 'state-transition-vectors-ubuntu', + 'ef-tests-ubuntu', + 'dockerfile-ubuntu', + 'eth1-simulator-ubuntu', + 'merge-transition-ubuntu', + 'no-eth1-simulator-ubuntu', + 'syncing-simulator-ubuntu', + 'doppelganger-protection-test', + 'execution-engine-integration-ubuntu', + 'check-code', + 'check-msrv', + 'cargo-udeps', + 'compile-with-beta-compiler', + 'cli-check', + ] + steps: + - uses: actions/checkout@v3 + - name: Check that success job is dependent on all others + run: ./scripts/ci/check-success-job.sh ./.github/workflows/test-suite.yml test-suite-success diff --git a/.gitignore b/.gitignore index 1b7e5dbb8..e63e218a3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ target/ +vendor/ **/*.rs.bk *.pk *.sk @@ -9,7 +10,11 @@ perf.data* /bin genesis.ssz /clippy.toml +/.cargo # IntelliJ /*.iml -.idea \ No newline at end of file +.idea + +# VSCode +/.vscode diff --git a/Cargo.lock b/Cargo.lock index 90a5373db..1d3dc3070 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -53,7 +53,6 @@ dependencies = [ "regex", "rpassword", "serde", - "serde_derive", "serde_yaml", "slog", "types", @@ -91,6 +90,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + [[package]] name = "aes" version = "0.7.5" @@ -100,7 +109,7 @@ dependencies = [ "cfg-if", "cipher 0.3.0", "cpufeatures", - "ctr", + "ctr 0.8.0", "opaque-debug", ] @@ -117,45 +126,49 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.9.4" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" +checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" dependencies = [ - "aead", + "aead 0.4.3", 
"aes 0.7.5", "cipher 0.3.0", - "ctr", - "ghash", + "ctr 0.7.0", + "ghash 0.4.4", + "subtle", +] + +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead 0.5.2", + "aes 0.8.3", + "cipher 0.4.4", + "ctr 0.9.2", + "ghash 0.5.0", "subtle", ] [[package]] name = "ahash" -version = "0.7.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.10", - "once_cell", - "version_check", -] - -[[package]] -name = "ahash" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2135563fb5c609d2b2b87c1e8ce7bc41b0b45430fa9661f457981503dd5bf0" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -195,26 +208,17 @@ dependencies = [ "winapi", ] -[[package]] -name = "anvil-rpc" -version = "0.1.0" -source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0" -dependencies = [ - "serde", - "serde_json", -] - [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "arbitrary" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" dependencies = [ "derive_arbitrary", ] @@ -225,15 +229,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" -[[package]] -name = "array-init" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72" -dependencies = [ - "nodrop", -] - [[package]] name = "arrayref" version = "0.3.7" @@ -292,65 +287,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" [[package]] -name = "async-io" -version = "1.13.0" +name = "async-channel" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-io" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb41eb19024a91746eba0773aa5e16036045bbf45733766661099e182ea6a744" dependencies = [ "async-lock", - "autocfg", 
"cfg-if", "concurrent-queue", + "futures-io", "futures-lite", - "log", "parking", "polling", - "rustix 0.37.23", + "rustix 0.38.30", "slab", - "socket2 0.4.9", - "waker-fn", + "tracing", + "windows-sys 0.52.0", ] [[package]] name = "async-lock" -version = "2.8.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ - "event-listener", -] - -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", + "event-listener 4.0.3", + "event-listener-strategy", "pin-project-lite", ] -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.37", -] - [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -366,9 +351,9 @@ dependencies = [ [[package]] name = "asynchronous-codec" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" dependencies = [ "bytes", "futures-sink", @@ -383,12 +368,23 @@ version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" dependencies = [ - "http", + "http 0.2.11", "log", "url", "wildmatch", ] +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http 0.2.11", + "log", + "url", +] + [[package]] name = "atty" version = "0.2.14" @@ -420,18 +416,19 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.1.0", + "hyper-util", "itoa", "matchit", "memchr", @@ -448,23 +445,28 @@ dependencies = [ "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" dependencies = [ "async-trait", "bytes", "futures-util", 
- "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -508,9 +510,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -518,24 +520,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "beacon-api-client" -version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=93d7e8c#93d7e8c38fe9782c4862909663e7b57c44f805a9" -dependencies = [ - "ethereum-consensus", - "http", - "itertools", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-subscriber", - "url", -] - [[package]] name = "beacon_chain" version = "0.2.0" @@ -546,6 +530,7 @@ dependencies = [ "environment", "eth1", "eth2", + "eth2_network_config", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", @@ -558,10 +543,11 @@ dependencies = [ "hex", "int_to_bytes", "itertools", + "kzg", "lazy_static", "lighthouse_metrics", "logging", - "lru 0.7.8", + "lru", "maplit", "merkle_proof", "oneshot_broadcast", @@ -576,9 +562,11 @@ dependencies = [ "serde_json", "slasher", "slog", + "slog-async", + "slog-term", "sloggers", "slot_clock", - "smallvec 1.11.0", + "smallvec", "ssz_types", "state_processing", "store", @@ -595,7 +583,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.5.0" +version = "4.6.0" dependencies = [ "beacon_chain", "clap", @@ -611,13 +599,14 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper", + "hyper 1.1.0", "lighthouse_network", "lighthouse_version", "monitoring_api", "node_test_rig", "sensitive_url", "serde", + "serde_json", "slasher", "slog", "store", @@ -681,6 +670,29 @@ dependencies = [ "shlex", ] +[[package]] +name = "bindgen" +version = "0.66.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" +dependencies = [ + "bitflags 2.4.2", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.48", + "which", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -689,9 +701,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bitvec" @@ -765,7 +777,6 @@ dependencies = [ "milagro_bls", "rand", "serde", - "serde_derive", "tree_hash", "zeroize", ] @@ -794,7 +805,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.5.0" +version = "4.6.0" dependencies = [ "beacon_node", "clap", @@ -806,7 +817,6 @@ dependencies = [ "log", "logging", "serde", - "serde_derive", "serde_json", "serde_yaml", "slog", @@ -859,9 +869,9 @@ checksum = 
"c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -893,6 +903,20 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "c-kzg" +version = "0.1.0" +source = "git+https://github.com/ethereum/c-kzg-4844?rev=748283cced543c486145d5f3f38684becdfe3e1b#748283cced543c486145d5f3f38684becdfe3e1b" +dependencies = [ + "bindgen 0.66.1", + "blst", + "cc", + "glob", + "hex", + "libc", + "serde", +] + [[package]] name = "cached_tree_hash" version = "0.1.0" @@ -903,7 +927,7 @@ dependencies = [ "ethereum_ssz_derive", "quickcheck", "quickcheck_macros", - "smallvec 1.11.0", + "smallvec", "ssz_types", "tree_hash", ] @@ -919,9 +943,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" dependencies = [ "serde", ] @@ -973,39 +997,38 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher 0.4.4", "cpufeatures", - "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead", + "aead 0.5.2", "chacha20", - "cipher 0.3.0", + "cipher 0.4.4", "poly1305", "zeroize", ] [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "41daef31d7a747c5c847246f36de49ced6f7403b4cdabc807a97b5cc184cda7a" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] @@ -1025,13 +1048,14 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -1097,7 +1121,6 @@ dependencies = [ "parking_lot 0.12.1", "sensitive_url", "serde", - "serde_derive", "serde_yaml", "slasher", "slasher_service", @@ -1126,6 +1149,7 @@ name = "compare_fields" version = "0.2.0" dependencies = [ "compare_fields_derive", + "itertools", ] [[package]] @@ -1138,18 +1162,18 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" dependencies = [ "crossbeam-utils", ] [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" @@ -1165,9 +1189,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -1175,9 +1199,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "core2" @@ -1190,9 +1214,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -1244,46 +1268,37 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", - "memoffset 0.9.0", - "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1298,19 +1313,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] [[package]] name 
= "crypto-bigint" -version = "0.5.3" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] @@ -1322,6 +1337,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core", "typenum", ] @@ -1337,9 +1353,9 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.11.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" +checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ "generic-array", "subtle", @@ -1347,9 +1363,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -1359,13 +1375,22 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] +[[package]] +name = "ctr" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" +dependencies = [ + "cipher 0.3.0", +] + [[package]] name = "ctr" version = "0.8.0" @@ -1376,40 +1401,36 @@ dependencies = [ ] [[package]] -name = "ctrlc" -version = "3.4.1" +name = "ctr" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher 0.4.4", +] + +[[package]] +name = "ctrlc" +version = "3.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b467862cc8610ca6fc9a1532d7777cee0804e678ab45410897b9396495994a0b" dependencies = [ "nix 0.27.1", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "curve25519-dalek" -version = "3.2.0" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms 3.1.2", + "platforms 3.3.0", "rustc_version", "subtle", "zeroize", @@ -1417,13 +1438,13 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -1483,15 +1504,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "data-encoding-macro" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" +checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1499,9 +1520,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" +checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1516,6 +1537,7 @@ dependencies = [ "clap", "clap_utils", "environment", + "hex", "logging", "slog", "sloggers", @@ -1538,7 +1560,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.7.8", + "tokio-util 0.7.10", ] [[package]] @@ -1592,9 +1614,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] [[package]] name = "derivative" @@ -1609,13 +1634,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -1633,11 +1658,11 @@ dependencies = [ [[package]] name = "diesel" -version = "2.1.1" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d98235fdc2f355d330a8244184ab6b4b33c28679c0b4158f63138e51d6cf7e88" +checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "byteorder", "diesel_derives", "itoa", @@ -1647,14 +1672,14 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e054665eaf6d97d1e7125512bb2d35d07c73ac86cc6920174cb42d1ab697a554" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -1674,7 +1699,7 @@ version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -1750,33 +1775,30 @@ dependencies = [ [[package]] name = "discv5" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c05fa26996c6141f78ac4fafbe297a7fa69690565ba4e0d1f2e60bde5ce501" +version = "0.4.0" +source = "git+https://github.com/sigp/discv5?rev=e30a2c31b7ac0c57876458b971164654dfa4513b#e30a2c31b7ac0c57876458b971164654dfa4513b" dependencies = [ "aes 0.7.5", - "aes-gcm", + "aes-gcm 0.9.2", "arrayvec", "delay_map", - "enr 0.9.0", + "enr", "fnv", "futures", - "hashlink 0.7.0", + "hashlink", "hex", "hkdf", "lazy_static", - "libp2p-core", - "libp2p-identity", - "lru 0.7.8", + "libp2p", + "lru", "more-asserts", "parking_lot 0.11.2", "rand", "rlp", - "smallvec 1.11.0", - "socket2 0.4.9", + "smallvec", + "socket2 0.4.10", "tokio", "tracing", - "tracing-subscriber", "uint", "zeroize", ] @@ -1789,7 +1811,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -1818,39 +1840,40 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.16.8" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der 0.7.8", "digest 0.10.7", - "elliptic-curve 0.13.5", + "elliptic-curve 0.13.8", "rfc6979 0.4.0", - "signature 2.1.0", - "spki 0.7.2", + "signature 2.2.0", + "spki 0.7.3", ] [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8 0.10.2", - "signature 2.1.0", + "signature 2.2.0", ] [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ - "curve25519-dalek 4.1.0", + "curve25519-dalek", "ed25519", - "rand_core 0.6.4", + "rand_core", "serde", - "sha2 0.10.7", + "sha2 0.10.8", + "subtle", "zeroize", ] @@ -1864,16 +1887,20 @@ dependencies = [ "compare_fields", "compare_fields_derive", "derivative", + "eth2_network_config", "ethereum-types 0.14.1", + "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", "fork_choice", "fs2", "hex", + "kzg", + "logging", "rayon", "serde", - "serde_derive", + "serde_json", "serde_repr", "serde_yaml", "snap", @@ -1904,8 +1931,7 @@ dependencies = [ "ff 0.12.1", "generic-array", "group 0.12.1", - "pkcs8 0.9.0", - "rand_core 0.6.4", + "rand_core", "sec1 0.3.0", "subtle", "zeroize", @@ -1913,19 +1939,19 @@ dependencies = [ [[package]] name = "elliptic-curve" -version = "0.13.5" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct 0.2.0", - "crypto-bigint 0.5.3", + 
"crypto-bigint 0.5.5", "digest 0.10.7", "ff 0.13.0", "generic-array", "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", - "rand_core 0.6.4", + "rand_core", "sec1 0.7.3", "subtle", "zeroize", @@ -1942,53 +1968,33 @@ dependencies = [ [[package]] name = "enr" -version = "0.6.2" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" +checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" dependencies = [ - "base64 0.13.1", - "bs58 0.4.0", - "bytes", - "hex", - "k256 0.11.6", - "log", - "rand", - "rlp", - "serde", - "sha3 0.10.8", - "zeroize", -] - -[[package]] -name = "enr" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" -dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "bytes", "ed25519-dalek", "hex", - "k256 0.13.1", + "k256 0.13.3", "log", "rand", "rlp", "serde", - "serde-hex", "sha3 0.10.8", "zeroize", ] [[package]] name = "enum-as-inner" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.48", ] [[package]] @@ -2025,7 +2031,6 @@ dependencies = [ "futures", "logging", "serde", - "serde_derive", "slog", "slog-async", "slog-json", @@ -2043,24 +2048,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] -name = "errno" -version = "0.3.3" +name = "erased-serde" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys 0.48.0", + "serde", ] [[package]] -name = "errno-dragonfly" -version = "0.1.2" +name = "errno" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "cc", "libc", + "windows-sys 0.52.0", ] [[package]] @@ -2140,13 +2143,15 @@ dependencies = [ "proto_array", "psutil", "reqwest", - "ring", + "ring 0.16.20", "sensitive_url", "serde", "serde_json", "slashing_protection", + "ssz_types", "store", "tokio", + "tree_hash", "types", ] @@ -2169,7 +2174,6 @@ dependencies = [ "lazy_static", "num-bigint", "serde", - "serde_derive", "serde_yaml", ] @@ -2180,7 +2184,7 @@ dependencies = [ "bls", "hex", "num-bigint-dig", - "ring", + "ring 0.16.20", "sha2 0.9.9", "zeroize", ] @@ -2219,6 +2223,7 @@ dependencies = [ "pretty_reqwest_error", "reqwest", "sensitive_url", + "serde_json", "serde_yaml", "sha2 0.9.9", "slog", @@ -2314,30 +2319,6 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "ethereum-consensus" -version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380108d15fcc40349927fdf3d11c71f9edb67c2" -dependencies = [ - "async-stream", - "blst", - "bs58 0.4.0", - "enr 0.6.2", - "hex", - "integer-sqrt", - "multiaddr 0.14.0", - "multihash 0.16.3", - "rand", - "serde", - 
"serde_json", - "serde_yaml", - "sha2 0.9.9", - "ssz_rs", - "thiserror", - "tokio", - "tokio-stream", -] - [[package]] name = "ethereum-types" version = "0.12.1" @@ -2363,7 +2344,7 @@ dependencies = [ "impl-codec 0.6.0", "impl-rlp", "impl-serde 0.4.0", - "primitive-types 0.12.1", + "primitive-types 0.12.2", "scale-info", "uint", ] @@ -2376,15 +2357,15 @@ checksum = "233dc6f434ce680dbabf4451ee3380cec46cb3c45d66660445a435619710dd35" dependencies = [ "cpufeatures", "lazy_static", - "ring", - "sha2 0.10.7", + "ring 0.16.20", + "sha2 0.10.8", ] [[package]] name = "ethereum_serde_utils" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f8cb04ea380a33e9c269fa5f8df6f2d63dee19728235f3e639e7674e038686a" +checksum = "de4d5951468846963c24e8744c133d44f39dff2cd3a233f6be22b370d08a524f" dependencies = [ "ethereum-types 0.14.1", "hex", @@ -2401,7 +2382,7 @@ checksum = "e61ffea29f26e8249d35128a82ec8d3bd4fbc80179ea5f5e5e3daafef6a80fcb" dependencies = [ "ethereum-types 0.14.1", "itertools", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -2446,7 +2427,7 @@ dependencies = [ "dunce", "ethers-core", "eyre", - "getrandom 0.2.10", + "getrandom", "hex", "proc-macro2", "quote", @@ -2517,10 +2498,10 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.10", + "getrandom", "hashers", "hex", - "http", + "http 0.2.11", "once_cell", "parking_lot 0.11.2", "pin-project", @@ -2545,6 +2526,27 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -2575,12 +2577,11 @@ version = "0.1.0" dependencies = [ "arc-swap", "async-trait", - "axum", "builder_client", "bytes", "environment", "eth2", - "ethereum-consensus", + "eth2_network_config", "ethereum_serde_utils", "ethereum_ssz", "ethers-core", @@ -2590,13 +2591,12 @@ dependencies = [ "hash-db", "hash256-std-hasher", "hex", - "hyper", "jsonwebtoken", "keccak-hash", + "kzg", "lazy_static", "lighthouse_metrics", - "lru 0.7.8", - "mev-rs", + "lru", "parking_lot 0.12.1", "pretty_reqwest_error", "rand", @@ -2606,7 +2606,6 @@ dependencies = [ "serde_json", "slog", "slot_clock", - "ssz_rs", "ssz_types", "state_processing", "strum", @@ -2634,9 +2633,9 @@ dependencies = [ [[package]] name = "eyre" -version = "0.6.8" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +checksum = "b6267a1fa6f59179ea4afc8e50fd8612a3cc60bc858f786ff877a4a8cb042799" dependencies = [ "indenter", "once_cell", @@ -2656,18 +2655,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.9.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "ff" @@ -2675,7 +2665,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2685,7 +2675,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2697,9 +2687,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" [[package]] name = "field-offset" @@ -2707,7 +2697,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ - "memoffset 0.9.0", + "memoffset", "rustc_version", ] @@ -2752,9 +2742,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "libz-sys", @@ -2799,9 +2789,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -2830,9 +2820,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -2843,11 +2833,20 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-bounded" +version = "0.2.3" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" +dependencies = [ + "futures-timer", + "futures-util", +] + [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -2855,15 +2854,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -2873,34 +2872,29 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "1.13.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" dependencies = [ - "fastrand 1.9.0", "futures-core", - "futures-io", - "memchr", - "parking", "pin-project-lite", - "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -2910,20 +2904,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls", + "rustls 0.21.10", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-ticker" @@ -2944,9 +2938,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -3003,25 +2997,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.16" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] @@ -3032,35 +3015,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug", - "polyval", + "polyval 0.5.3", +] + +[[package]] +name = "ghash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +dependencies = [ + "opaque-debug", + "polyval 0.6.1", ] [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "git-version" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6b0decc02f4636b9ccad390dcbe77b722a77efedfa393caf8379a51d5c61899" +checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" dependencies = [ "git-version-macro", - "proc-macro-hack", ] [[package]] name = "git-version-macro" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe69f1cbdb6e28af2bac214e943b99ce8a0a06b447d15d3e61161b0423139f3f" +checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ - "proc-macro-hack", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.48", ] [[package]] @@ -3076,7 +3067,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -3087,26 +3078,45 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand_core 0.6.4", + "rand_core", "subtle", ] [[package]] name = "h2" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" +checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http", - "indexmap 1.9.3", + "http 0.2.11", + "indexmap 2.1.0", "slab", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.10", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31d030e59af851932b72ceebadf4a2b5986dba4c3b99dd2493f8273a0f151943" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 1.0.0", + "indexmap 2.1.0", + "slab", + "tokio", + "tokio-util 0.7.10", "tracing", ] @@ -3131,40 +3141,19 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash 0.7.6", -] - [[package]] name = "hashbrown" version = "0.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.6", -] [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.3", -] - -[[package]] -name = "hashbrown" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" -dependencies = [ - "ahash 0.8.3", + "ahash", "allocator-api2", ] @@ -3177,22 +3166,13 @@ dependencies = [ "fxhash", ] -[[package]] -name = "hashlink" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" -dependencies = [ - "hashbrown 0.11.2", -] - [[package]] name = "hashlink" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.3", ] [[package]] @@ -3201,10 +3181,10 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "bytes", "headers-core", - "http", + "http 0.2.11", "httpdate", "mime", "sha1", @@ -3216,7 +3196,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http", + "http 0.2.11", ] [[package]] @@ -3236,9 +3216,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" [[package]] name = "hex" @@ -3253,10 +3233,56 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] -name = "hkdf" -version = "0.12.3" +name = "hickory-proto" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand", + "socket2 0.5.5", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35b8f021164e6a984c9030023544c57789c51760065cd510572fedcfb04164e8" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.1", + "rand", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac 0.12.1", ] @@ -3277,7 +3303,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac 0.11.0", "digest 0.9.0", ] @@ -3301,6 +3327,15 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3314,9 +3349,20 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" dependencies = [ "bytes", "fnv", @@ -3325,12 +3371,35 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.11", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.0.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", "pin-project-lite", ] @@ -3357,7 +3426,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "logging", - "lru 0.7.8", + "lru", "network", "operation_pool", "parking_lot 0.12.1", @@ -3422,22 +3491,22 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.24", + "http 0.2.11", + "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -3445,17 +3514,36 @@ dependencies = [ ] [[package]] -name = "hyper-rustls" -version = "0.24.1" +name = "hyper" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +dependencies = [ + "bytes", + "futures-channel", + 
"futures-util", + "h2 0.4.2", + "http 1.0.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", - "rustls", + "http 0.2.11", + "hyper 0.14.28", + "rustls 0.21.10", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", ] [[package]] @@ -3465,24 +3553,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper", + "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", ] [[package]] -name = "iana-time-zone" -version = "0.1.57" +name = "hyper-util" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "hyper 1.1.0", + "pin-project-lite", + "socket2 0.5.5", + "tokio", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.48.0", + "windows-core 0.52.0", ] [[package]] @@ -3502,20 +3608,19 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3534,12 +3639,12 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.7.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -3554,21 +3659,21 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.0.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" +checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ "async-io", "core-foundation", "fnv", "futures", - "if-addrs 0.7.0", + "if-addrs 0.10.2", "ipnet", "log", "rtnetlink", "system-configuration", "tokio", - "windows 0.34.0", + "windows", ] [[package]] @@ -3577,13 +3682,32 @@ version = "0.12.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "556b5a75cd4adb7c4ea21c64af1c48cefb2ce7d43dc4352c720a1fe47c21f355" dependencies = [ - "attohttpc", + "attohttpc 0.16.3", "log", "rand", "url", "xmltree", ] +[[package]] +name = "igd-next" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +dependencies = [ + "async-trait", + "attohttpc 0.24.1", + "bytes", + "futures", + "http 0.2.11", + "hyper 0.14.28", + "log", + "rand", + "tokio", + "url", + "xmltree", +] + [[package]] name = "impl-codec" version = "0.5.1" @@ -3599,7 +3723,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.6.5", + "parity-scale-codec 3.6.9", ] [[package]] @@ -3658,12 +3782,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.3", ] [[package]] @@ -3711,7 +3835,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.4", "libc", "windows-sys 0.48.0", ] @@ -3722,7 +3846,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.4", + "socket2 0.5.5", "widestring 1.0.2", "windows-sys 0.48.0", "winreg", @@ -3730,9 +3854,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "itertools" @@ -3745,9 +3869,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jemalloc-ctl" @@ -3782,18 +3906,18 @@ dependencies = [ [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" dependencies = [ "wasm-bindgen", ] @@ -3804,9 +3928,9 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.4", - "pem", - "ring", + 
"base64 0.21.7", + "pem 1.1.1", + "ring 0.16.20", "serde", "serde_json", "simple_asn1", @@ -3821,29 +3945,29 @@ dependencies = [ "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.8", ] [[package]] name = "k256" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if", - "ecdsa 0.16.8", - "elliptic-curve 0.13.5", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "once_cell", - "sha2 0.10.7", - "signature 2.1.0", + "sha2 0.10.8", + "signature 2.2.0", ] [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -3854,17 +3978,33 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b286e6b663fb926e1eeb68528e69cb70ed46c6d65871a21b2215ae8154c6d3c" dependencies = [ - "primitive-types 0.12.1", + "primitive-types 0.12.2", "tiny-keccak", ] +[[package]] +name = "kzg" +version = "0.1.0" +dependencies = [ + "arbitrary", + "c-kzg", + "derivative", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", + "hex", + "serde", + "tree_hash", +] + [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ - "spin", + "spin 0.5.2", ] [[package]] @@ -3875,7 +4015,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.5.0" +version = "4.6.0" dependencies = [ "account_utils", "beacon_chain", @@ -3892,7 +4032,9 @@ dependencies = [ "eth2_wallet", "ethereum_hashing", "ethereum_ssz", + "execution_layer", "genesis", + "hex", "int_to_bytes", "lighthouse_network", "lighthouse_version", @@ -3936,9 +4078,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.148" +version = "0.2.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" [[package]] name = "libflate" @@ -3962,19 +4104,19 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" dependencies = [ "cfg-if", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libmdbx" @@ -3993,14 +4135,14 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.52.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d07d1502a027366d55afe187621c2d7895dc111a3df13b35fed698049681d7" +version = "0.54.0" 
+source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "bytes", + "either", "futures", "futures-timer", - "getrandom 0.2.10", + "getrandom", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -4016,16 +4158,18 @@ dependencies = [ "libp2p-quic", "libp2p-swarm", "libp2p-tcp", + "libp2p-upnp", "libp2p-yamux", - "multiaddr 0.18.0", + "multiaddr", "pin-project", + "rw-stream-sink", + "thiserror", ] [[package]] name = "libp2p-allow-block-list" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" +version = "0.3.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4035,9 +4179,8 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" +version = "0.3.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4047,9 +4190,8 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef7dd7b09e71aac9271c60031d0e558966cdb3253ba0308ab369bb2de80630d0" +version = "0.41.2" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "either", "fnv", @@ -4057,9 +4199,8 @@ dependencies = [ "futures-timer", "instant", "libp2p-identity", - "log", - "multiaddr 0.18.0", - "multihash 0.19.1", + "multiaddr", + "multihash", "multistream-select", "once_cell", "parking_lot 0.12.1", @@ -4067,205 +4208,203 @@ dependencies = [ "quick-protobuf", "rand", "rw-stream-sink", - "smallvec 1.11.0", + "smallvec", "thiserror", - "unsigned-varint 0.7.2", + "tracing", + "unsigned-varint 0.8.0", "void", ] [[package]] name = "libp2p-dns" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd4394c81c0c06d7b4a60f3face7e8e8a9b246840f98d2c80508d0721b032147" +version = "0.41.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ + "async-trait", "futures", + "hickory-resolver", "libp2p-core", "libp2p-identity", - "log", "parking_lot 0.12.1", - "smallvec 1.11.0", - "trust-dns-resolver", + "smallvec", + "tracing", ] [[package]] name = "libp2p-gossipsub" -version = "0.45.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d157562dba6017193e5285acf6b1054759e83540bfd79f75b69d6ce774c88da" +version = "0.46.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ + "async-channel", "asynchronous-codec", - "base64 0.21.4", + "base64 0.21.7", "byteorder", "bytes", "either", "fnv", "futures", "futures-ticker", - "getrandom 0.2.10", + "futures-timer", + "getrandom", "hex_fmt", "instant", "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", 
"rand", "regex", - "sha2 0.10.7", - "smallvec 1.11.0", - "unsigned-varint 0.7.2", + "sha2 0.10.8", + "smallvec", + "tracing", "void", ] [[package]] name = "libp2p-identify" -version = "0.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a29675a32dbcc87790db6cf599709e64308f1ae9d5ecea2d259155889982db8" +version = "0.44.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "asynchronous-codec", "either", "futures", + "futures-bounded", "futures-timer", "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", - "lru 0.10.1", + "lru", "quick-protobuf", "quick-protobuf-codec", - "smallvec 1.11.0", + "smallvec", "thiserror", + "tracing", "void", ] [[package]] name = "libp2p-identity" -version = "0.2.3" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686e73aff5e23efbb99bc85340ea6fd8686986aa7b283a881ba182cfca535ca9" +checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" dependencies = [ "asn1_der", "bs58 0.5.0", "ed25519-dalek", + "hkdf", "libsecp256k1", - "log", - "multihash 0.19.1", + "multihash", "p256", "quick-protobuf", "rand", "sec1 0.7.3", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", + "tracing", "void", "zeroize", ] [[package]] name = "libp2p-mdns" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" +version = "0.45.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "data-encoding", "futures", + "hickory-proto", "if-watch", "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", "rand", - "smallvec 1.11.0", - "socket2 0.5.4", + "smallvec", + "socket2 0.5.5", "tokio", - "trust-dns-proto", + "tracing", "void", ] [[package]] name = "libp2p-metrics" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" +version = "0.14.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ + "futures", "instant", "libp2p-core", "libp2p-gossipsub", "libp2p-identify", "libp2p-identity", "libp2p-swarm", - "once_cell", + "pin-project", "prometheus-client", ] [[package]] name = "libp2p-mplex" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93959ed08b6caf9810e067655e25f1362098797fef7c44d3103e63dcb6f0fabe" +version = "0.41.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "asynchronous-codec", "bytes", "futures", "libp2p-core", "libp2p-identity", - "log", "nohash-hasher", "parking_lot 0.12.1", "rand", - "smallvec 1.11.0", - "unsigned-varint 0.7.2", + "smallvec", + "tracing", + "unsigned-varint 0.8.0", ] [[package]] name = "libp2p-noise" -version = "0.43.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71ce70757f2c0d82e9a3ef738fb10ea0723d16cec37f078f719e2c247704c1bb" +version = "0.44.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ + "asynchronous-codec", "bytes", - 
"curve25519-dalek 4.1.0", + "curve25519-dalek", "futures", "libp2p-core", "libp2p-identity", - "log", - "multiaddr 0.18.0", - "multihash 0.19.1", + "multiaddr", + "multihash", "once_cell", "quick-protobuf", "rand", - "sha2 0.10.7", + "sha2 0.10.8", "snow", "static_assertions", "thiserror", + "tracing", "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-plaintext" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37266c683a757df713f7dcda0cdcb5ad4681355ffa1b37b77c113c176a531195" +version = "0.41.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "asynchronous-codec", "bytes", "futures", "libp2p-core", "libp2p-identity", - "log", "quick-protobuf", - "unsigned-varint 0.7.2", + "quick-protobuf-codec", + "tracing", ] [[package]] name = "libp2p-quic" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cb763e88f9a043546bfebd3575f340e7dd3d6c1b2cf2629600ec8965360c63a" +version = "0.10.2" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "bytes", "futures", @@ -4274,21 +4413,21 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-tls", - "log", "parking_lot 0.12.1", "quinn", "rand", - "rustls", - "socket2 0.5.4", + "ring 0.16.20", + "rustls 0.21.10", + "socket2 0.5.5", "thiserror", "tokio", + "tracing", ] [[package]] name = "libp2p-swarm" -version = "0.43.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28016944851bd73526d3c146aabf0fa9bbe27c558f080f9e5447da3a1772c01a" +version = "0.45.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "either", "fnv", @@ -4298,33 +4437,30 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "log", "multistream-select", "once_cell", "rand", - "smallvec 1.11.0", + "smallvec", "tokio", + "tracing", "void", ] [[package]] name = "libp2p-swarm-derive" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" +version = "0.34.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "heck", - "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] name = "libp2p-tcp" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09bfdfb6f945c5c014b87872a0bdb6e0aef90e92f380ef57cd9013f118f9289d" +version = "0.41.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "futures", "futures-timer", @@ -4332,41 +4468,67 @@ dependencies = [ "libc", "libp2p-core", "libp2p-identity", - "log", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", + "tracing", ] [[package]] name = "libp2p-tls" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" +version = "0.3.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ 
"futures", "futures-rustls", "libp2p-core", "libp2p-identity", "rcgen", - "ring", - "rustls", - "rustls-webpki", + "ring 0.16.20", + "rustls 0.21.10", + "rustls-webpki 0.101.7", "thiserror", "x509-parser", "yasna", ] [[package]] -name = "libp2p-yamux" -version = "0.44.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" +name = "libp2p-upnp" +version = "0.2.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ + "futures", + "futures-timer", + "igd-next", + "libp2p-core", + "libp2p-swarm", + "tokio", + "tracing", + "void", +] + +[[package]] +name = "libp2p-yamux" +version = "0.45.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" +dependencies = [ + "either", "futures", "libp2p-core", - "log", "thiserror", - "yamux", + "tracing", + "yamux 0.12.1", + "yamux 0.13.1", +] + +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.2", + "libc", + "redox_syscall 0.4.1", ] [[package]] @@ -4430,9 +4592,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050" dependencies = [ "cc", "pkg-config", @@ -4441,7 +4603,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.5.0" +version = "4.6.0" dependencies = [ "account_manager", "account_utils", @@ -4453,7 +4615,6 @@ dependencies = [ "clap_utils", "database_manager", "directory", - "env_logger 0.9.3", "environment", "eth1", "eth2", @@ -4464,6 +4625,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "lighthouse_version", + "logging", "malloc_utils", "sensitive_url", "serde", @@ -4475,6 +4637,7 @@ dependencies = [ "sloggers", "task_executor", "tempfile", + "tracing-subscriber", "types", "unused_port", "validator_client", @@ -4508,10 +4671,9 @@ dependencies = [ "lazy_static", "libp2p", "libp2p-mplex", - "libp2p-quic", "lighthouse_metrics", "lighthouse_version", - "lru 0.7.8", + "lru", "lru_cache", "parking_lot 0.12.1", "prometheus-client", @@ -4520,12 +4682,11 @@ dependencies = [ "rand", "regex", "serde", - "serde_derive", "sha2 0.9.9", "slog", "slog-async", "slog-term", - "smallvec 1.11.0", + "smallvec", "snap", "ssz_types", "strum", @@ -4567,15 +4728,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.3.8" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lmdb-rkv" @@ -4600,9 +4755,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg", "scopeguard", @@ -4638,24 +4793,20 @@ dependencies = [ "sloggers", "take_mut", "tokio", + "tracing", + "tracing-appender", + "tracing-core", + "tracing-log", + "tracing-subscriber", ] [[package]] name = "lru" -version = "0.7.8" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" +checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" dependencies = [ - "hashbrown 0.12.3", -] - -[[package]] -name = "lru" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" -dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.3", ] [[package]] @@ -4672,13 +4823,14 @@ name = "lru_cache" version = "0.1.0" dependencies = [ "fnv", + "mock_instant", ] [[package]] -name = "mach" -version = "0.3.2" +name = "mach2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" dependencies = [ "libc", ] @@ -4724,22 +4876,17 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "md-5" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ + "cfg-if", "digest 0.10.7", ] @@ -4748,7 +4895,7 @@ name = "mdbx-sys" version = "0.11.6-4" source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ - "bindgen", + "bindgen 0.59.2", "cc", "cmake", "libc", @@ -4756,24 +4903,15 @@ dependencies = [ [[package]] name = "mediatype" -version = "0.19.15" +version = "0.19.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" +checksum = "83a018c36a54f4e12c30464bbc59311f85d3f6f4d6c1b4fa4ea9db2b174ddefc" [[package]] name = "memchr" -version = "2.6.3" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" - -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memoffset" @@ -4815,31 +4953,10 @@ dependencies 
= [ "itertools", "proc-macro2", "quote", - "smallvec 1.11.0", + "smallvec", "syn 1.0.109", ] -[[package]] -name = "mev-rs" -version = "0.3.0" -source = "git+https://github.com/ralexstokes/mev-rs?rev=216657016d5c0889b505857c89ae42c7aa2764af#216657016d5c0889b505857c89ae42c7aa2764af" -dependencies = [ - "anvil-rpc", - "async-trait", - "axum", - "beacon-api-client", - "ethereum-consensus", - "hyper", - "parking_lot 0.12.1", - "reqwest", - "serde", - "serde_json", - "ssz_rs", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "migrations_internals" version = "2.1.0" @@ -4906,15 +5023,21 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] +[[package]] +name = "mock_instant" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c1a54de846c4006b88b1516731cc1f6026eb5dc4bcb186aa071ef66d40524ec" + [[package]] name = "monitoring_api" version = "0.1.0" @@ -4927,7 +5050,6 @@ dependencies = [ "reqwest", "sensitive_url", "serde", - "serde_derive", "serde_json", "slog", "store", @@ -4937,40 +5059,22 @@ dependencies = [ [[package]] name = "more-asserts" -version = "0.2.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" +checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" [[package]] name = "multiaddr" -version = "0.14.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c580bfdd8803cce319b047d239559a22f809094aaea4ac13902a1fdcfcd4261" -dependencies = [ - "arrayref", - "bs58 0.4.0", - "byteorder", - "data-encoding", - "multihash 0.16.3", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.2", - "url", -] - -[[package]] -name = "multiaddr" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92a651988b3ed3ad1bc8c87d016bb92f6f395b84ed1db9b926b32b1fc5a2c8b5" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" dependencies = [ "arrayref", "byteorder", "data-encoding", "libp2p-identity", "multibase", - "multihash 0.19.1", + "multihash", "percent-encoding", "serde", "static_assertions", @@ -4989,19 +5093,6 @@ dependencies = [ "data-encoding-macro", ] -[[package]] -name = "multihash" -version = "0.16.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" -dependencies = [ - "core2", - "digest 0.10.7", - "multihash-derive", - "sha2 0.10.7", - "unsigned-varint 0.7.2", -] - [[package]] name = "multihash" version = "0.19.1" @@ -5012,32 +5103,17 @@ dependencies = [ "unsigned-varint 0.7.2", ] -[[package]] -name = "multihash-derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "multistream-select" version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "bytes", "futures", - "log", "pin-project", - "smallvec 1.11.0", - "unsigned-varint 0.7.2", + "smallvec", + "tracing", + "unsigned-varint 0.8.0", ] [[package]] @@ -5134,6 +5210,7 @@ dependencies = [ "derivative", "environment", "error-chain", + "eth2", "ethereum-types 0.14.1", "ethereum_ssz", "execution_layer", @@ -5149,6 +5226,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "logging", + "lru", "lru_cache", "matches", "num_cpus", @@ -5161,7 +5239,7 @@ dependencies = [ "slog-term", "sloggers", "slot_clock", - "smallvec 1.11.0", + "smallvec", "ssz_types", "store", "strum", @@ -5172,19 +5250,6 @@ dependencies = [ "types", ] -[[package]] -name = "nix" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" -dependencies = [ - "bitflags 1.3.2", - "cc", - "cfg-if", - "libc", - "memoffset 0.6.5", -] - [[package]] name = "nix" version = "0.24.3" @@ -5202,7 +5267,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "cfg-if", "libc", ] @@ -5223,12 +5288,6 @@ dependencies = [ "validator_dir", ] -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - [[package]] name = "nohash-hasher" version = "0.2.0" @@ -5289,7 +5348,7 @@ dependencies = [ "num-traits", "rand", "serde", - "smallvec 1.11.0", + "smallvec", "zeroize", ] @@ -5316,9 +5375,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", ] @@ -5329,7 +5388,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.4", "libc", ] @@ -5344,9 +5403,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -5362,9 +5421,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oneshot_broadcast" @@ -5412,11 +5471,11 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = 
"15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "cfg-if", "foreign-types", "libc", @@ -5433,7 +5492,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -5444,18 +5503,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.1.3+3.1.2" +version = "300.2.1+3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd2c101a165fff9935e34def4669595ab1c7847943c42be86e21503e482be107" +checksum = "3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.93" +version = "0.9.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" +checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" dependencies = [ "cc", "libc", @@ -5481,7 +5540,6 @@ dependencies = [ "rand", "rayon", "serde", - "serde_derive", "state_processing", "store", "tokio", @@ -5500,10 +5558,10 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa 0.16.8", - "elliptic-curve 0.13.5", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -5522,15 +5580,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.5" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" +checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.6.5", + "parity-scale-codec-derive 3.6.9", "serde", ] @@ -5540,7 +5598,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -5548,11 +5606,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.5" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 1.0.109", @@ -5560,9 +5618,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -5582,7 +5640,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", + "parking_lot_core 0.9.9", ] [[package]] @@ -5595,20 +5653,20 @@ dependencies = [ "instant", "libc", "redox_syscall 
0.2.16", - "smallvec 1.11.0", + "smallvec", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", - "smallvec 1.11.0", + "redox_syscall 0.4.1", + "smallvec", "windows-targets 0.48.5", ] @@ -5619,7 +5677,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -5635,7 +5693,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac 0.11.0", ] [[package]] @@ -5647,7 +5705,7 @@ dependencies = [ "digest 0.10.7", "hmac 0.12.1", "password-hash", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -5665,6 +5723,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +dependencies = [ + "base64 0.21.7", + "serde", +] + [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -5676,9 +5744,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pharos" @@ -5725,7 +5793,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -5757,14 +5825,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der 0.7.8", - "spki 0.7.2", + "spki 0.7.3", ] [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" [[package]] name = "platforms" @@ -5774,9 +5842,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "platforms" -version = "3.1.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "plotters" @@ -5808,29 +5876,27 @@ dependencies = [ [[package]] name = "polling" -version = "2.8.0" +version = "3.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +checksum = "545c980a3880efd47b2e262f6a4bb6daad6555cf3367aa9c4e52895f69537a41" dependencies = [ - "autocfg", - "bitflags 1.3.2", "cfg-if", "concurrent-queue", - "libc", - "log", "pin-project-lite", - "windows-sys 0.48.0", + "rustix 0.38.30", + "tracing", + "windows-sys 
0.52.0", ] [[package]] name = "poly1305" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.5.1", ] [[package]] @@ -5842,7 +5908,19 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.0", +] + +[[package]] +name = "polyval" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash 0.5.1", ] [[package]] @@ -5851,7 +5929,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "byteorder", "bytes", "fallible-iterator", @@ -5859,7 +5937,7 @@ dependencies = [ "md-5", "memchr", "rand", - "sha2 0.10.7", + "sha2 0.10.8", "stringprep", ] @@ -5874,6 +5952,12 @@ dependencies = [ "postgres-protocol", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -5898,12 +5982,22 @@ dependencies = [ ] [[package]] -name = "primeorder" -version = "0.13.2" +name = "prettyplease" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ - "elliptic-curve 0.13.5", + "proc-macro2", + "syn 2.0.48", +] + +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve 0.13.8", ] [[package]] @@ -5921,9 +6015,9 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash 0.8.0", "impl-codec 0.6.0", @@ -5935,12 +6029,21 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ - "thiserror", - "toml 0.5.11", + "once_cell", + "toml_edit 0.19.15", +] + +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", ] [[package]] @@ -5967,28 +6070,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - -[[package]] -name = "proc-macro-warning" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.37", -] - [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] @@ -6005,7 +6091,7 @@ dependencies = [ "flate2", "hex", "lazy_static", - "rustix 0.36.15", + "rustix 0.36.17", ] [[package]] @@ -6025,9 +6111,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" +checksum = "510c4f1c9d81d556458f94c98f857748130ea9737bbd6053da497503b26ea63c" dependencies = [ "dtoa", "itoa", @@ -6043,7 +6129,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -6054,7 +6140,6 @@ dependencies = [ "ethereum_ssz_derive", "safe_arith", "serde", - "serde_derive", "serde_yaml", "superstruct", "types", @@ -6068,16 +6153,16 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psutil" -version = "3.2.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f866af2b0f8e4b0d2d00aad8a9c5fc48fad33466cd99a64cbb3a4c1505f1a62d" +checksum = "5e617cc9058daa5e1fe5a0d23ed745773a5ee354111dad1ec0235b0cc16b6730" dependencies = [ "cfg-if", "darwin-libproc", "derive_more", "glob", - "mach", - "nix 0.23.2", + "mach2", + "nix 0.24.3", "num_cpus", "once_cell", "platforms 2.0.0", @@ -6102,15 +6187,14 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" +version = "0.3.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "asynchronous-codec", "bytes", "quick-protobuf", "thiserror", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] @@ -6147,7 +6231,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls", + "rustls 0.21.10", "thiserror", "tokio", "tracing", @@ -6155,15 +6239,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" dependencies = [ "bytes", "rand", - "ring", + "ring 0.16.20", "rustc-hash", - "rustls", + "rustls 0.21.10", "slab", "thiserror", "tinyvec", @@ -6178,16 +6262,16 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.4", + "socket2 0.5.5", "tracing", "windows-sys 0.48.0", ] [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -6233,7 +6317,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -6243,16 +6327,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -6261,7 +6336,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + "getrandom", ] [[package]] @@ -6270,14 +6345,14 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" dependencies = [ "either", "rayon-core", @@ -6285,24 +6360,22 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] name = "rcgen" -version = "0.10.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem", - "ring", + "pem 3.0.3", + "ring 0.16.20", "time", "yasna", ] @@ -6318,34 +6391,34 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ - "getrandom 0.2.10", - "redox_syscall 0.2.16", + "getrandom", + "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.9.5" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = 
"b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.8", - "regex-syntax 0.7.5", + "regex-automata 0.4.4", + "regex-syntax 0.8.2", ] [[package]] @@ -6359,13 +6432,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.2", ] [[package]] @@ -6376,25 +6449,25 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.24", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", "hyper-rustls", "hyper-tls", "ipnet", @@ -6405,15 +6478,16 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls", - "rustls-pemfile", + "rustls 0.21.10", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", - "tokio-rustls", - "tokio-util 0.7.8", + "tokio-rustls 0.24.1", + "tokio-util 0.7.10", "tower-service", "url", "wasm-bindgen", @@ -6464,12 +6538,26 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", - "untrusted", + "spin 0.5.2", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +dependencies = [ + "cc", + "getrandom", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -6531,9 +6619,9 @@ dependencies = [ "bitflags 1.3.2", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.4", + "hashlink", "libsqlite3-sys", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -6574,9 +6662,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.15" +version = "0.36.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" +checksum = "305efbd14fde4139eb501df5f136994bb520b033fa9fbdce287507dc23b8c7ed" dependencies = [ "bitflags 1.3.2", "errno", @@ -6588,60 +6676,87 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.23" +version = "0.38.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 
0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" -dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "errno", "libc", - "linux-raw-sys 0.4.7", - "windows-sys 0.48.0", + "linux-raw-sys 0.4.13", + "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring", - "rustls-webpki", + "ring 0.17.7", + "rustls-webpki 0.101.7", "sct", ] [[package]] -name = "rustls-pemfile" -version = "1.0.3" +name = "rustls" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ - "base64 0.21.4", + "log", + "ring 0.17.7", + "rustls-pki-types", + "rustls-webpki 0.102.1", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pemfile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +dependencies = [ + "base64 0.21.7", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring 0.17.7", + "untrusted 0.9.0", ] [[package]] name = "rustls-webpki" -version = "0.101.5" +version = "0.102.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" +checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" dependencies = [ - "ring", - "untrusted", + "ring 0.17.7", + "rustls-pki-types", + "untrusted 0.9.0", ] [[package]] @@ -6653,8 +6768,7 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rw-stream-sink" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" dependencies = [ "futures", "pin-project", @@ -6663,9 +6777,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "safe_arith" @@ -6691,23 +6805,23 @@ dependencies = [ [[package]] name 
= "scale-info" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" +checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.6.5", + "parity-scale-codec 3.6.9", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" +checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -6715,11 +6829,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6757,12 +6871,12 @@ dependencies = [ [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.7", + "untrusted 0.9.0", ] [[package]] @@ -6818,9 +6932,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" dependencies = [ "serde", ] @@ -6841,24 +6955,13 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.188" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" dependencies = [ "serde_derive", ] -[[package]] -name = "serde-hex" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca37e3e4d1b39afd7ff11ee4e947efae85adfddf4841787bfa47c470e96dc26d" -dependencies = [ - "array-init", - "serde", - "smallvec 0.6.14", -] - [[package]] name = "serde_array_query" version = "0.1.0" @@ -6881,20 +6984,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" dependencies = [ "itoa", "ryu", @@ -6903,9 +7006,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = 
"0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" dependencies = [ "itoa", "serde", @@ -6913,20 +7016,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.16" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" +checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -6967,21 +7070,22 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.26" +version = "0.9.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" dependencies = [ - "indexmap 1.9.3", + "indexmap 2.1.0", + "itoa", "ryu", "serde", - "yaml-rust", + "unsafe-libyaml", ] [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -7003,9 +7107,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -7036,18 +7140,18 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] [[package]] name = "shlex" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" @@ -7065,17 +7169,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -7139,14 +7243,13 @@ dependencies = [ "lmdb-rkv", "lmdb-rkv-sys", "logging", - 
"lru 0.7.8", + "lru", "maplit", "parking_lot 0.12.1", "rand", "rayon", "safe_arith", "serde", - "serde_derive", "slog", "sloggers", "strum", @@ -7186,7 +7289,6 @@ dependencies = [ "rayon", "rusqlite", "serde", - "serde_derive", "serde_json", "tempfile", "types", @@ -7197,6 +7299,9 @@ name = "slog" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" +dependencies = [ + "erased-serde", +] [[package]] name = "slog-async" @@ -7303,47 +7408,38 @@ dependencies = [ [[package]] name = "smallvec" -version = "0.6.14" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "snap" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" +checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "snow" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" dependencies = [ - "aes-gcm", + "aes-gcm 0.10.3", "blake2", "chacha20poly1305", - "curve25519-dalek 4.1.0", - "rand_core 0.6.4", - "ring", + "curve25519-dalek", + "rand_core", + "ring 0.17.7", "rustc_version", - "sha2 0.10.7", + "sha2 0.10.8", "subtle", ] [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -7351,9 +7447,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -7365,6 +7461,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spki" version = "0.6.0" @@ -7377,39 +7479,14 @@ dependencies = [ [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der 0.7.8", ] -[[package]] -name = "ssz_rs" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" -dependencies = [ - "bitvec 1.0.1", - "hex", - "num-bigint", - "serde", - "sha2 0.9.9", - "ssz_rs_derive", -] - -[[package]] -name = "ssz_rs_derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "ssz_types" version = "0.5.4" @@ -7423,7 +7500,7 @@ dependencies = [ "itertools", "serde", "serde_derive", - "smallvec 1.11.0", + "smallvec", "tree_hash", "typenum", ] @@ -7448,7 +7525,7 @@ dependencies = [ "merkle_proof", "rayon", "safe_arith", - "smallvec 1.11.0", + "smallvec", "ssz_types", "tokio", "tree_hash", @@ -7486,10 +7563,9 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", - "lru 0.7.8", + "lru", "parking_lot 0.12.1", "serde", - "serde_derive", "slog", "sloggers", "state_processing", @@ -7545,9 +7621,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "superstruct" @@ -7559,7 +7635,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "smallvec 1.11.0", + "smallvec", "syn 1.0.109", ] @@ -7585,9 +7661,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.37" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -7655,7 +7731,6 @@ dependencies = [ "lighthouse_network", "parking_lot 0.12.1", "serde", - "serde_derive", "serde_json", "sysinfo", "types", @@ -7701,15 +7776,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", - "fastrand 2.0.0", - "redox_syscall 0.3.5", - "rustix 0.38.13", - "windows-sys 0.48.0", + "fastrand", + "redox_syscall 0.4.1", + "rustix 0.38.30", + "windows-sys 0.52.0", ] [[package]] @@ -7725,9 +7800,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -7750,8 +7825,9 @@ dependencies = [ [[package]] name = "testcontainers" -version = "0.14.0" -source = "git+https://github.com/testcontainers/testcontainers-rs/?rev=0f2c9851#0f2c985160e51a200cfc847097c15b8d85ed7df1" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d2931d7f521af5bae989f716c3fa43a6af9af7ec7a5e21b59ae40878cec00" dependencies = [ "bollard-stubs", "futures", @@ -7761,7 +7837,7 @@ dependencies = [ "rand", "serde", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -7775,22 +7851,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.56" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -7814,14 +7890,15 @@ dependencies = [ [[package]] name = "time" -version = "0.3.28" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ "deranged", "itoa", "libc", "num_threads", + "powerfmt", "serde", "time-core", "time-macros", @@ -7829,15 +7906,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ -7865,7 +7942,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand", "rustc-hash", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -7908,19 +7985,18 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", - "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", ] @@ -7937,13 +8013,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -7976,9 +8052,9 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.10", "whoami", ] @@ -7988,7 +8064,18 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.10", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.2", + "rustls-pki-types", "tokio", ] @@ -8001,7 +8088,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.10", ] [[package]] @@ -8022,9 +8109,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -8053,14 +8140,14 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.19.15", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -8071,13 +8158,24 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" @@ -8108,11 +8206,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -8120,21 +8217,33 @@ dependencies = [ ] [[package]] -name = "tracing-attributes" -version = "0.1.26" +name = "tracing-appender" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror", + "time", + "tracing-subscriber", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -8152,27 +8261,27 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = 
"ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ - "lazy_static", "log", + "once_cell", "tracing-core", ] [[package]] name = "tracing-subscriber" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "sharded-slab", - "smallvec 1.11.0", + "smallvec", "thread_local", "tracing", "tracing-core", @@ -8206,7 +8315,7 @@ checksum = "5c998ac5fe2b07c025444bdd522e6258110b63861c6698eedc610c071980238d" dependencies = [ "ethereum-types 0.14.1", "ethereum_hashing", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -8230,57 +8339,11 @@ dependencies = [ "rlp", ] -[[package]] -name = "trust-dns-proto" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "rand", - "smallvec 1.11.0", - "socket2 0.4.9", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "lru-cache", - "parking_lot 0.12.1", - "resolv-conf", - "smallvec 1.11.0", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto", -] - [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" @@ -8309,6 +8372,7 @@ dependencies = [ "hex", "int_to_bytes", "itertools", + "kzg", "lazy_static", "log", "maplit", @@ -8323,12 +8387,10 @@ dependencies = [ "rusqlite", "safe_arith", "serde", - "serde_derive", "serde_json", - "serde_with", "serde_yaml", "slog", - "smallvec 1.11.0", + "smallvec", "ssz_types", "state_processing", "strum", @@ -8371,9 +8433,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -8392,9 +8454,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" @@ -8404,14 +8466,30 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" +checksum = 
"8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ "generic-array", "subtle", ] +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + +[[package]] +name = "unsafe-libyaml" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" + [[package]] name = "unsigned-varint" version = "0.6.0" @@ -8427,6 +8505,12 @@ name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" dependencies = [ "asynchronous-codec", "bytes", @@ -8438,6 +8522,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "unused_port" version = "0.1.0" @@ -8449,12 +8539,12 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] @@ -8464,7 +8554,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.10", + "getrandom", "serde", ] @@ -8488,7 +8578,7 @@ dependencies = [ "filesystem", "futures", "hex", - "hyper", + "hyper 1.1.0", "itertools", "lazy_static", "libsecp256k1", @@ -8501,15 +8591,15 @@ dependencies = [ "parking_lot 0.12.1", "rand", "reqwest", - "ring", + "ring 0.16.20", "safe_arith", "sensitive_url", "serde", - "serde_derive", "serde_json", "slashing_protection", "slog", "slot_clock", + "strum", "sysinfo", "system_health", "task_executor", @@ -8597,12 +8687,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - [[package]] name = "walkdir" version = "2.4.0" @@ -8624,29 +8708,28 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.5" -source = "git+https://github.com/seanmonstar/warp.git#5ad8a9cb155f6485d13d591a564d8c70053a388a" +version = "0.3.6" +source = "git+https://github.com/seanmonstar/warp.git#7b07043cee0ca24e912155db4e8f6d9ab7c049ed" dependencies = [ "bytes", "futures-channel", "futures-util", "headers", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "log", "mime", "mime_guess", "percent-encoding", "pin-project", - 
"rustls-pemfile", + "rustls-pemfile 2.0.0", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", - "tokio-stream", - "tokio-util 0.7.8", + "tokio-rustls 0.25.0", + "tokio-util 0.7.10", "tower-service", "tracing", ] @@ -8669,12 +8752,6 @@ dependencies = [ "warp", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -8683,9 +8760,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -8693,24 +8770,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" dependencies = [ "cfg-if", "js-sys", @@ -8720,9 +8797,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8730,22 +8807,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" [[package]] name = "wasm-streams" @@ -8791,7 +8868,7 @@ dependencies = [ "eth2", "hex", "http_api", - "hyper", + "hyper 1.1.0", "log", "logging", "network", @@ -8812,9 +8889,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" dependencies = [ "js-sys", "wasm-bindgen", @@ 
-8848,9 +8925,21 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10" + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.30", +] [[package]] name = "whoami" @@ -8898,9 +8987,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -8913,23 +9002,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.34.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" -dependencies = [ - "windows_aarch64_msvc 0.34.0", - "windows_i686_gnu 0.34.0", - "windows_i686_msvc 0.34.0", - "windows_x86_64_gnu 0.34.0", - "windows_x86_64_msvc 0.34.0", -] - -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ + "windows-core 0.51.1", "windows-targets 0.48.5", ] @@ -8945,6 +9022,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "windows-core" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -8963,6 +9058,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -8993,6 +9097,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.5", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -9006,10 +9125,10 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] -name = "windows_aarch64_msvc" -version = "0.34.0" +name = "windows_aarch64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" @@ -9024,10 +9143,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] -name = "windows_i686_gnu" -version = "0.34.0" +name = "windows_aarch64_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" @@ -9042,10 +9161,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] -name = "windows_i686_msvc" -version = "0.34.0" +name = "windows_i686_gnu" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" @@ -9060,10 +9179,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] -name = "windows_x86_64_gnu" -version = "0.34.0" +name = "windows_i686_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" @@ -9077,6 +9196,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -9090,10 +9215,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] -name = "windows_x86_64_msvc" -version = "0.34.0" +name = "windows_x86_64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" @@ -9108,10 +9233,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] -name = "winnow" -version = "0.5.15" +name = "windows_x86_64_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = 
"dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "winnow" +version = "0.5.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7cf47b659b318dccbd69cc4797a39ae128f533dce7902a1096044d1967b9c16" dependencies = [ "memchr", ] @@ -9162,12 +9293,13 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "1.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" +checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 3.2.0", - "rand_core 0.5.1", + "curve25519-dalek", + "rand_core", + "serde", "zeroize", ] @@ -9190,9 +9322,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.18" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab77e97b50aee93da431f2cee7cd0f43b4d1da3c408042f2d7d164187774f0a" +checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" [[package]] name = "xmltree" @@ -9214,9 +9346,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0329ef377816896f014435162bb3711ea7a07729c23d0960e6f8048b21b8fe91" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" dependencies = [ "futures", "log", @@ -9227,6 +9359,22 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yamux" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad1d0148b89300047e72994bee99ecdabd15a9166a7b70c8b8c37c314dcc9002" +dependencies = [ + "futures", + "instant", + "log", + "nohash-hasher", + "parking_lot 0.12.1", + "pin-project", + "rand", + "static_assertions", +] + [[package]] name = "yasna" version = "0.5.2" @@ -9237,10 +9385,30 @@ dependencies = [ ] [[package]] -name = "zeroize" -version = "1.6.0" +name = "zerocopy" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -9253,7 +9421,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.48", ] [[package]] @@ -9297,11 +9465,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", - "libc", "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 62c0e7bd2..ca55d00d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ members 
= [ "consensus/swap_or_not_shuffle", "crypto/bls", + "crypto/kzg", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", @@ -104,7 +105,7 @@ criterion = "0.3" delay_map = "0.3" derivative = "2" dirs = "3" -discv5 = { version = "0.3", features = ["libp2p"] } +discv5 = { git="https://github.com/sigp/discv5", rev="e30a2c31b7ac0c57876458b971164654dfa4513b", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" ethereum-types = "0.14" @@ -119,12 +120,12 @@ fnv = "1" fs2 = "0.4" futures = "0.3" hex = "0.4" -hyper = "0.14" +hyper = "1" itertools = "0.10" lazy_static = "1" libsecp256k1 = "0.7" log = "0.4" -lru = "0.7" +lru = "0.12" maplit = "1" num_cpus = "1" parking_lot = "0.12" @@ -136,19 +137,19 @@ r2d2 = "0.8" rand = "0.8" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls"] } +reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } ring = "0.16" rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_repr = "0.1" -serde_yaml = "0.8" +serde_yaml = "0.9" sha2 = "0.9" -slog = { version = "2", features = ["max_level_trace", "release_max_level_trace"] } +slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] } slog-async = "2" slog-term = "2" sloggers = { version = "2", features = ["json"] } -smallvec = "1" +smallvec = "1.11.2" snap = "1" ssz_types = "0.5" strum = { version = "0.24", features = ["derive"] } @@ -156,9 +157,13 @@ superstruct = "0.6" syn = "1" sysinfo = "0.26" tempfile = "3" -tokio = { version = "1", features = ["rt-multi-thread", "sync"] } +tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.6", features = ["codec", "compat", "time"] } +tracing-appender = "0.2" +tracing-core = "0.1" +tracing-log = "0.2" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } tree_hash = "0.5" tree_hash_derive = "0.5" url = "2" @@ -194,6 +199,7 @@ fork_choice = { path = "consensus/fork_choice" } genesis = { path = "beacon_node/genesis" } http_api = { path = "beacon_node/http_api" } int_to_bytes = { path = "consensus/int_to_bytes" } +kzg = { path = "crypto/kzg" } lighthouse_metrics = { path = "common/lighthouse_metrics" } lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_version = { path = "common/lighthouse_version" } @@ -218,7 +224,7 @@ swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } task_executor = { path = "common/task_executor" } types = { path = "consensus/types" } unused_port = { path = "common/unused_port" } -validator_client = { path = "validator_client/" } +validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } diff --git a/Cross.toml b/Cross.toml index d5f7a5d50..871391253 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -pre-build = ["apt-get install -y cmake clang-3.9"] +pre-build = ["apt-get install -y cmake clang-5.0"] [target.aarch64-unknown-linux-gnu] -pre-build = ["apt-get install -y cmake clang-3.9"] +pre-build = ["apt-get install -y cmake clang-5.0"] diff --git a/Dockerfile b/Dockerfile index bcddef8a6..a8dadf2ad 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,12 @@ -FROM 
rust:1.69.0-bullseye AS builder +FROM rust:1.73.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES ARG PROFILE=release +ARG CARGO_USE_GIT_CLI=true ENV FEATURES $FEATURES ENV PROFILE $PROFILE +ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_USE_GIT_CLI RUN cd lighthouse && make FROM ubuntu:22.04 @@ -13,4 +15,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse \ No newline at end of file +COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse diff --git a/Makefile b/Makefile index 7bed5732b..8392d0017 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 -# List of features to use when building natively. Can be overriden via the environment. +# List of features to use when building natively. Can be overridden via the environment. # No jemalloc on Windows ifeq ($(OS),Windows_NT) FEATURES?= @@ -31,12 +31,15 @@ CROSS_PROFILE ?= release # List of features to use when running EF tests. EF_TEST_FEATURES ?= +# List of features to use when running CI tests. +TEST_FEATURES ?= + # Cargo profile for regular builds. PROFILE ?= release # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair merge capella +FORKS=phase0 altair merge capella deneb # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= @@ -106,12 +109,26 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. test-release: - cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher + cargo test --workspace --release --features "$(TEST_FEATURES)" \ + --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network + +# Runs the full workspace tests in **release**, without downloading any additional +# test vectors, using nextest. +nextest-release: + cargo nextest run --workspace --release --features "$(TEST_FEATURES)" \ + --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. test-debug: - cargo test --workspace --exclude ef_tests --exclude beacon_chain + cargo test --workspace --features "$(TEST_FEATURES)" \ + --exclude ef_tests --exclude beacon_chain --exclude network + +# Runs the full workspace tests in **debug**, without downloading any additional test +# vectors, using nextest. +nextest-debug: + cargo nextest run --workspace --features "$(TEST_FEATURES)" \ + --exclude ef_tests --exclude beacon_chain --exclude network # Runs cargo-fmt (linter). cargo-fmt: @@ -119,7 +136,7 @@ cargo-fmt: # Typechecks benchmark code check-benches: - cargo check --workspace --benches + cargo check --workspace --benches --features "$(TEST_FEATURES)" # Runs only the ef-test vectors. 
run-ef-tests: @@ -129,25 +146,41 @@ run-ef-tests: cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests +# Runs EF test vectors with nextest +nextest-run-ef-tests: + rm -rf $(EF_TESTS)/.accessed_file_log.txt + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" + ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests + # Run the tests in the `beacon_chain` crate for all known forks. test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain + env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) test-op-pool-%: - env FORK_NAME=$* cargo test --release \ - --features 'beacon_chain/fork_from_env'\ + env FORK_NAME=$* cargo nextest run --release \ + --features "beacon_chain/fork_from_env,$(TEST_FEATURES)"\ -p operation_pool +# Run the tests in the `network` crate for all known forks. +test-network: $(patsubst %,test-network-%,$(FORKS)) + +test-network-%: + env FORK_NAME=$* cargo nextest run --release \ + --features "fork_from_env,$(TEST_FEATURES)" \ + -p network + # Run the tests in the `slasher` crate for all supported database backends. test-slasher: - cargo test --release -p slasher --features lmdb - cargo test --release -p slasher --no-default-features --features mdbx - cargo test --release -p slasher --features lmdb,mdbx # both backends enabled + cargo nextest run --release -p slasher --features "lmdb,$(TEST_FEATURES)" + cargo nextest run --release -p slasher --no-default-features --features "mdbx,$(TEST_FEATURES)" + cargo nextest run --release -p slasher --features "lmdb,mdbx,$(TEST_FEATURES)" # both backends enabled # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: @@ -156,6 +189,9 @@ run-state-transition-tests: # Downloads and runs the EF test vectors. test-ef: make-ef-tests run-ef-tests +# Downloads and runs the EF test vectors with nextest. +nextest-ef: make-ef-tests nextest-run-ef-tests + # Runs tests checking interop between Lighthouse and execution clients. test-exec-engine: make -C $(EXECUTION_ENGINE_INTEGRATION) test @@ -164,21 +200,34 @@ test-exec-engine: # test vectors. test: test-release +# Updates the CLI help text pages in the Lighthouse book, building with Docker. +cli: + docker run --rm --user=root \ + -v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \ + bash -c 'cd lighthouse && make && ./scripts/cli.sh' + +# Updates the CLI help text pages in the Lighthouse book, building using local +# `cargo`. +cli-local: + make && ./scripts/cli.sh + # Runs the entire test suite, downloading test vectors if required. test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. 
lint: - cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \ + cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ + -D clippy::manual_let_else \ -D warnings \ -A clippy::derive_partial_eq_without_eq \ -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push \ -A clippy::question-mark \ - -A clippy::uninlined-format-args + -A clippy::uninlined-format-args \ + -A clippy::enum_variant_names # Lints the code using Clippy and automatically fix some simple compiler warnings. lint-fix: @@ -201,12 +250,16 @@ make-ef-tests: # Verifies that crates compile with fuzzing features enabled arbitrary-fuzz: - cargo check -p state_processing --features arbitrary-fuzz - cargo check -p slashing_protection --features arbitrary-fuzz + cargo check -p state_processing --features arbitrary-fuzz,$(TEST_FEATURES) + cargo check -p slashing_protection --features arbitrary-fuzz,$(TEST_FEATURES) # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) -audit: +audit: install-audit audit-CI + +install-audit: cargo install --force cargo-audit + +audit-CI: cargo audit # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. @@ -215,7 +268,7 @@ vendor: # Runs `cargo udeps` to check for unused dependencies udeps: - cargo +$(PINNED_NIGHTLY) udeps --tests --all-targets --release + cargo +$(PINNED_NIGHTLY) udeps --tests --all-targets --release --features "$(TEST_FEATURES)" # Performs a `cargo` clean and cleans the `ef_tests` directory. clean: diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 602390556..bc9e0ee1d 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -14,7 +14,7 @@ use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::path::{Path, PathBuf}; use std::time::Duration; use tokio::time::sleep; -use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit}; +use types::{ChainSpec, Epoch, EthSpec, VoluntaryExit}; pub const CMD: &str = "exit"; pub const KEYSTORE_FLAG: &str = "keystore"; @@ -146,7 +146,6 @@ async fn publish_voluntary_exit( .ok_or("Failed to get current epoch. Please check your system time")?; let validator_index = get_validator_index_for_exit(client, &keypair.pk, epoch, spec).await?; - let fork = get_beacon_state_fork(client).await?; let voluntary_exit = VoluntaryExit { epoch, validator_index, @@ -173,12 +172,8 @@ async fn publish_voluntary_exit( if confirmation == CONFIRMATION_PHRASE { // Sign and publish the voluntary exit to network - let signed_voluntary_exit = voluntary_exit.sign( - &keypair.sk, - &fork, - genesis_data.genesis_validators_root, - spec, - ); + let signed_voluntary_exit = + voluntary_exit.sign(&keypair.sk, genesis_data.genesis_validators_root, spec); client .post_beacon_pool_voluntary_exits(&signed_voluntary_exit) .await @@ -316,16 +311,6 @@ async fn is_syncing(client: &BeaconNodeHttpClient) -> Result { .is_syncing) } -/// Get fork object for the current state by querying the beacon node client. -async fn get_beacon_state_fork(client: &BeaconNodeHttpClient) -> Result { - Ok(client - .get_beacon_states_fork(StateId::Head) - .await - .map_err(|e| format!("Failed to get get fork: {:?}", e))? - .ok_or("Failed to get fork, state not found")? - .data) -} - /// Calculates the current epoch from the genesis time and current time. 
fn get_current_epoch(genesis_time: u64, spec: &ChainSpec) -> Option { let slot_clock = SystemTimeSlotClock::new( diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 339d9a291..bf000385f 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -284,6 +284,8 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin suggested_fee_recipient, None, None, + None, + None, ) .map_err(|e| format!("Unable to create new validator definition: {:?}", e))?; diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index c6d81275a..0a98a452b 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -16,7 +16,6 @@ pub const EXPORT_CMD: &str = "export"; pub const IMPORT_FILE_ARG: &str = "IMPORT-FILE"; pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE"; -pub const MINIFY_FLAG: &str = "minify"; pub const PUBKEYS_FLAG: &str = "pubkeys"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { @@ -31,16 +30,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("FILE") .help("The slashing protection interchange file to import (.json)"), ) - .arg( - Arg::with_name(MINIFY_FLAG) - .long(MINIFY_FLAG) - .takes_value(true) - .possible_values(&["false", "true"]) - .help( - "Deprecated: Lighthouse no longer requires minification on import \ - because it always minifies", - ), - ), ) .subcommand( App::new(EXPORT_CMD) @@ -61,17 +50,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { comma-separated. All known keys will be exported if omitted", ), ) - .arg( - Arg::with_name(MINIFY_FLAG) - .long(MINIFY_FLAG) - .takes_value(true) - .default_value("false") - .possible_values(&["false", "true"]) - .help( - "Minify the output file. This will make it smaller and faster to \ - import, but not faster to generate.", - ), - ), ) } @@ -92,7 +70,6 @@ pub fn cli_run( match matches.subcommand() { (IMPORT_CMD, Some(matches)) => { let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?; - let minify: Option = clap_utils::parse_optional(matches, MINIFY_FLAG)?; let import_file = File::open(&import_filename).map_err(|e| { format!( "Unable to open import file at {}: {:?}", @@ -102,23 +79,10 @@ pub fn cli_run( })?; eprint!("Loading JSON file into memory & deserializing"); - let mut interchange = Interchange::from_json_reader(&import_file) + let interchange = Interchange::from_json_reader(&import_file) .map_err(|e| format!("Error parsing file for import: {:?}", e))?; eprintln!(" [done]."); - if let Some(minify) = minify { - eprintln!( - "WARNING: --minify flag is deprecated and will be removed in a future release" - ); - if minify { - eprint!("Minifying input file for faster loading"); - interchange = interchange - .minify() - .map_err(|e| format!("Minification failed: {:?}", e))?; - eprintln!(" [done]."); - } - } - let slashing_protection_database = SlashingDatabase::open_or_create(&slashing_protection_db_path).map_err(|e| { format!( @@ -206,7 +170,6 @@ pub fn cli_run( } (EXPORT_CMD, Some(matches)) => { let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?; - let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?; let selected_pubkeys = if let Some(pubkeys) = clap_utils::parse_optional::(matches, PUBKEYS_FLAG)? 
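With the deprecated --minify flag gone, the import path above simply deserializes the interchange file and hands it to the database, which always minifies during import anyway. A minimal sketch of that flow, using only calls visible in this diff (`File::open`, `Interchange::from_json_reader`); the helper name is invented, `Interchange` is assumed to be in scope from the slashing protection crate, and the call that actually applies the interchange to the database is elided:

    use std::fs::File;
    use std::path::Path;

    // Hypothetical helper mirroring the import path above.
    fn open_interchange(import_path: &Path) -> Result<Interchange, String> {
        let import_file = File::open(import_path).map_err(|e| {
            format!("Unable to open import file at {}: {:?}", import_path.display(), e)
        })?;
        // No client-side `.minify()` step any more: the file is deserialized
        // as-is and minified by the database itself during import.
        Interchange::from_json_reader(&import_file)
            .map_err(|e| format!("Error parsing file for import: {:?}", e))
    }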
@@ -237,17 +200,10 @@ pub fn cli_run( ) })?; - let mut interchange = slashing_protection_database + let interchange = slashing_protection_database .export_interchange_info(genesis_validators_root, selected_pubkeys.as_deref()) .map_err(|e| format!("Error during export: {:?}", e))?; - if minify { - eprintln!("Minifying output file"); - interchange = interchange - .minify() - .map_err(|e| format!("Unable to minify output: {:?}", e))?; - } - let output_file = File::create(export_filename) .map_err(|e| format!("Error creating output file: {:?}", e))?; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 073a0d170..8428a30a3 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.5.0" +version = "4.6.0" authors = [ "Paul Hauner ", "Age Manning BeaconChain { match state { BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - self.compute_attestation_rewards_altair(state, validators) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => self.compute_attestation_rewards_altair(state, validators), } } @@ -65,6 +70,13 @@ impl BeaconChain { let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; validator_statuses.process_attestations(&state)?; + process_justification_and_finalization_base( + &state, + &validator_statuses.total_balances, + spec, + )? + .apply_changes_to_state(&mut state); + let ideal_rewards = self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?; @@ -123,6 +135,9 @@ impl BeaconChain { // Calculate ideal_rewards let participation_cache = ParticipationCache::new(&state, spec)?; + process_justification_and_finalization(&state, &participation_cache)? + .apply_changes_to_state(&mut state); + process_inactivity_updates(&mut state, &participation_cache, spec)?; let previous_epoch = state.previous_epoch(); @@ -189,6 +204,7 @@ impl BeaconChain { let mut head_reward = 0i64; let mut target_reward = 0i64; let mut source_reward = 0i64; + let mut inactivity_penalty = 0i64; if eligible { let effective_balance = state.get_effective_balance(*validator_index)?; @@ -214,6 +230,14 @@ impl BeaconChain { head_reward = 0; } else if flag_index == TIMELY_TARGET_FLAG_INDEX { target_reward = *penalty; + + let penalty_numerator = effective_balance + .safe_mul(state.get_inactivity_score(*validator_index)?)?; + let penalty_denominator = spec + .inactivity_score_bias + .safe_mul(spec.inactivity_penalty_quotient_for_state(&state))?; + inactivity_penalty = + -(penalty_numerator.safe_div(penalty_denominator)? 
as i64); } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { source_reward = *penalty; } @@ -225,8 +249,7 @@ impl BeaconChain { target: target_reward, source: source_reward, inclusion_delay: None, - // TODO: altair calculation logic needs to be updated to include inactivity penalty - inactivity: 0, + inactivity: inactivity_penalty, }); } @@ -249,7 +272,6 @@ impl BeaconChain { target: 0, source: 0, inclusion_delay: None, - // TODO: altair calculation logic needs to be updated to include inactivity penalty inactivity: 0, }); match *flag_index { diff --git a/beacon_node/beacon_chain/src/attestation_simulator.rs b/beacon_node/beacon_chain/src/attestation_simulator.rs new file mode 100644 index 000000000..645315845 --- /dev/null +++ b/beacon_node/beacon_chain/src/attestation_simulator.rs @@ -0,0 +1,107 @@ +use crate::{BeaconChain, BeaconChainTypes}; +use slog::{debug, error}; +use slot_clock::SlotClock; +use std::sync::Arc; +use task_executor::TaskExecutor; +use tokio::time::sleep; +use types::{EthSpec, Slot}; + +/// Don't run the attestation simulator if the head slot is this many epochs +/// behind the wall-clock slot. +const SYNCING_TOLERANCE_EPOCHS: u64 = 2; + +/// Spawns a routine which produces an unaggregated attestation at every slot. +/// +/// This routine will run once per slot. +pub fn start_attestation_simulator_service( + executor: TaskExecutor, + chain: Arc>, +) { + executor.clone().spawn( + async move { attestation_simulator_service(executor, chain).await }, + "attestation_simulator_service", + ); +} + +/// Loop indefinitely, calling `BeaconChain::produce_unaggregated_attestation` a third of the way into each slot (4s on mainnet). +async fn attestation_simulator_service( + executor: TaskExecutor, + chain: Arc>, +) { + let slot_duration = chain.slot_clock.slot_duration(); + let additional_delay = slot_duration / 3; + + loop { + match chain.slot_clock.duration_to_next_slot() { + Some(duration) => { + sleep(duration + additional_delay).await; + + debug!( + chain.log, + "Simulating unagg. attestation production"; + ); + + // Run the task in the executor + let inner_chain = chain.clone(); + executor.spawn( + async move { + if let Ok(current_slot) = inner_chain.slot() { + produce_unaggregated_attestation(inner_chain, current_slot); + } + }, + "attestation_simulator_service", + ); + } + None => { + error!(chain.log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + } + }; + } +} + +pub fn produce_unaggregated_attestation( + chain: Arc>, + current_slot: Slot, +) { + // Don't run the attestation simulator when the head slot is far behind the + // wall-clock slot. + // + // This helps prevent the simulator from becoming a burden by computing + // committees from old states. + let syncing_tolerance_slots = SYNCING_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch(); + if chain.best_slot() + syncing_tolerance_slots < current_slot { + return; + } + + // Attestations for different committees are practically identical apart from the committee + // index field, and committee 0 is guaranteed to exist, so there's no need to load a committee. + let beacon_committee_index = 0; + + // Store the unaggregated attestation in the validator monitor for later processing + match chain.produce_unaggregated_attestation(current_slot, beacon_committee_index) { + Ok(unaggregated_attestation) => { + let data = &unaggregated_attestation.data; + + debug!( + chain.log, + "Produce unagg. 
attestation"; + "attestation_source" => data.source.root.to_string(), + "attestation_target" => data.target.root.to_string(), + ); + + chain + .validator_monitor + .write() + .set_unaggregated_attestation(unaggregated_attestation); + } + Err(e) => { + debug!( + chain.log, + "Failed to simulate attestation"; + "error" => ?e + ); + } + } +} diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 5535fec37..d7a8bca4d 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -55,7 +55,7 @@ use std::borrow::Cow; use strum::AsRefStr; use tree_hash::TreeHash; use types::{ - Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, + Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, }; @@ -1049,10 +1049,21 @@ pub fn verify_propagation_slot_range( } // Taking advantage of saturating subtraction on `Slot`. - let earliest_permissible_slot = slot_clock + let one_epoch_prior = slot_clock .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)? - E::slots_per_epoch(); + + let current_fork = + spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?); + let earliest_permissible_slot = match current_fork { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => one_epoch_prior, + // EIP-7045 + ForkName::Deneb => one_epoch_prior + .epoch(E::slots_per_epoch()) + .start_slot(E::slots_per_epoch()), + }; + if attestation_slot < earliest_permissible_slot { return Err(Error::PastSlot { attestation_slot, diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 786402c99..d05f7cb4f 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -33,6 +33,17 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + self.compute_beacon_block_reward_with_cache(block, block_root, state) + } + + // This should only be called after a committee cache has been built + // for both the previous and current epoch + fn compute_beacon_block_reward_with_cache>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &BeaconState, + ) -> Result { let proposer_index = block.proposer_index(); let sync_aggregate_reward = @@ -64,19 +75,19 @@ impl BeaconChain { self.compute_beacon_block_attestation_reward_base(block, block_root, state) .map_err(|e| { error!( - self.log, - "Error calculating base block attestation reward"; - "error" => ?e + self.log, + "Error calculating base block attestation reward"; + "error" => ?e ); BeaconChainError::BlockRewardAttestationError })? } else { - self.compute_beacon_block_attestation_reward_altair(block, state) + self.compute_beacon_block_attestation_reward_altair_deneb(block, state) .map_err(|e| { error!( - self.log, - "Error calculating altair block attestation reward"; - "error" => ?e + self.log, + "Error calculating altair block attestation reward"; + "error" => ?e ); BeaconChainError::BlockRewardAttestationError })? 
@@ -173,10 +184,12 @@ impl BeaconChain { Ok(block_attestation_reward) } - fn compute_beacon_block_attestation_reward_altair>( + fn compute_beacon_block_attestation_reward_altair_deneb< + Payload: AbstractExecPayload, + >( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, - state: &mut BeaconState, + state: &BeaconState, ) -> Result { let total_active_balance = state.get_total_active_balance()?; let base_reward_per_increment = @@ -189,9 +202,13 @@ impl BeaconChain { .safe_mul(WEIGHT_DENOMINATOR)? .safe_div(PROPOSER_WEIGHT)?; + let mut current_epoch_participation = state.current_epoch_participation()?.clone(); + let mut previous_epoch_participation = state.previous_epoch_participation()?.clone(); + for attestation in block.body().attestations() { let data = &attestation.data; let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); + // [Modified in Deneb:EIP7045] let participation_flag_indices = get_attestation_participation_flag_indices( state, data, @@ -200,13 +217,16 @@ impl BeaconChain { )?; let attesting_indices = get_attesting_indices_from_state(state, attestation)?; - let mut proposer_reward_numerator = 0; for index in attesting_indices { let index = index as usize; for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { - let epoch_participation = - state.get_epoch_participation_mut(data.target.epoch)?; + let epoch_participation = if data.target.epoch == state.current_epoch() { + &mut current_epoch_participation + } else { + &mut previous_epoch_participation + }; + let validator_participation = epoch_participation .get_mut(index) .ok_or(BeaconStateError::ParticipationOutOfBounds(index))?; diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index e43f2a8dd..9312d4511 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -3,7 +3,7 @@ use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use slog::{crit, debug, Logger}; use std::collections::HashMap; use std::sync::Arc; -use store::DatabaseBlock; +use store::{DatabaseBlock, ExecutionPayloadDeneb}; use task_executor::TaskExecutor; use tokio::sync::{ mpsc::{self, UnboundedSender}, @@ -97,6 +97,7 @@ fn reconstruct_default_header_block( let payload: ExecutionPayload = match fork { ForkName::Merge => ExecutionPayloadMerge::default().into(), ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::PayloadReconstruction(format!( "Block with fork variant {} has execution payload", @@ -714,19 +715,21 @@ mod tests { } #[tokio::test] - async fn check_all_blocks_from_altair_to_capella() { + async fn check_all_blocks_from_altair_to_deneb() { let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; let num_epochs = 8; let bellatrix_fork_epoch = 2usize; let capella_fork_epoch = 4usize; + let deneb_fork_epoch = 6usize; let num_blocks_produced = num_epochs * slots_per_epoch; let mut spec = test_spec::(); spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); + spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); - let harness = get_harness(VALIDATOR_COUNT, spec); + let harness = get_harness(VALIDATOR_COUNT, spec.clone()); // go to bellatrix fork harness 
.extend_slots(bellatrix_fork_epoch * slots_per_epoch) @@ -833,17 +836,19 @@ mod tests { } #[tokio::test] - async fn check_fallback_altair_to_capella() { + async fn check_fallback_altair_to_deneb() { let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; let num_epochs = 8; let bellatrix_fork_epoch = 2usize; let capella_fork_epoch = 4usize; + let deneb_fork_epoch = 6usize; let num_blocks_produced = num_epochs * slots_per_epoch; let mut spec = test_spec::(); spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); + spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); let harness = get_harness(VALIDATOR_COUNT, spec); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 958778350..fcd7be791 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -7,14 +7,22 @@ use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; +use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; +use crate::block_verification::POS_PANDA_BANNER; use crate::block_verification::{ - check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root, - signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, - IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, + check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, + signature_verify_chain_segment, verify_header_signature, BlockError, ExecutionPendingBlock, + GossipVerifiedBlock, IntoExecutionPendingBlock, }; -pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; +use crate::block_verification_types::{ + AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock, +}; +pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; +use crate::data_availability_checker::{ + Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, +}; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; @@ -22,7 +30,7 @@ use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; -use crate::head_tracker::HeadTracker; +use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; use crate::historical_blocks::HistoricalBlockError; use crate::light_client_finality_update_verification::{ Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, @@ -41,8 +49,10 @@ use crate::observed_aggregates::{ use crate::observed_attesters::{ ObservedAggregators, ObservedAttesters, ObservedSyncAggregators, ObservedSyncContributors, }; +use crate::observed_blob_sidecars::ObservedBlobSidecars; use crate::observed_block_producers::ObservedBlockProducers; use 
crate::observed_operations::{ObservationOutcome, ObservedOperations}; +use crate::observed_slashable::ObservedSlashable; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; @@ -57,11 +67,14 @@ use crate::validator_monitor::{ HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, }; use crate::validator_pubkey_cache::ValidatorPubkeyCache; -use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead}; -use eth2::types::{EventKind, SseBlock, SseExtendedPayloadAttributes, SyncDuty}; +use crate::{ + kzg_utils, metrics, AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, + BeaconSnapshot, CachedHead, +}; +use eth2::types::{EventKind, SseBlobSidecar, SseBlock, SseExtendedPayloadAttributes, SyncDuty}; use execution_layer::{ - BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, - PayloadAttributes, PayloadStatus, + BlockProposalContents, BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, + FailedCondition, PayloadAttributes, PayloadStatus, }; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, @@ -70,6 +83,7 @@ use fork_choice::{ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; +use kzg::Kzg; use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; use parking_lot::{Mutex, RwLock}; use proto_array::{DoNotReOrg, ProposerHeadError}; @@ -106,12 +120,14 @@ use task_executor::{ShutdownReason, TaskExecutor}; use tokio_stream::Stream; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; +use types::blob_sidecar::{BlobSidecarList, FixedBlobSidecarList}; +use types::payload::BlockProductionVersion; use types::*; pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. -type HashBlockTuple = (Hash256, Arc>); +type HashBlockTuple = (Hash256, RpcBlock); /// The time-out before failure during an operation to take a read/write RwLock on the block /// processing cache. @@ -170,6 +186,34 @@ pub enum WhenSlotSkipped { Prev, } +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum AvailabilityProcessingStatus { + MissingComponents(Slot, Hash256), + Imported(Hash256), +} + +impl TryInto for AvailabilityProcessingStatus { + type Error = (); + + fn try_into(self) -> Result { + match self { + AvailabilityProcessingStatus::Imported(hash) => Ok(hash.into()), + _ => Err(()), + } + } +} + +impl TryInto for AvailabilityProcessingStatus { + type Error = (); + + fn try_into(self) -> Result { + match self { + AvailabilityProcessingStatus::Imported(hash) => Ok(hash), + _ => Err(()), + } + } +} + /// The result of a chain segment processing. pub enum ChainSegmentResult { /// Processing this chain segment finished successfully. @@ -192,9 +236,13 @@ pub enum ProduceBlockVerification { pub struct PrePayloadAttributes { pub proposer_index: u64, pub prev_randao: Hash256, + /// The block number of the block being built upon (same block as fcU `headBlockHash`). + /// /// The parent block number is not part of the payload attributes sent to the EL, but *is* /// sent to builders via SSE. pub parent_block_number: u64, + /// The block root of the block being built upon (same block as fcU `headBlockHash`). + pub parent_beacon_block_root: Hash256, } /// Information about a state/block at a specific slot. 
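`AvailabilityProcessingStatus`, added above, is the new result type threaded through block processing: `Imported` means the block and all of its blobs made it into fork choice, while `MissingComponents` means the block passed processing but some of its components (blobs) are still outstanding. A hypothetical caller, invented for illustration (the enum is from this diff; the function and log lines are not):

    // Sketch of how callers branch on the new status. In the real code paths a
    // `MissingComponents` result typically triggers a blobs-by-root request
    // for the given block root.
    fn on_process_result(status: AvailabilityProcessingStatus) {
        match status {
            AvailabilityProcessingStatus::Imported(block_root) => {
                println!("block {block_root:?} fully imported");
            }
            AvailabilityProcessingStatus::MissingComponents(slot, block_root) => {
                println!("slot {slot}: still awaiting blobs for {block_root:?}");
            }
        }
    }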
@@ -273,8 +321,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type EthSpec: types::EthSpec; } -/// Used internally to split block production into discrete functions. -struct PartialBeaconBlock> { +struct PartialBeaconBlock { state: BeaconState, slot: Slot, proposer_index: u64, @@ -288,7 +335,7 @@ struct PartialBeaconBlock> { deposits: Vec, voluntary_exits: Vec, sync_aggregate: Option>, - prepare_payload_handle: Option>, + prepare_payload_handle: Option>, bls_to_execution_changes: Vec, } @@ -359,6 +406,10 @@ pub struct BeaconChain { pub(crate) observed_sync_aggregators: RwLock>, /// Maintains a record of which validators have proposed blocks for each slot. pub observed_block_producers: RwLock>, + /// Maintains a record of blob sidecars seen over the gossip network. + pub observed_blob_sidecars: RwLock>, + /// Maintains a record of slashable message seen over the gossip network or RPC. + pub observed_slashable: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub(crate) observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. @@ -404,7 +455,7 @@ pub struct BeaconChain { /// A cache of eth1 deposit data at epoch boundaries for deposit finalization pub eth1_finalization_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. - pub beacon_proposer_cache: Mutex, + pub beacon_proposer_cache: Arc>, /// Caches a map of `validator_index -> validator_pubkey`. pub(crate) validator_pubkey_cache: TimeoutRwLock>, /// A cache used when producing attestations. @@ -428,9 +479,67 @@ pub struct BeaconChain { pub validator_monitor: RwLock>, /// The slot at which blocks are downloaded back to. pub genesis_backfill_slot: Slot, + /// Provides a KZG verification and temporary storage for blocks and blobs as + /// they are collected and combined. + pub data_availability_checker: Arc>, + /// The KZG trusted setup used by this chain. + pub kzg: Option>, + /// State with complete tree hash cache, ready for block production. + /// + /// NB: We can delete this once we have tree-states. 
+ #[allow(clippy::type_complexity)] + pub block_production_state: Arc)>>>, } -type BeaconBlockAndState = (BeaconBlock, BeaconState); +pub enum BeaconBlockResponseWrapper { + Full(BeaconBlockResponse>), + Blinded(BeaconBlockResponse>), +} + +impl BeaconBlockResponseWrapper { + pub fn fork_name(&self, spec: &ChainSpec) -> Result { + Ok(match self { + BeaconBlockResponseWrapper::Full(resp) => resp.block.to_ref().fork_name(spec)?, + BeaconBlockResponseWrapper::Blinded(resp) => resp.block.to_ref().fork_name(spec)?, + }) + } + + pub fn execution_payload_value(&self) -> Uint256 { + match self { + BeaconBlockResponseWrapper::Full(resp) => resp.execution_payload_value, + BeaconBlockResponseWrapper::Blinded(resp) => resp.execution_payload_value, + } + } + + pub fn consensus_block_value_gwei(&self) -> u64 { + match self { + BeaconBlockResponseWrapper::Full(resp) => resp.consensus_block_value, + BeaconBlockResponseWrapper::Blinded(resp) => resp.consensus_block_value, + } + } + + pub fn consensus_block_value_wei(&self) -> Uint256 { + Uint256::from(self.consensus_block_value_gwei()) * 1_000_000_000 + } + + pub fn is_blinded(&self) -> bool { + matches!(self, BeaconBlockResponseWrapper::Blinded(_)) + } +} + +/// The components produced when the local beacon node creates a new block to extend the chain +pub struct BeaconBlockResponse> { + /// The newly produced beacon block + pub block: BeaconBlock, + /// The post-state after applying the new block + pub state: BeaconState, + /// The Blobs / Proofs associated with the new block + pub blob_items: Option<(KzgProofs, BlobsList)>, + /// The execution layer reward for the block + pub execution_payload_value: Uint256, + /// The consensus layer reward to the proposer + pub consensus_block_value: u64, +} impl FinalizationAndCanonicity { pub fn is_finalized(self) -> bool { @@ -501,12 +610,19 @@ impl BeaconChain { let mut batch = vec![]; let _head_timer = metrics::start_timer(&metrics::PERSIST_HEAD); - batch.push(self.persist_head_in_batch()); + + // Hold a lock on head_tracker until it has been persisted to disk. Otherwise there's a race + // condition with the pruning thread which can result in a block present in the head tracker + // but absent in the DB. This inconsistency halts pruning and dramatically increases disk + // size. Ref: https://github.com/sigp/lighthouse/issues/4773 + let head_tracker = self.head_tracker.0.read(); + batch.push(self.persist_head_in_batch(&head_tracker)); let _fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE); batch.push(self.persist_fork_choice_in_batch()); self.store.hot_db.do_atomically(batch)?; + drop(head_tracker); Ok(()) } /// Return a `PersistedBeaconChain` without reference to a `BeaconChain`. pub fn make_persisted_head( genesis_block_root: Hash256, - head_tracker: &HeadTracker, + head_tracker_reader: &HeadTrackerReader, ) -> PersistedBeaconChain { PersistedBeaconChain { _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, genesis_block_root, - ssz_head_tracker: head_tracker.to_ssz_container(), + ssz_head_tracker: SszHeadTracker::from_map(head_tracker_reader), } } /// Return a database operation for writing the beacon chain head to disk.
- pub fn persist_head_in_batch(&self) -> KeyValueStoreOp { - Self::persist_head_in_batch_standalone(self.genesis_block_root, &self.head_tracker) + pub fn persist_head_in_batch( + &self, + head_tracker_reader: &HeadTrackerReader, + ) -> KeyValueStoreOp { + Self::persist_head_in_batch_standalone(self.genesis_block_root, head_tracker_reader) } pub fn persist_head_in_batch_standalone( genesis_block_root: Hash256, - head_tracker: &HeadTracker, + head_tracker_reader: &HeadTrackerReader, ) -> KeyValueStoreOp { - Self::make_persisted_head(genesis_block_root, head_tracker) + Self::make_persisted_head(genesis_block_root, head_tracker_reader) .as_kv_store_op(BEACON_CHAIN_DB_KEY) } @@ -543,11 +662,11 @@ impl BeaconChain { spec: &ChainSpec, log: &Logger, ) -> Result>, Error> { - let persisted_fork_choice = - match store.get_item::(&FORK_CHOICE_DB_KEY)? { - Some(fc) => fc, - None => return Ok(None), - }; + let Some(persisted_fork_choice) = + store.get_item::(&FORK_CHOICE_DB_KEY)? + else { + return Ok(None); + }; let fc_store = BeaconForkChoiceStore::from_persisted(persisted_fork_choice.fork_choice_store, store)?; @@ -590,6 +709,13 @@ impl BeaconChain { Ok(()) } + pub fn persist_data_availability_checker(&self) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::PERSIST_DATA_AVAILABILITY_CHECKER); + self.data_availability_checker.persist_all()?; + + Ok(()) + } + /// Returns the slot _right now_ according to `self.slot_clock`. Returns `Err` if the slot is /// unavailable. /// @@ -670,10 +796,10 @@ impl BeaconChain { start_slot, end_slot, || { - ( + Ok(( head.beacon_state.clone_with_only_committee_caches(), head.beacon_block_root, - ) + )) }, &self.spec, )?; @@ -767,10 +893,10 @@ impl BeaconChain { start_slot, end_slot, || { - ( + Ok(( head.beacon_state.clone_with_only_committee_caches(), head.beacon_state_root(), - ) + )) }, &self.spec, )?; @@ -1040,6 +1166,15 @@ impl BeaconChain { ) } + pub fn get_blobs_checking_early_attester_cache( + &self, + block_root: &Hash256, + ) -> Result, Error> { + self.early_attester_cache + .get_blobs(*block_root) + .map_or_else(|| self.get_blobs(block_root), Ok) + } + /// Returns the block at the given root, if any. /// /// ## Errors @@ -1104,6 +1239,17 @@ impl BeaconChain { .map(Some) } + /// Returns the blobs at the given root, if any. + /// + /// ## Errors + /// May return a database error. + pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { + match self.store.get_blobs(block_root)? { + Some(blobs) => Ok(blobs), + None => Ok(BlobSidecarList::default()), + } + } + pub fn get_blinded_block( &self, block_root: &Hash256, @@ -1205,6 +1351,7 @@ impl BeaconChain { self.head_tracker.heads() } + /// Only used in tests. 
pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool { self.head_tracker.contains_head((*block_hash).into()) } @@ -1923,6 +2070,19 @@ impl BeaconChain { }) } + pub fn verify_blob_sidecar_for_gossip( + self: &Arc, + blob_sidecar: Arc>, + subnet_id: u64, + ) -> Result, GossipBlobError> { + metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); + let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); + GossipVerifiedBlob::new(blob_sidecar, subnet_id, self).map(|v| { + metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); + v + }) + } + /// Accepts some 'LightClientOptimisticUpdate' from the network and attempts to verify it pub fn verify_optimistic_update_for_gossip( self: &Arc, @@ -2428,7 +2588,7 @@ impl BeaconChain { /// This method is potentially long-running and should not run on the core executor. pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>>, + chain_segment: Vec>, ) -> Result>, ChainSegmentResult> { // This function will never import any blocks. let imported_blocks = 0; @@ -2445,14 +2605,14 @@ impl BeaconChain { for (i, block) in chain_segment.into_iter().enumerate() { // Ensure the block is the correct structure for the fork at `block.slot()`. - if let Err(e) = block.fork_name(&self.spec) { + if let Err(e) = block.as_block().fork_name(&self.spec) { return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::InconsistentFork(e), }); } - let block_root = get_block_root(&block); + let block_root = block.block_root(); if let Some((child_parent_root, child_slot)) = children.get(i) { // If this block has a child in this chain segment, ensure that its parent root matches @@ -2476,7 +2636,7 @@ impl BeaconChain { } } - match check_block_relevancy(&block, block_root, self) { + match check_block_relevancy(block.as_block(), block_root, self) { // If the block is relevant, add it to the filtered chain segment. Ok(_) => filtered_chain_segment.push((block_root, block)), // If the block is already known, simply ignore this block. @@ -2534,7 +2694,7 @@ impl BeaconChain { /// `Self::process_block`. pub async fn process_chain_segment( self: &Arc, - chain_segment: Vec>>, + chain_segment: Vec>, notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { let mut imported_blocks = 0; @@ -2558,7 +2718,7 @@ impl BeaconChain { while let Some((_root, block)) = filtered_chain_segment.first() { // Determine the epoch of the first block in the remaining segment. - let start_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + let start_epoch = block.epoch(); // The `last_index` indicates the position of the first block in an epoch greater // than the current epoch: partitioning the blocks into a run of blocks in the same @@ -2566,9 +2726,7 @@ impl BeaconChain { // the same `BeaconState`. let last_index = filtered_chain_segment .iter() - .position(|(_root, block)| { - block.slot().epoch(T::EthSpec::slots_per_epoch()) > start_epoch - }) + .position(|(_root, block)| block.epoch() > start_epoch) .unwrap_or(filtered_chain_segment.len()); let mut blocks = filtered_chain_segment.split_off(last_index); @@ -2608,7 +2766,24 @@ impl BeaconChain { ) .await { - Ok(_) => imported_blocks += 1, + Ok(status) => { + match status { + AvailabilityProcessingStatus::Imported(_) => { + // The block was imported successfully. 
+ imported_blocks += 1; + } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + warn!(self.log, "Blobs missing in response to range request"; + "block_root" => ?block_root, "slot" => slot); + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::AvailabilityCheck( + AvailabilityCheckError::MissingBlobs, + ), + }; + } + } + } Err(error) => { return ChainSegmentResult::Failed { imported_blocks, @@ -2646,12 +2821,14 @@ impl BeaconChain { match GossipVerifiedBlock::new(block, &chain) { Ok(verified) => { + let commitments_formatted = verified.block.commitments_formatted(); debug!( chain.log, "Successfully verified gossip block"; "graffiti" => graffiti_string, "slot" => slot, "root" => ?verified.block_root(), + "commitments" => commitments_formatted, ); Ok(verified) @@ -2676,6 +2853,117 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? } + /// Cache the blob in the processing cache, process it, then evict it from the cache if it was + /// imported or errored. + pub async fn process_gossip_blob( + self: &Arc, + blob: GossipVerifiedBlob, + ) -> Result> { + let block_root = blob.block_root(); + + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its blobs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::BlockIsAlreadyKnown); + } + + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_blob_sidecar_subscribers() { + event_handler.register(EventKind::BlobSidecar(SseBlobSidecar::from_blob_sidecar( + blob.as_blob(), + ))); + } + } + + self.data_availability_checker + .notify_gossip_blob(blob.slot(), block_root, &blob); + let r = self.check_gossip_blob_availability_and_import(blob).await; + self.remove_notified(&block_root, r) + } + + /// Cache the blobs in the processing cache, process them, then evict them from the cache if they + /// were imported or errored. + pub async fn process_rpc_blobs( + self: &Arc, + slot: Slot, + block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> Result> { + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its blobs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::BlockIsAlreadyKnown); + } + + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_blob_sidecar_subscribers() { + for blob in blobs.iter().filter_map(|maybe_blob| maybe_blob.as_ref()) { + event_handler.register(EventKind::BlobSidecar( + SseBlobSidecar::from_blob_sidecar(blob), + )); + } + } + } + + self.data_availability_checker + .notify_rpc_blobs(slot, block_root, &blobs); + let r = self + .check_rpc_blob_availability_and_import(slot, block_root, blobs) + .await; + self.remove_notified(&block_root, r) + } + + /// Remove any block components from the *processing cache* if we no longer require them. If the + /// block was imported fully or erred, we no longer require them. + fn remove_notified( + &self, + block_root: &Hash256, + r: Result>, + ) -> Result> { + let has_missing_components = + matches!(r, Ok(AvailabilityProcessingStatus::MissingComponents(_, _))); + if !has_missing_components { + self.data_availability_checker.remove_notified(block_root); + } + r + } + + /// Wraps `process_block` in logic to cache the block's commitments in the processing cache + /// and evict if the block was imported or erred.
+ pub async fn process_block_with_early_caching>( + self: &Arc, + block_root: Hash256, + unverified_block: B, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result> { + if let Ok(commitments) = unverified_block + .block() + .message() + .body() + .blob_kzg_commitments() + { + self.data_availability_checker.notify_block_commitments( + unverified_block.block().slot(), + block_root, + commitments.clone(), + ); + }; + let r = self + .process_block(block_root, unverified_block, notify_execution_layer, || { + Ok(()) + }) + .await; + self.remove_notified(&block_root, r) + } + /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and /// imported into the chain. /// @@ -2683,6 +2971,7 @@ impl BeaconChain { /// /// - `SignedBeaconBlock` /// - `GossipVerifiedBlock` + /// - `RpcBlock` /// /// ## Errors /// @@ -2694,15 +2983,28 @@ impl BeaconChain { unverified_block: B, notify_execution_layer: NotifyExecutionLayer, publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, - ) -> Result> { + ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); // Increment the Prometheus counter for block processing requests. metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); - // Clone the block so we can provide it to the event handler. - let block = unverified_block.block().clone(); + // Set observed time if not already set. Usually this should be set by gossip or RPC, + // but just in case we set it again here (useful for tests). + if let (Some(seen_timestamp), Some(current_slot)) = + (self.slot_clock.now_duration(), self.slot_clock.now()) + { + self.block_times_cache.write().set_time_observed( + block_root, + current_slot, + seen_timestamp, + None, + None, + ); + } + + let block_slot = unverified_block.block().slot(); // A small closure to group the verification and import errors. let chain = self.clone(); @@ -2713,26 +3015,42 @@ impl BeaconChain { notify_execution_layer, )?; publish_fn()?; - chain - .import_execution_pending_block(execution_pending) - .await + let executed_block = chain.into_executed_block(execution_pending).await?; + match executed_block { + ExecutedBlock::Available(block) => { + self.import_available_block(Box::new(block)).await + } + ExecutedBlock::AvailabilityPending(block) => { + self.check_block_availability_and_import(block).await + } + } }; // Verify and import the block. match import_block.await { // The block was successfully verified and imported. Yay. - Ok(block_root) => { + Ok(status @ AvailabilityProcessingStatus::Imported(block_root)) => { trace!( self.log, "Beacon block imported"; "block_root" => ?block_root, - "block_slot" => %block.slot(), + "block_slot" => block_slot, ); // Increment the Prometheus counter for block processing successes. metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); - Ok(block_root) + Ok(status) + } + Ok(status @ AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { + trace!( + self.log, + "Beacon block awaiting blobs"; + "block_root" => ?block_root, + "block_slot" => slot, + ); + + Ok(status) } Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { debug!( @@ -2764,36 +3082,27 @@ impl BeaconChain { } } - /// Accepts a fully-verified block and imports it into the chain without performing any - /// additional verification. 
+    /// Accepts a fully-verified block and awaits its payload verification handle to
+    /// obtain a full `ExecutedBlock`.
     ///
-    /// An error is returned if the block was unable to be imported. It may be partially imported
-    /// (i.e., this function is not atomic).
-    pub async fn import_execution_pending_block(
+    /// An error is returned if the verification handle couldn't be awaited.
+    pub async fn into_executed_block(
        self: Arc<Self>,
        execution_pending_block: ExecutionPendingBlock<T>,
-    ) -> Result<Hash256, BlockError<T::EthSpec>> {
+    ) -> Result<ExecutedBlock<T::EthSpec>, BlockError<T::EthSpec>> {
        let ExecutionPendingBlock {
            block,
-            block_root,
-            state,
-            parent_block,
-            confirmed_state_roots,
+            import_data,
            payload_verification_handle,
-            parent_eth1_finalization_data,
-            consensus_context,
        } = execution_pending_block;

-        let PayloadVerificationOutcome {
-            payload_verification_status,
-            is_valid_merge_transition_block,
-        } = payload_verification_handle
+        let payload_verification_outcome = payload_verification_handle
            .await
            .map_err(BeaconChainError::TokioJoin)?
            .ok_or(BeaconChainError::RuntimeShutdown)??;

        // Log the PoS pandas if a merge transition just occurred.
-        if is_valid_merge_transition_block {
+        if payload_verification_outcome.is_valid_merge_transition_block {
            info!(self.log, "{}", POS_PANDA_BANNER);
            info!(
                self.log,
@@ -2821,9 +3130,127 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                .into_root()
            );
        }
+        Ok(ExecutedBlock::new(
+            block,
+            import_data,
+            payload_verification_outcome,
+        ))
+    }
+
+    /* Import methods */
+
+    /// Checks if the block is available, and imports immediately if so, otherwise caches the block
+    /// in the data availability checker.
+    async fn check_block_availability_and_import(
+        self: &Arc<Self>,
+        block: AvailabilityPendingExecutedBlock<T::EthSpec>,
+    ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
+        let slot = block.block.slot();
+        let availability = self
+            .data_availability_checker
+            .put_pending_executed_block(block)?;
+        self.process_availability(slot, availability).await
+    }
+
+    /// Checks if the provided blob can make any cached blocks available, and imports immediately
+    /// if so, otherwise caches the blob in the data availability checker.
+    async fn check_gossip_blob_availability_and_import(
+        self: &Arc<Self>,
+        blob: GossipVerifiedBlob<T>,
+    ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
+        let slot = blob.slot();
+        if let Some(slasher) = self.slasher.as_ref() {
+            slasher.accept_block_header(blob.signed_block_header());
+        }
+        let availability = self.data_availability_checker.put_gossip_blob(blob)?;
+
+        self.process_availability(slot, availability).await
+    }
+
+    /// Checks if the provided blobs can make any cached blocks available, and imports immediately
+    /// if so, otherwise caches the blobs in the data availability checker.
+    async fn check_rpc_blob_availability_and_import(
+        self: &Arc<Self>,
+        slot: Slot,
+        block_root: Hash256,
+        blobs: FixedBlobSidecarList<T::EthSpec>,
+    ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
+        // Need to scope this to ensure the lock is dropped before calling `process_availability`.
+        // Even an explicit drop is not enough to convince the borrow checker.
+ { + let mut slashable_cache = self.observed_slashable.write(); + for header in blobs + .into_iter() + .filter_map(|b| b.as_ref().map(|b| b.signed_block_header.clone())) + .unique() + { + if verify_header_signature::>(self, &header).is_ok() { + slashable_cache + .observe_slashable( + header.message.slot, + header.message.proposer_index, + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(header); + } + } + } + } + let availability = self + .data_availability_checker + .put_rpc_blobs(block_root, blobs)?; + + self.process_availability(slot, availability).await + } + + /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` + /// + /// An error is returned if the block was unable to be imported. It may be partially imported + /// (i.e., this function is not atomic). + async fn process_availability( + self: &Arc, + slot: Slot, + availability: Availability, + ) -> Result> { + match availability { + Availability::Available(block) => { + // This is the time since start of the slot where all the components of the block have become available + let delay = + get_slot_delay_ms(timestamp_now(), block.block.slot(), &self.slot_clock); + metrics::observe_duration(&metrics::BLOCK_AVAILABILITY_DELAY, delay); + // Block is fully available, import into fork choice + self.import_available_block(block).await + } + Availability::MissingComponents(block_root) => Ok( + AvailabilityProcessingStatus::MissingComponents(slot, block_root), + ), + } + } + + pub async fn import_available_block( + self: &Arc, + block: Box>, + ) -> Result> { + let AvailableExecutedBlock { + block, + import_data, + payload_verification_outcome, + } = *block; + + let BlockImportData { + block_root, + state, + parent_block, + parent_eth1_finalization_data, + confirmed_state_roots, + consensus_context, + } = import_data; + + // import let chain = self.clone(); - let block_hash = self + let block_root = self .spawn_blocking_handle( move || { chain.import_block( @@ -2831,7 +3258,7 @@ impl BeaconChain { block_root, state, confirmed_state_roots, - payload_verification_status, + payload_verification_outcome.payload_verification_status, parent_block, parent_eth1_finalization_data, consensus_context, @@ -2840,11 +3267,10 @@ impl BeaconChain { "payload_verification_handle", ) .await??; - - Ok(block_hash) + Ok(AvailabilityProcessingStatus::Imported(block_root)) } - /// Accepts a fully-verified block and imports it into the chain without performing any + /// Accepts a fully-verified and available block and imports it into the chain without performing any /// additional verification. /// /// An error is returned if the block was unable to be imported. It may be partially imported @@ -2852,7 +3278,7 @@ impl BeaconChain { #[allow(clippy::too_many_arguments)] fn import_block( &self, - signed_block: Arc>, + signed_block: AvailableBlock, block_root: Hash256, mut state: BeaconState, confirmed_state_roots: Vec, @@ -2905,7 +3331,9 @@ impl BeaconChain { let mut fork_choice = self.canonical_head.fork_choice_write_lock(); // Do not import a block that doesn't descend from the finalized root. - check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, &signed_block)?; + let signed_block = + check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, signed_block)?; + let block = signed_block.message(); // Register the new block with the fork choice service. 
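`process_availability` is the join point where executed blocks and verified blobs meet: if everything has arrived the block is imported, otherwise the caller gets `MissingComponents` back. The toy cache below models the underlying bookkeeping under simplifying assumptions (integer roots, a plain `HashMap`, no verification); `ToyAvailabilityCache` and its methods are illustrative names, not Lighthouse APIs:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct ToyAvailabilityCache {
    pending_blocks: HashMap<u64, usize>, // block_root -> expected blob count
    received_blobs: HashMap<u64, usize>, // block_root -> blobs seen so far
}

enum Availability {
    Available,
    MissingComponents,
}

impl ToyAvailabilityCache {
    fn put_block(&mut self, root: u64, expected_blobs: usize) -> Availability {
        self.pending_blocks.insert(root, expected_blobs);
        self.check(root)
    }

    fn put_blob(&mut self, root: u64) -> Availability {
        *self.received_blobs.entry(root).or_default() += 1;
        self.check(root)
    }

    fn check(&mut self, root: u64) -> Availability {
        let expected = match self.pending_blocks.get(&root).copied() {
            Some(expected) => expected,
            None => return Availability::MissingComponents,
        };
        let seen = self.received_blobs.get(&root).copied().unwrap_or(0);
        if seen >= expected {
            // Fully available: evict from the cache and hand back for import.
            self.pending_blocks.remove(&root);
            self.received_blobs.remove(&root);
            Availability::Available
        } else {
            Availability::MissingComponents
        }
    }
}

fn main() {
    let mut cache = ToyAvailabilityCache::default();
    assert!(matches!(cache.put_block(1, 2), Availability::MissingComponents));
    assert!(matches!(cache.put_blob(1), Availability::MissingComponents));
    assert!(matches!(cache.put_blob(1), Availability::Available));
}
```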
{ @@ -3016,6 +3444,8 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 + let (_, signed_block, blobs) = signed_block.deconstruct(); + let block = signed_block.message(); ops.extend( confirmed_state_roots .into_iter() @@ -3023,9 +3453,21 @@ impl BeaconChain { ); ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); + + if let Some(blobs) = blobs { + if !blobs.is_empty() { + debug!( + self.log, "Writing blobs to store"; + "block_root" => %block_root, + "count" => blobs.len(), + ); + ops.push(StoreOp::PutBlobs(block_root, blobs)); + } + } + let txn_lock = self.store.hot_db.begin_rw_transaction(); - if let Err(e) = self.store.do_atomically(ops) { + if let Err(e) = self.store.do_atomically_with_block_and_blobs_cache(ops) { error!( self.log, "Database write failed!"; @@ -3143,9 +3585,7 @@ impl BeaconChain { state: &BeaconState, ) -> Result<(), BlockError> { // Only perform the weak subjectivity check if it was configured. - let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint { - checkpoint - } else { + let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint else { return Ok(()); }; // Note: we're using the finalized checkpoint from the head state, rather than fork @@ -3217,16 +3657,18 @@ impl BeaconChain { } // Allow the validator monitor to learn about a new valid state. - self.validator_monitor - .write() - .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state); + self.validator_monitor.write().process_valid_state( + current_slot.epoch(T::EthSpec::slots_per_epoch()), + state, + &self.spec, + ); let validator_monitor = self.validator_monitor.read(); // Sync aggregate. if let Ok(sync_aggregate) = block.body().sync_aggregate() { // `SyncCommittee` for the sync_aggregate should correspond to the duty slot - let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + let duty_epoch = block.epoch(); match self.sync_committee_at_epoch(duty_epoch) { Ok(sync_committee) => { @@ -3507,7 +3949,7 @@ impl BeaconChain { parent_block_slot: Slot, ) { // Do not write to eth1 finalization cache for blocks older than 5 epochs. - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 5 < current_epoch { + if block.epoch() + 5 < current_epoch { return; } @@ -3609,35 +4051,17 @@ impl BeaconChain { Ok(()) } - /// Produce a new block at the given `slot`. - /// - /// The produced block will not be inherently valid, it must be signed by a block producer. - /// Block signing is out of the scope of this function and should be done by a separate program. - pub async fn produce_block + 'static>( - self: &Arc, - randao_reveal: Signature, - slot: Slot, - validator_graffiti: Option, - ) -> Result, BlockProductionError> { - self.produce_block_with_verification( - randao_reveal, - slot, - validator_graffiti, - ProduceBlockVerification::VerifyRandao, - ) - .await - } - - /// Same as `produce_block` but allowing for configuration of RANDAO-verification. 
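The weak subjectivity hunk above swaps an `if let ... else` for Rust's `let ... else` (stable since 1.65), which binds the success case directly and forces the `else` branch to diverge, removing one level of nesting. A standalone illustration of the pattern, with hypothetical names:

```rust
struct Config {
    weak_subjectivity_checkpoint: Option<u64>,
}

fn check(config: &Config) -> Result<(), String> {
    // Early-return when the check is not configured; no extra nesting.
    let Some(wss_checkpoint) = config.weak_subjectivity_checkpoint else {
        return Ok(());
    };
    if wss_checkpoint == 0 {
        return Err("bad checkpoint".into());
    }
    Ok(())
}

fn main() {
    assert!(check(&Config { weak_subjectivity_checkpoint: None }).is_ok());
    assert!(check(&Config { weak_subjectivity_checkpoint: Some(0) }).is_err());
}
```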
- pub async fn produce_block_with_verification< - Payload: AbstractExecPayload + 'static, - >( + pub async fn produce_block_with_verification( self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, verification: ProduceBlockVerification, - ) -> Result, BlockProductionError> { + builder_boost_factor: Option, + block_production_version: BlockProductionVersion, + ) -> Result, BlockProductionError> { + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); + let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); // Part 1/2 (blocking) // // Load the parent state from disk. @@ -3655,13 +4079,15 @@ impl BeaconChain { // Part 2/2 (async, with some blocking components) // // Produce the block upon the state - self.produce_block_on_state::( + self.produce_block_on_state( state, state_root_opt, slot, randao_reveal, validator_graffiti, verification, + builder_boost_factor, + block_production_version, ) .await } @@ -3672,9 +4098,6 @@ impl BeaconChain { self: &Arc, slot: Slot, ) -> Result<(BeaconState, Option), BlockProductionError> { - metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); - let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); - let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); self.wait_for_fork_choice_before_block_production(slot)?; drop(fork_choice_timer); @@ -3704,7 +4127,16 @@ impl BeaconChain { ); (re_org_state.pre_state, re_org_state.state_root) } - // Normal case: proposing a block atop the current head. Use the snapshot cache. + // Normal case: proposing a block atop the current head using the cache. + else if let Some((_, cached_state)) = self + .block_production_state + .lock() + .take() + .filter(|(cached_block_root, _)| *cached_block_root == head_block_root) + { + (cached_state.pre_state, cached_state.state_root) + } + // Fall back to a direct read of the snapshot cache. else if let Some(pre_state) = self .snapshot_cache .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) @@ -3712,6 +4144,12 @@ impl BeaconChain { snapshot_cache.get_state_for_block_production(head_block_root) }) { + warn!( + self.log, + "Block production cache miss"; + "message" => "falling back to snapshot cache clone", + "slot" => slot + ); (pre_state.pre_state, pre_state.state_root) } else { warn!( @@ -3835,12 +4273,27 @@ impl BeaconChain { drop(proposer_head_timer); let re_org_parent_block = proposer_head.parent_node.root; - // Only attempt a re-org if we hit the snapshot cache. + // Only attempt a re-org if we hit the block production cache or snapshot cache. 
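The normal-case proposal path above consumes `block_production_state` with `lock().take()` and discards the entry via `filter` when it was built for a different head block, falling back to a snapshot-cache clone on a miss. A reduced sketch of that consume-or-fall-back pattern, with stand-in types (`u64` roots, `String` states) and std's `Mutex`, so an `unwrap` appears where a parking_lot-style lock would not need one:

```rust
use std::sync::Mutex;

struct Chain {
    block_production_state: Mutex<Option<(u64, String)>>, // (head_root, state)
}

impl Chain {
    fn state_for_production(&self, head_block_root: u64) -> String {
        if let Some((_, cached_state)) = self
            .block_production_state
            .lock()
            .unwrap()
            // `take()` removes the entry so it is used at most once.
            .take()
            // Discard the entry if it was built for a different head.
            .filter(|(cached_root, _)| *cached_root == head_block_root)
        {
            cached_state
        } else {
            // Cache miss: clone from the (slower) snapshot cache instead.
            format!("cloned-state-for-{head_block_root}")
        }
    }
}

fn main() {
    let chain = Chain {
        block_production_state: Mutex::new(Some((7, "advanced-state".into()))),
    };
    assert_eq!(chain.state_for_production(7), "advanced-state");
    // The entry was consumed by `take()`, so a second call misses.
    assert_eq!(chain.state_for_production(7), "cloned-state-for-7");
}
```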
let pre_state = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(re_org_parent_block) + .block_production_state + .lock() + .take() + .and_then(|(cached_block_root, state)| { + (cached_block_root == re_org_parent_block).then_some(state) + }) + .or_else(|| { + warn!( + self.log, + "Block production cache miss"; + "message" => "falling back to snapshot cache during re-org", + "slot" => slot, + "block_root" => ?re_org_parent_block + ); + self.snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|snapshot_cache| { + snapshot_cache.get_state_for_block_production(re_org_parent_block) + }) }) .or_else(|| { debug!( @@ -3877,10 +4330,10 @@ impl BeaconChain { let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); let head_block_root = cached_head.head_block_root(); - let parent_block_root = cached_head.parent_block_root(); + let head_parent_block_root = cached_head.parent_block_root(); // The proposer head must be equal to the canonical head or its parent. - if proposer_head != head_block_root && proposer_head != parent_block_root { + if proposer_head != head_block_root && proposer_head != head_parent_block_root { warn!( self.log, "Unable to compute payload attributes"; @@ -3959,7 +4412,7 @@ impl BeaconChain { // Get the `prev_randao` and parent block number. let head_block_number = cached_head.head_block_number()?; - let (prev_randao, parent_block_number) = if proposer_head == parent_block_root { + let (prev_randao, parent_block_number) = if proposer_head == head_parent_block_root { ( cached_head.parent_random()?, head_block_number.saturating_sub(1), @@ -3972,6 +4425,7 @@ impl BeaconChain { proposer_index, prev_randao, parent_block_number, + parent_beacon_block_root: proposer_head, })) } @@ -4052,7 +4506,7 @@ impl BeaconChain { /// This function uses heuristics that align quite closely but not exactly with the re-org /// conditions set out in `get_state_for_re_org` and `get_proposer_head`. The differences are /// documented below. - fn overridden_forkchoice_update_params( + pub fn overridden_forkchoice_update_params( &self, canonical_forkchoice_params: ForkchoiceUpdateParameters, ) -> Result { @@ -4070,7 +4524,7 @@ impl BeaconChain { }) } - fn overridden_forkchoice_update_params_or_failure_reason( + pub fn overridden_forkchoice_update_params_or_failure_reason( &self, canonical_forkchoice_params: &ForkchoiceUpdateParameters, ) -> Result> { @@ -4211,7 +4665,7 @@ impl BeaconChain { .unwrap_or_else(|| Duration::from_secs(0)), ); block_delays.observed.map_or(false, |delay| { - delay > self.slot_clock.unagg_attestation_production_delay() + delay >= self.slot_clock.unagg_attestation_production_delay() }) } @@ -4227,7 +4681,8 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. 
- pub async fn produce_block_on_state + 'static>( + #[allow(clippy::too_many_arguments)] + pub async fn produce_block_on_state( self: &Arc, state: BeaconState, state_root_opt: Option, @@ -4235,7 +4690,9 @@ impl BeaconChain { randao_reveal: Signature, validator_graffiti: Option, verification: ProduceBlockVerification, - ) -> Result, BlockProductionError> { + builder_boost_factor: Option, + block_production_version: BlockProductionVersion, + ) -> Result, BlockProductionError> { // Part 1/3 (blocking) // // Perform the state advance and block-packing functions. @@ -4250,6 +4707,8 @@ impl BeaconChain { produce_at_slot, randao_reveal, validator_graffiti, + builder_boost_factor, + block_production_version, ) }, "produce_partial_beacon_block", @@ -4257,50 +4716,98 @@ impl BeaconChain { .ok_or(BlockProductionError::ShuttingDown)? .await .map_err(BlockProductionError::TokioJoin)??; - // Part 2/3 (async) // // Wait for the execution layer to return an execution payload (if one is required). let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); - let block_contents = if let Some(prepare_payload_handle) = prepare_payload_handle { - Some( - prepare_payload_handle - .await - .map_err(BlockProductionError::TokioJoin)? - .ok_or(BlockProductionError::ShuttingDown)??, - ) - } else { - None - }; - + let block_contents_type_option = + if let Some(prepare_payload_handle) = prepare_payload_handle { + Some( + prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)??, + ) + } else { + None + }; // Part 3/3 (blocking) - // - // Perform the final steps of combining all the parts and computing the state root. - let chain = self.clone(); - self.task_executor - .spawn_blocking_handle( - move || { - chain.complete_partial_beacon_block( - partial_beacon_block, - block_contents, - verification, - ) - }, - "complete_partial_beacon_block", - ) - .ok_or(BlockProductionError::ShuttingDown)? - .await - .map_err(BlockProductionError::TokioJoin)? + if let Some(block_contents_type) = block_contents_type_option { + match block_contents_type { + BlockProposalContentsType::Full(block_contents) => { + let chain = self.clone(); + let beacon_block_response = self + .task_executor + .spawn_blocking_handle( + move || { + chain.complete_partial_beacon_block( + partial_beacon_block, + Some(block_contents), + verification, + ) + }, + "complete_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + Ok(BeaconBlockResponseWrapper::Full(beacon_block_response)) + } + BlockProposalContentsType::Blinded(block_contents) => { + let chain = self.clone(); + let beacon_block_response = self + .task_executor + .spawn_blocking_handle( + move || { + chain.complete_partial_beacon_block( + partial_beacon_block, + Some(block_contents), + verification, + ) + }, + "complete_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + Ok(BeaconBlockResponseWrapper::Blinded(beacon_block_response)) + } + } + } else { + let chain = self.clone(); + let beacon_block_response = self + .task_executor + .spawn_blocking_handle( + move || { + chain.complete_partial_beacon_block( + partial_beacon_block, + None, + verification, + ) + }, + "complete_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? 
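Part 3/3 above dispatches on `BlockProposalContentsType::Full` versus `::Blinded`, with two nearly identical arms that differ only in which `BeaconBlockResponseWrapper` variant they produce. A compressed model of that enum-in/enum-out shape; every name here is a stand-in, not Lighthouse's actual type:

```rust
enum BlockProposalContentsType {
    Full(String),
    Blinded(String),
}

enum BeaconBlockResponseWrapper {
    Full(String),
    Blinded(String),
}

fn complete(contents: Option<BlockProposalContentsType>) -> BeaconBlockResponseWrapper {
    match contents {
        Some(BlockProposalContentsType::Full(payload)) => {
            BeaconBlockResponseWrapper::Full(format!("block with {payload}"))
        }
        Some(BlockProposalContentsType::Blinded(header)) => {
            BeaconBlockResponseWrapper::Blinded(format!("blinded block with {header}"))
        }
        // Pre-merge blocks carry no execution payload at all.
        None => BeaconBlockResponseWrapper::Full("payload-less block".into()),
    }
}

fn main() {
    let full = complete(Some(BlockProposalContentsType::Full("payload".into())));
    assert!(matches!(full, BeaconBlockResponseWrapper::Full(_)));
    let blinded = complete(Some(BlockProposalContentsType::Blinded("header".into())));
    assert!(matches!(blinded, BeaconBlockResponseWrapper::Blinded(_)));
}
```

The duplication between the two arms in the real diff stems from the differing payload types behind each variant; a helper generic over the payload type would be one way to collapse them.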
+ .await + .map_err(BlockProductionError::TokioJoin)??; + + Ok(BeaconBlockResponseWrapper::Full(beacon_block_response)) + } } - fn produce_partial_beacon_block + 'static>( + #[allow(clippy::too_many_arguments)] + fn produce_partial_beacon_block( self: &Arc, mut state: BeaconState, state_root_opt: Option, produce_at_slot: Slot, randao_reveal: Signature, validator_graffiti: Option, - ) -> Result, BlockProductionError> { + builder_boost_factor: Option, + block_production_version: BlockProductionVersion, + ) -> Result, BlockProductionError> { let eth1_chain = self .eth1_chain .as_ref() @@ -4353,9 +4860,16 @@ impl BeaconChain { // allows it to run concurrently with things like attestation packing. let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Merge(_) | BeaconState::Capella(_) => { - let prepare_payload_handle = - get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; + BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => { + let prepare_payload_handle = get_execution_payload( + self.clone(), + &state, + parent_root, + proposer_index, + builder_params, + builder_boost_factor, + block_production_version, + )?; Some(prepare_payload_handle) } }; @@ -4364,6 +4878,7 @@ impl BeaconChain { self.op_pool.get_slashings_and_exits(&state, &self.spec); let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; + let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; let bls_to_execution_changes = self @@ -4534,10 +5049,10 @@ impl BeaconChain { fn complete_partial_beacon_block>( &self, - partial_beacon_block: PartialBeaconBlock, + partial_beacon_block: PartialBeaconBlock, block_contents: Option>, verification: ProduceBlockVerification, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { let PartialBeaconBlock { mut state, slot, @@ -4559,90 +5074,153 @@ impl BeaconChain { bls_to_execution_changes, } = partial_beacon_block; - let inner_block = match &state { - BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyBase { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), - _phantom: PhantomData, - }, - }), - BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyAltair { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), - sync_aggregate: sync_aggregate - .ok_or(BlockProductionError::MissingSyncAggregate)?, - _phantom: PhantomData, - }, - }), - BeaconState::Merge(_) => BeaconBlock::Merge(BeaconBlockMerge { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), - sync_aggregate: sync_aggregate - 
.ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: block_contents + let (inner_block, maybe_blobs_and_proofs, execution_payload_value) = match &state { + BeaconState::Base(_) => ( + BeaconBlock::Base(BeaconBlockBase { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyBase { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + _phantom: PhantomData, + }, + }), + None, + Uint256::zero(), + ), + BeaconState::Altair(_) => ( + BeaconBlock::Altair(BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + _phantom: PhantomData, + }, + }), + None, + Uint256::zero(), + ), + BeaconState::Merge(_) => { + let block_proposal_contents = + block_contents.ok_or(BlockProductionError::MissingExecutionPayload)?; + let execution_payload_value = block_proposal_contents.block_value().to_owned(); + ( + BeaconBlock::Merge(BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: block_proposal_contents + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + }, + }), + None, + execution_payload_value, + ) + } + BeaconState::Capella(_) => { + let block_proposal_contents = + block_contents.ok_or(BlockProductionError::MissingExecutionPayload)?; + let execution_payload_value = block_proposal_contents.block_value().to_owned(); + + ( + BeaconBlock::Capella(BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: block_proposal_contents + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), + }, + }), + None, + execution_payload_value, + ) + } + BeaconState::Deneb(_) => { + let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = + block_contents .ok_or(BlockProductionError::MissingExecutionPayload)? 
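With Deneb, each fork arm above now evaluates to the same `(block, maybe_blobs_and_proofs, execution_payload_value)` triple, and only the Deneb arm can carry blobs. A reduced model of that per-fork construction, using an illustrative `Fork` enum rather than matching on `BeaconState`:

```rust
enum Fork {
    Base,
    Merge,
    Deneb,
}

type Blobs = Vec<u8>;

fn build(fork: Fork) -> (String, Option<Blobs>, u64) {
    match fork {
        // No execution payload before the merge, so no payload value either.
        Fork::Base => ("base block".into(), None, 0),
        Fork::Merge => ("merge block".into(), None, 42),
        // Deneb additionally returns the blobs to be published as sidecars.
        Fork::Deneb => ("deneb block".into(), Some(vec![1, 2, 3]), 42),
    }
}

fn main() {
    for fork in [Fork::Base, Fork::Merge, Fork::Deneb] {
        let (block, blobs, value) = build(fork);
        println!("{block}: blobs={:?} value={value}", blobs.as_ref().map(Vec::len));
    }
}
```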
- .to_payload() - .try_into() - .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - }, - }), - BeaconState::Capella(_) => BeaconBlock::Capella(BeaconBlockCapella { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyCapella { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), - sync_aggregate: sync_aggregate - .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .to_payload() - .try_into() - .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), - }, - }), + .deconstruct(); + + ( + BeaconBlock::Deneb(BeaconBlockDeneb { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: payload + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), + blob_kzg_commitments: kzg_commitments.ok_or( + BlockProductionError::MissingKzgCommitment( + "Kzg commitments missing from block contents".to_string(), + ), + )?, + }, + }), + maybe_blobs_and_proofs, + execution_payload_value, + ) + } }; let block = SignedBeaconBlock::from_block( @@ -4656,7 +5234,6 @@ impl BeaconChain { self.log, "Produced block on state"; "block_size" => block_size, - "slot" => block.slot(), ); metrics::observe(&metrics::BLOCK_SIZE, block_size as f64); @@ -4670,8 +5247,15 @@ impl BeaconChain { ProduceBlockVerification::VerifyRandao => BlockSignatureStrategy::VerifyRandao, ProduceBlockVerification::NoVerification => BlockSignatureStrategy::NoVerification, }; + // Use a context without block root or proposer index so that both are checked. let mut ctxt = ConsensusContext::new(block.slot()); + + let consensus_block_value = self + .compute_beacon_block_reward(block.message(), Hash256::zero(), &mut state) + .map(|reward| reward.total) + .unwrap_or(0); + per_block_processing( &mut state, &block, @@ -4690,6 +5274,47 @@ impl BeaconChain { let (mut block, _) = block.deconstruct(); *block.state_root_mut() = state_root; + let blobs_verification_timer = + metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES); + let blob_items = match maybe_blobs_and_proofs { + Some((blobs, proofs)) => { + let expected_kzg_commitments = + block.body().blob_kzg_commitments().map_err(|_| { + BlockProductionError::InvalidBlockVariant( + "deneb block does not contain kzg commitments".to_string(), + ) + })?; + + if expected_kzg_commitments.len() != blobs.len() { + return Err(BlockProductionError::MissingKzgCommitment(format!( + "Missing KZG commitment for slot {}. 
Expected {}, got: {}", + block.slot(), + blobs.len(), + expected_kzg_commitments.len() + ))); + } + + let kzg_proofs = Vec::from(proofs); + + let kzg = self + .kzg + .as_ref() + .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; + kzg_utils::validate_blobs::( + kzg, + expected_kzg_commitments, + blobs.iter().collect(), + &kzg_proofs, + ) + .map_err(BlockProductionError::KzgError)?; + + Some((kzg_proofs.into(), blobs)) + } + None => None, + }; + + drop(blobs_verification_timer); + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); trace!( @@ -4700,7 +5325,13 @@ impl BeaconChain { "slot" => block.slot() ); - Ok((block, state)) + Ok(BeaconBlockResponse { + block, + state, + blob_items, + execution_payload_value, + consensus_block_value, + }) } /// This method must be called whenever an execution engine indicates that a payload is @@ -4815,15 +5446,18 @@ impl BeaconChain { /// /// This function will result in a call to `forkchoiceUpdated` on the EL if we're in the /// tail-end of the slot (as defined by `self.config.prepare_payload_lookahead`). + /// + /// Return `Ok(Some(head_block_root))` if this node prepared to propose at the next slot on + /// top of `head_block_root`. pub async fn prepare_beacon_proposer( self: &Arc, current_slot: Slot, - ) -> Result<(), Error> { + ) -> Result, Error> { let prepare_slot = current_slot + 1; // There's no need to run the proposer preparation routine before the bellatrix fork. if self.slot_is_prior_to_bellatrix(prepare_slot) { - return Ok(()); + return Ok(None); } let execution_layer = self @@ -4836,7 +5470,7 @@ impl BeaconChain { if !self.config.always_prepare_payload && !execution_layer.has_any_proposer_preparation_data().await { - return Ok(()); + return Ok(None); } // Load the cached head and its forkchoice update parameters. @@ -4880,14 +5514,11 @@ impl BeaconChain { ) .await??; - let (forkchoice_update_params, pre_payload_attributes) = - if let Some((fcu, Some(pre_payload))) = maybe_prep_data { - (fcu, pre_payload) - } else { - // Appropriate log messages have already been logged above and in - // `get_pre_payload_attributes`. - return Ok(()); - }; + let Some((forkchoice_update_params, Some(pre_payload_attributes))) = maybe_prep_data else { + // Appropriate log messages have already been logged above and in + // `get_pre_payload_attributes`. + return Ok(None); + }; // If the execution layer doesn't have any proposer data for this validator then we assume // it's not connected to this BN and no action is required. @@ -4897,10 +5528,10 @@ impl BeaconChain { .has_proposer_preparation_data(proposer) .await { - return Ok(()); + return Ok(None); } - // Fetch payoad attributes from the execution layer's cache, or compute them from scratch + // Fetch payload attributes from the execution layer's cache, or compute them from scratch // if no matching entry is found. This saves recomputing the withdrawals which can take // considerable time to compute if a state load is required. 
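Before the produced blobs are accepted above, their count must match the KZG commitments embedded in the block body, after which the `(blob, commitment, proof)` triples are batch-verified against the trusted setup. A sketch of the count check only, with plain integers standing in for commitments and byte vectors for blobs; none of these names are Lighthouse's:

```rust
fn check_blob_count(commitments: &[u64], blobs: &[Vec<u8>]) -> Result<(), String> {
    if commitments.len() != blobs.len() {
        return Err(format!(
            "commitment/blob count mismatch: {} commitments, {} blobs",
            commitments.len(),
            blobs.len()
        ));
    }
    // The real code would now batch-verify the (blob, commitment, proof)
    // triples with the KZG trusted setup before publishing the sidecars.
    Ok(())
}

fn main() {
    assert!(check_blob_count(&[1, 2], &[vec![0], vec![1]]).is_ok());
    assert!(check_blob_count(&[1], &[]).is_err());
}
```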
let head_root = forkchoice_update_params.head_root; @@ -4910,9 +5541,10 @@ impl BeaconChain { { payload_attributes } else { - let withdrawals = match self.spec.fork_name_at_slot::(prepare_slot) { + let prepare_slot_fork = self.spec.fork_name_at_slot::(prepare_slot); + let withdrawals = match prepare_slot_fork { ForkName::Base | ForkName::Altair | ForkName::Merge => None, - ForkName::Capella => { + ForkName::Capella | ForkName::Deneb => { let chain = self.clone(); self.spawn_blocking_handle( move || { @@ -4925,6 +5557,11 @@ impl BeaconChain { } }; + let parent_beacon_block_root = match prepare_slot_fork { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => None, + ForkName::Deneb => Some(pre_payload_attributes.parent_beacon_block_root), + }; + let payload_attributes = PayloadAttributes::new( self.slot_clock .start_of(prepare_slot) @@ -4933,6 +5570,7 @@ impl BeaconChain { pre_payload_attributes.prev_randao, execution_layer.get_suggested_fee_recipient(proposer).await, withdrawals.map(Into::into), + parent_beacon_block_root, ); execution_layer @@ -4968,28 +5606,26 @@ impl BeaconChain { parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(), payload_attributes: payload_attributes.into(), }, + metadata: Default::default(), version: Some(self.spec.fork_name_at_slot::(prepare_slot)), })); } } - let till_prepare_slot = - if let Some(duration) = self.slot_clock.duration_to_slot(prepare_slot) { - duration - } else { - // `SlotClock::duration_to_slot` will return `None` when we are past the start - // of `prepare_slot`. Don't bother sending a `forkchoiceUpdated` in that case, - // it's too late. - // - // This scenario might occur on an overloaded/under-resourced node. - warn!( - self.log, - "Delayed proposer preparation"; - "prepare_slot" => prepare_slot, - "validator" => proposer, - ); - return Ok(()); - }; + let Some(till_prepare_slot) = self.slot_clock.duration_to_slot(prepare_slot) else { + // `SlotClock::duration_to_slot` will return `None` when we are past the start + // of `prepare_slot`. Don't bother sending a `forkchoiceUpdated` in that case, + // it's too late. + // + // This scenario might occur on an overloaded/under-resourced node. + warn!( + self.log, + "Delayed proposer preparation"; + "prepare_slot" => prepare_slot, + "validator" => proposer, + ); + return Ok(None); + }; // If we are close enough to the proposal slot, send an fcU, which will have payload // attributes filled in by the execution layer cache we just primed. @@ -5011,7 +5647,7 @@ impl BeaconChain { .await?; } - Ok(()) + Ok(Some(head_root)) } pub async fn update_execution_engine_forkchoice( @@ -5928,6 +6564,45 @@ impl BeaconChain { gossip_attested || block_attested || aggregated || produced_block } + + /// The epoch at which we require a data availability check in block processing. + /// `None` if the `Deneb` fork is disabled. + pub fn data_availability_boundary(&self) -> Option { + self.data_availability_checker.data_availability_boundary() + } + + /// Gets the `LightClientBootstrap` object for a requested block root. + /// + /// Returns `None` when the state or block is not found in the database. + #[allow(clippy::type_complexity)] + pub fn get_light_client_bootstrap( + &self, + block_root: &Hash256, + ) -> Result, ForkName)>, Error> { + let Some((state_root, slot)) = self + .get_blinded_block(block_root)? + .map(|block| (block.state_root(), block.slot())) + else { + return Ok(None); + }; + + let Some(mut state) = self.get_state(&state_root, Some(slot))? 
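`parent_beacon_block_root` is a Deneb-only payload attribute (it feeds EIP-4788's beacon roots contract), so the code above gates it on the fork at `prepare_slot`. The same gating in isolation, with a stand-in `ForkName` and fixed-size roots:

```rust
#[derive(Clone, Copy)]
enum ForkName {
    Base,
    Altair,
    Merge,
    Capella,
    Deneb,
}

fn parent_beacon_block_root(fork: ForkName, head_root: [u8; 32]) -> Option<[u8; 32]> {
    match fork {
        // The attribute does not exist before Deneb.
        ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => None,
        ForkName::Deneb => Some(head_root),
    }
}

fn main() {
    let root = [0xaa; 32];
    for fork in [ForkName::Base, ForkName::Altair, ForkName::Merge, ForkName::Capella] {
        assert!(parent_beacon_block_root(fork, root).is_none());
    }
    assert_eq!(parent_beacon_block_root(ForkName::Deneb, root), Some(root));
}
```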
else { + return Ok(None); + }; + + let fork_name = state + .fork_name(&self.spec) + .map_err(Error::InconsistentFork)?; + + match fork_name { + ForkName::Altair | ForkName::Merge => { + LightClientBootstrap::from_beacon_state(&mut state) + .map(|bootstrap| Some((bootstrap, fork_name))) + .map_err(Error::LightClientError) + } + ForkName::Base | ForkName::Capella | ForkName::Deneb => Err(Error::UnsupportedFork), + } + } } impl Drop for BeaconChain { @@ -5935,6 +6610,7 @@ impl Drop for BeaconChain { let drop = || -> Result<(), Error> { self.persist_head_and_fork_choice()?; self.persist_op_pool()?; + self.persist_data_availability_checker()?; self.persist_eth1_cache() }; diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index eae71bd63..fa6c93a3e 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -14,18 +14,20 @@ use lru::LruCache; use smallvec::SmallVec; use state_processing::state_advance::partial_state_advance; use std::cmp::Ordering; +use std::num::NonZeroUsize; +use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, }; /// The number of sets of proposer indices that should be cached. -const CACHE_SIZE: usize = 16; +const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); /// This value is fairly unimportant, it's used to avoid heap allocations. The result of it being /// incorrect is non-substantial from a consensus perspective (and probably also from a /// performance perspective). -const TYPICAL_SLOTS_PER_EPOCH: usize = 32; +pub const TYPICAL_SLOTS_PER_EPOCH: usize = 32; /// For some given slot, this contains the proposer index (`index`) and the `fork` that should be /// used to verify their signature. diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs new file mode 100644 index 000000000..f2d150d72 --- /dev/null +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -0,0 +1,661 @@ +use derivative::Derivative; +use slot_clock::SlotClock; +use std::sync::Arc; + +use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}; +use crate::block_verification::{ + cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, + BlockSlashInfo, +}; +use crate::kzg_utils::{validate_blob, validate_blobs}; +use crate::{metrics, BeaconChainError}; +use kzg::{Error as KzgError, Kzg, KzgCommitment}; +use merkle_proof::MerkleTreeError; +use slog::{debug, warn}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use tree_hash::TreeHash; +use types::blob_sidecar::BlobIdentifier; +use types::{ + BeaconStateError, BlobSidecar, CloneConfig, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, +}; + +/// An error occurred while validating a gossip blob. +#[derive(Debug)] +pub enum GossipBlobError { + /// The blob sidecar is from a slot that is later than the current slot (with respect to the + /// gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + + /// There was an error whilst processing the blob. It is not known if it is + /// valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this blob due to an internal error. 
It's
+    /// unclear if the blob is valid.
+    BeaconChainError(BeaconChainError),
+
+    /// The `BlobSidecar` was gossiped over an incorrect subnet.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob is invalid or the peer is faulty.
+    InvalidSubnet { expected: u64, received: u64 },
+
+    /// The sidecar corresponds to a slot older than the finalized head slot.
+    ///
+    /// ## Peer scoring
+    ///
+    /// It's unclear if this blob is valid, but this blob is for a finalized slot and is
+    /// therefore useless to us.
+    PastFinalizedSlot {
+        blob_slot: Slot,
+        finalized_slot: Slot,
+    },
+
+    /// The proposer index specified in the sidecar does not match the locally computed
+    /// proposer index.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob is invalid and the peer is faulty.
+    ProposerIndexMismatch { sidecar: usize, local: usize },
+
+    /// The proposal signature is invalid.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob is invalid and the peer is faulty.
+    ProposalSignatureInvalid,
+
+    /// The proposal_index corresponding to blob.beacon_block_root is not known.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob is invalid and the peer is faulty.
+    UnknownValidator(u64),
+
+    /// The provided blob is not from a later slot than its parent.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob is invalid and the peer is faulty.
+    BlobIsNotLaterThanParent { blob_slot: Slot, parent_slot: Slot },
+
+    /// The provided blob's parent block is unknown.
+    ///
+    /// ## Peer scoring
+    ///
+    /// We cannot process the blob without validating its parent, the peer isn't necessarily faulty.
+    BlobParentUnknown(Arc<BlobSidecar<T>>),
+
+    /// Invalid kzg commitment inclusion proof.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob sidecar is invalid and the peer is faulty.
+    InvalidInclusionProof,
+
+    /// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple
+    /// over gossip or non-gossip sources.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The peer isn't faulty, but we do not forward it over gossip.
+    RepeatBlob {
+        proposer: u64,
+        slot: Slot,
+        index: u64,
+    },
+
+    /// `Kzg` struct hasn't been initialized. This is an internal error.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The peer isn't faulty; this is an internal error.
+    KzgNotInitialized,
+
+    /// The kzg verification failed.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob sidecar is invalid and the peer is faulty.
+    KzgError(kzg::Error),
+
+    /// The kzg commitment inclusion proof failed.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob sidecar is invalid.
+    InclusionProof(MerkleTreeError),
+
+    /// The pubkey cache timed out.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob sidecar may be valid, this is an internal error.
+    PubkeyCacheTimeout,
+
+    /// The block conflicts with finalization, no need to propagate.
+    ///
+    /// ## Peer scoring
+    ///
+    /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be
+    /// imported.
+ NotFinalizedDescendant { block_parent_root: Hash256 }, +} + +impl std::fmt::Display for GossipBlobError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GossipBlobError::BlobParentUnknown(blob_sidecar) => { + write!( + f, + "BlobParentUnknown(parent_root:{})", + blob_sidecar.block_parent_root() + ) + } + other => write!(f, "{:?}", other), + } + } +} + +impl From for GossipBlobError { + fn from(e: BeaconChainError) -> Self { + GossipBlobError::BeaconChainError(e) + } +} + +impl From for GossipBlobError { + fn from(e: BeaconStateError) -> Self { + GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e)) + } +} + +pub type GossipVerifiedBlobList = VariableList< + GossipVerifiedBlob, + <::EthSpec as EthSpec>::MaxBlobsPerBlock, +>; + +/// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on +/// the p2p network. +#[derive(Debug)] +pub struct GossipVerifiedBlob { + block_root: Hash256, + blob: KzgVerifiedBlob, +} + +impl GossipVerifiedBlob { + pub fn new( + blob: Arc>, + subnet_id: u64, + chain: &BeaconChain, + ) -> Result> { + let header = blob.signed_block_header.clone(); + // We only process slashing info if the gossip verification failed + // since we do not process the blob any further in that case. + validate_blob_sidecar_for_gossip(blob, subnet_id, chain).map_err(|e| { + process_block_slash_info::<_, GossipBlobError>( + chain, + BlockSlashInfo::from_early_error_blob(header, e), + ) + }) + } + /// Construct a `GossipVerifiedBlob` that is assumed to be valid. + /// + /// This should ONLY be used for testing. + pub fn __assumed_valid(blob: Arc>) -> Self { + Self { + block_root: blob.block_root(), + blob: KzgVerifiedBlob { blob }, + } + } + pub fn id(&self) -> BlobIdentifier { + BlobIdentifier { + block_root: self.block_root, + index: self.blob.blob_index(), + } + } + pub fn block_root(&self) -> Hash256 { + self.block_root + } + pub fn slot(&self) -> Slot { + self.blob.blob.slot() + } + pub fn index(&self) -> u64 { + self.blob.blob.index + } + pub fn kzg_commitment(&self) -> KzgCommitment { + self.blob.blob.kzg_commitment + } + pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.blob.blob.signed_block_header.clone() + } + pub fn block_proposer_index(&self) -> u64 { + self.blob.blob.block_proposer_index() + } + pub fn into_inner(self) -> KzgVerifiedBlob { + self.blob + } + pub fn as_blob(&self) -> &BlobSidecar { + self.blob.as_blob() + } + /// This is cheap as we're calling clone on an Arc + pub fn clone_blob(&self) -> Arc> { + self.blob.clone_blob() + } +} + +/// Wrapper over a `BlobSidecar` for which we have completed kzg verification. +/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`. 
+#[derive(Debug, Derivative, Clone, Encode, Decode)] +#[derivative(PartialEq, Eq)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgVerifiedBlob { + blob: Arc>, +} + +impl PartialOrd for KzgVerifiedBlob { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for KzgVerifiedBlob { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.blob.cmp(&other.blob) + } +} + +impl KzgVerifiedBlob { + pub fn new(blob: Arc>, kzg: &Kzg) -> Result { + verify_kzg_for_blob(blob, kzg) + } + pub fn to_blob(self) -> Arc> { + self.blob + } + pub fn as_blob(&self) -> &BlobSidecar { + &self.blob + } + /// This is cheap as we're calling clone on an Arc + pub fn clone_blob(&self) -> Arc> { + self.blob.clone() + } + pub fn blob_index(&self) -> u64 { + self.blob.index + } + /// Construct a `KzgVerifiedBlob` that is assumed to be valid. + /// + /// This should ONLY be used for testing. + #[cfg(test)] + pub fn __assumed_valid(blob: Arc>) -> Self { + Self { blob } + } +} + +/// Complete kzg verification for a `BlobSidecar`. +/// +/// Returns an error if the kzg verification check fails. +pub fn verify_kzg_for_blob( + blob: Arc>, + kzg: &Kzg, +) -> Result, KzgError> { + validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)?; + Ok(KzgVerifiedBlob { blob }) +} + +pub struct KzgVerifiedBlobList { + verified_blobs: Vec>, +} + +impl KzgVerifiedBlobList { + pub fn new>>>( + blob_list: I, + kzg: &Kzg, + ) -> Result { + let blobs = blob_list.into_iter().collect::>(); + verify_kzg_for_blob_list(blobs.iter(), kzg)?; + Ok(Self { + verified_blobs: blobs + .into_iter() + .map(|blob| KzgVerifiedBlob { blob }) + .collect(), + }) + } +} + +impl IntoIterator for KzgVerifiedBlobList { + type Item = KzgVerifiedBlob; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.verified_blobs.into_iter() + } +} + +/// Complete kzg verification for a list of `BlobSidecar`s. +/// Returns an error if any of the `BlobSidecar`s fails kzg verification. +/// +/// Note: This function should be preferred over calling `verify_kzg_for_blob` +/// in a loop since this function kzg verifies a list of blobs more efficiently. +pub fn verify_kzg_for_blob_list<'a, T: EthSpec, I>( + blob_iter: I, + kzg: &'a Kzg, +) -> Result<(), KzgError> +where + I: Iterator>>, +{ + let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_iter + .map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof))) + .unzip(); + validate_blobs::(kzg, commitments.as_slice(), blobs, proofs.as_slice()) +} + +pub fn validate_blob_sidecar_for_gossip( + blob_sidecar: Arc>, + subnet: u64, + chain: &BeaconChain, +) -> Result, GossipBlobError> { + let blob_slot = blob_sidecar.slot(); + let blob_index = blob_sidecar.index; + let block_parent_root = blob_sidecar.block_parent_root(); + let blob_proposer_index = blob_sidecar.block_proposer_index(); + let block_root = blob_sidecar.block_root(); + let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch()); + let signed_block_header = &blob_sidecar.signed_block_header; + + // This condition is not possible if we have received the blob from the network + // since we only subscribe to `MaxBlobsPerBlock` subnets over gossip network. + // We include this check only for completeness. + // Getting this error would imply something very wrong with our networking decoding logic. 
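`KzgVerifiedBlob` is a proof-carrying newtype: its field is private and the ordinary way to obtain one runs KZG verification, so merely holding the type is evidence the check passed. A minimal version of the pattern, with a trivial stand-in check in place of real KZG verification and hypothetical names throughout:

```rust
struct Blob {
    data: Vec<u8>,
}

struct VerifiedBlob {
    blob: Blob,
}

#[derive(Debug)]
struct VerificationError;

impl VerifiedBlob {
    /// The sole public constructor: verification cannot be skipped.
    fn new(blob: Blob) -> Result<Self, VerificationError> {
        // Stand-in check; the real code verifies the KZG proof here.
        if blob.data.is_empty() {
            return Err(VerificationError);
        }
        Ok(Self { blob })
    }

    fn data(&self) -> &[u8] {
        &self.blob.data
    }
}

fn main() {
    assert!(VerifiedBlob::new(Blob { data: vec![] }).is_err());
    let verified = VerifiedBlob::new(Blob { data: vec![1, 2, 3] }).unwrap();
    assert_eq!(verified.data().len(), 3);
}
```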
+ if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { + return Err(GossipBlobError::InvalidSubnet { + expected: subnet, + received: blob_index, + }); + } + + // Verify that the blob_sidecar was received on the correct subnet. + if blob_index != subnet { + return Err(GossipBlobError::InvalidSubnet { + expected: blob_index, + received: subnet, + }); + } + + // Verify that the sidecar is not from a future slot. + let latest_permissible_slot = chain + .slot_clock + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) + .ok_or(BeaconChainError::UnableToReadSlot)?; + if blob_slot > latest_permissible_slot { + return Err(GossipBlobError::FutureSlot { + message_slot: blob_slot, + latest_permissible_slot, + }); + } + + // Verify that the sidecar slot is greater than the latest finalized slot + let latest_finalized_slot = chain + .head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + if blob_slot <= latest_finalized_slot { + return Err(GossipBlobError::PastFinalizedSlot { + blob_slot, + finalized_slot: latest_finalized_slot, + }); + } + + // Verify that this is the first blob sidecar received for the tuple: + // (block_header.slot, block_header.proposer_index, blob_sidecar.index) + if chain + .observed_blob_sidecars + .read() + .proposer_is_known(&blob_sidecar) + .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? + { + return Err(GossipBlobError::RepeatBlob { + proposer: blob_proposer_index, + slot: blob_slot, + index: blob_index, + }); + } + + // Verify the inclusion proof in the sidecar + let _timer = metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION); + if !blob_sidecar + .verify_blob_sidecar_inclusion_proof() + .map_err(GossipBlobError::InclusionProof)? + { + return Err(GossipBlobError::InvalidInclusionProof); + } + drop(_timer); + + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + + // We have already verified that the blob is past finalization, so we can + // just check fork choice for the block's parent. + let Some(parent_block) = fork_choice.get_block(&block_parent_root) else { + return Err(GossipBlobError::BlobParentUnknown(blob_sidecar)); + }; + + // Do not process a blob that does not descend from the finalized root. + // We just loaded the parent_block, so we can be sure that it exists in fork choice. 
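The gossip validation above runs its cheapest structural checks first (index/subnet sanity, clock bounds, finality) before any signature or KZG work, so invalid messages are rejected with minimal effort. A condensed model of those early checks; the constant and error names are illustrative, not Lighthouse's:

```rust
const MAX_BLOBS_PER_BLOCK: u64 = 6; // illustrative bound

#[derive(Debug, PartialEq)]
enum Reject {
    InvalidSubnet,
    FutureSlot,
    PastFinalizedSlot,
}

fn early_checks(
    blob_index: u64,
    subnet: u64,
    blob_slot: u64,
    latest_permissible_slot: u64,
    finalized_slot: u64,
) -> Result<(), Reject> {
    // Index out of range, or gossiped on the wrong subnet.
    if blob_index >= MAX_BLOBS_PER_BLOCK || blob_index != subnet {
        return Err(Reject::InvalidSubnet);
    }
    // From the future (beyond the allowed clock disparity).
    if blob_slot > latest_permissible_slot {
        return Err(Reject::FutureSlot);
    }
    // At or before finalization: useless to us.
    if blob_slot <= finalized_slot {
        return Err(Reject::PastFinalizedSlot);
    }
    Ok(())
}

fn main() {
    assert_eq!(early_checks(9, 9, 100, 100, 64), Err(Reject::InvalidSubnet));
    assert_eq!(early_checks(0, 0, 101, 100, 64), Err(Reject::FutureSlot));
    assert_eq!(early_checks(0, 0, 64, 100, 64), Err(Reject::PastFinalizedSlot));
    assert!(early_checks(1, 1, 100, 100, 64).is_ok());
}
```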
+ if !fork_choice.is_finalized_checkpoint_or_descendant(block_parent_root) { + return Err(GossipBlobError::NotFinalizedDescendant { block_parent_root }); + } + drop(fork_choice); + + if parent_block.slot >= blob_slot { + return Err(GossipBlobError::BlobIsNotLaterThanParent { + blob_slot, + parent_slot: parent_block.slot, + }); + } + + let proposer_shuffling_root = + if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == blob_epoch { + parent_block + .next_epoch_shuffling_id + .shuffling_decision_block + } else { + parent_block.root + }; + + let proposer_opt = chain + .beacon_proposer_cache + .lock() + .get_slot::(proposer_shuffling_root, blob_slot); + + let (proposer_index, fork) = if let Some(proposer) = proposer_opt { + (proposer.index, proposer.fork) + } else { + debug!( + chain.log, + "Proposer shuffling cache miss for blob verification"; + "block_root" => %block_root, + "index" => %blob_index, + ); + if let Some(mut snapshot) = chain + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|snapshot_cache| { + snapshot_cache.get_cloned(block_parent_root, CloneConfig::committee_caches_only()) + }) + { + if snapshot.beacon_state.slot() == blob_slot { + debug!( + chain.log, + "Cloning snapshot cache state for blob verification"; + "block_root" => %block_root, + "index" => %blob_index, + ); + ( + snapshot + .beacon_state + .get_beacon_proposer_index(blob_slot, &chain.spec)?, + snapshot.beacon_state.fork(), + ) + } else { + debug!( + chain.log, + "Cloning and advancing snapshot cache state for blob verification"; + "block_root" => %block_root, + "index" => %blob_index, + ); + let state = + cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( + &mut snapshot.beacon_state, + Some(snapshot.beacon_block_root), + blob_slot, + &chain.spec, + )?; + ( + state.get_beacon_proposer_index(blob_slot, &chain.spec)?, + state.fork(), + ) + } + } + // Need to advance the state to get the proposer index + else { + warn!( + chain.log, + "Snapshot cache miss for blob verification"; + "block_root" => %block_root, + "index" => %blob_index, + ); + + let parent_block = chain + .get_blinded_block(&block_parent_root) + .map_err(GossipBlobError::BeaconChainError)? + .ok_or_else(|| { + GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root)) + })?; + + let mut parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot()))? + .ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing state {:?}", + parent_block.state_root() + )) + })?; + let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( + &mut parent_state, + Some(parent_block.state_root()), + blob_slot, + &chain.spec, + )?; + + let proposers = state.get_beacon_proposer_indices(&chain.spec)?; + let proposer_index = *proposers + .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) + .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; + + let fork = state.fork(); + // Prime the proposer shuffling cache with the newly-learned value. + chain.beacon_proposer_cache.lock().insert( + blob_epoch, + proposer_shuffling_root, + proposers, + fork, + )?; + (proposer_index, fork) + } + }; + + // Signature verify the signed block header. 
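The proposer lookup above tiers its sources by cost: the proposer shuffling cache, then a cloned snapshot-cache state, then a state loaded from disk and cheaply advanced. The `Option` combinator sketch below captures that tiering with stand-in `u64` indices; it elides the cache-priming side effects of the real code:

```rust
fn proposer_index(
    shuffling_cache: Option<u64>,
    snapshot_state: Option<u64>,
    load_from_disk: impl FnOnce() -> u64,
) -> u64 {
    shuffling_cache
        .or(snapshot_state)
        // Most expensive path: a DB read plus a cheap state advance.
        .unwrap_or_else(load_from_disk)
}

fn main() {
    assert_eq!(proposer_index(Some(7), None, || 9), 7);
    assert_eq!(proposer_index(None, Some(8), || 9), 8);
    assert_eq!(proposer_index(None, None, || 9), 9);
}
```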
+ let signature_is_valid = { + let pubkey_cache = + get_validator_pubkey_cache(chain).map_err(|_| GossipBlobError::PubkeyCacheTimeout)?; + let pubkey = pubkey_cache + .get(proposer_index) + .ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?; + signed_block_header.verify_signature::( + pubkey, + &fork, + chain.genesis_validators_root, + &chain.spec, + ) + }; + + if !signature_is_valid { + return Err(GossipBlobError::ProposalSignatureInvalid); + } + + if proposer_index != blob_proposer_index as usize { + return Err(GossipBlobError::ProposerIndexMismatch { + sidecar: blob_proposer_index as usize, + local: proposer_index, + }); + } + + chain + .observed_slashable + .write() + .observe_slashable( + blob_sidecar.slot(), + blob_sidecar.block_proposer_index(), + block_root, + ) + .map_err(|e| GossipBlobError::BeaconChainError(e.into()))?; + + // Now the signature is valid, store the proposal so we don't accept another blob sidecar + // with the same `BlobIdentifier`. + // It's important to double-check that the proposer still hasn't been observed so we don't + // have a race-condition when verifying two blocks simultaneously. + // + // Note: If this BlobSidecar goes on to fail full verification, we do not evict it from the seen_cache + // as alternate blob_sidecars for the same identifier can still be retrieved + // over rpc. Evicting them from this cache would allow faster propagation over gossip. So we allow + // retrieval of potentially valid blocks over rpc, but try to punish the proposer for signing + // invalid messages. Issue for more background + // https://github.com/ethereum/consensus-specs/issues/3261 + if chain + .observed_blob_sidecars + .write() + .observe_sidecar(&blob_sidecar) + .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? + { + return Err(GossipBlobError::RepeatBlob { + proposer: proposer_index as u64, + slot: blob_slot, + index: blob_index, + }); + } + + // Kzg verification for gossip blob sidecar + let kzg = chain + .kzg + .as_ref() + .ok_or(GossipBlobError::KzgNotInitialized)?; + let kzg_verified_blob = + KzgVerifiedBlob::new(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?; + + Ok(GossipVerifiedBlob { + block_root, + blob: kzg_verified_blob, + }) +} + +/// Returns the canonical root of the given `blob`. +/// +/// Use this function to ensure that we report the blob hashing time Prometheus metric. +pub fn get_blob_root(blob: &BlobSidecar) -> Hash256 { + let blob_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOB_ROOT); + + let blob_root = blob.tree_hash_root(); + + metrics::stop_timer(blob_root_timer); + + blob_root +} diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index 484de841d..c5293bcb0 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -23,7 +23,7 @@ pub struct Timestamps { } // Helps arrange delay data so it is more relevant to metrics. -#[derive(Default)] +#[derive(Debug, Default)] pub struct BlockDelays { pub observed: Option, pub imported: Option, @@ -51,7 +51,7 @@ impl BlockDelays { // If the block was received via gossip, we can record the client type of the peer which sent us // the block. -#[derive(Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq)] pub struct BlockPeerInfo { pub id: Option, pub client: Option, @@ -80,6 +80,8 @@ pub struct BlockTimesCache { /// Helper methods to read from and write to the cache. 
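The double-check under the write lock above ("observe the sidecar, then reject if already seen") makes check-and-insert atomic, so two blobs for the same identifier cannot both pass verification concurrently. A reduced version of that guard using a `HashSet` of `(slot, index)` identifiers; the struct and method names are stand-ins:

```rust
use std::collections::HashSet;
use std::sync::RwLock;

struct ObservedSidecars {
    seen: RwLock<HashSet<(u64, u64)>>, // (slot, index)
}

impl ObservedSidecars {
    /// Returns `true` if this sidecar was already observed.
    fn observe(&self, slot: u64, index: u64) -> bool {
        // Taking the write lock makes check-and-insert atomic, so two
        // concurrent verifications cannot both claim "first seen".
        !self.seen.write().unwrap().insert((slot, index))
    }
}

fn main() {
    let cache = ObservedSidecars { seen: RwLock::new(HashSet::new()) };
    assert!(!cache.observe(100, 0)); // first observation passes
    assert!(cache.observe(100, 0)); // repeat is rejected
}
```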
impl BlockTimesCache {
+    /// Set the observation time for `block_root` to `timestamp` if `timestamp` is less than
+    /// any previous timestamp at which this block was observed.
    pub fn set_time_observed(
        &mut self,
        block_root: BlockRoot,
@@ -92,11 +94,19 @@ impl BlockTimesCache {
            .cache
            .entry(block_root)
            .or_insert_with(|| BlockTimesCacheValue::new(slot));
-        block_times.timestamps.observed = Some(timestamp);
-        block_times.peer_info = BlockPeerInfo {
-            id: peer_id,
-            client: peer_client,
-        };
+        match block_times.timestamps.observed {
+            Some(existing_observation_time) if existing_observation_time <= timestamp => {
+                // Existing timestamp is earlier, do nothing.
+            }
+            _ => {
+                // No existing timestamp, or new timestamp is earlier.
+                block_times.timestamps.observed = Some(timestamp);
+                block_times.peer_info = BlockPeerInfo {
+                    id: peer_id,
+                    client: peer_client,
+                };
+            }
+        }
    }

    pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
@@ -141,3 +151,71 @@ impl BlockTimesCache {
            .retain(|_, cache| cache.slot > current_slot.saturating_sub(64_u64));
    }
}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn observed_time_uses_minimum() {
+        let mut cache = BlockTimesCache::default();
+
+        let block_root = Hash256::zero();
+        let slot = Slot::new(100);
+
+        let slot_start_time = Duration::from_secs(0);
+
+        let ts1 = Duration::from_secs(5);
+        let ts2 = Duration::from_secs(6);
+        let ts3 = Duration::from_secs(4);
+
+        let peer_info2 = BlockPeerInfo {
+            id: Some("peer2".to_string()),
+            client: Some("lighthouse".to_string()),
+        };
+
+        let peer_info3 = BlockPeerInfo {
+            id: Some("peer3".to_string()),
+            client: Some("prysm".to_string()),
+        };
+
+        cache.set_time_observed(block_root, slot, ts1, None, None);
+
+        assert_eq!(
+            cache.get_block_delays(block_root, slot_start_time).observed,
+            Some(ts1)
+        );
+        assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default());
+
+        // Second observation with higher timestamp should not override anything, even though it has
+        // superior peer info.
+        cache.set_time_observed(
+            block_root,
+            slot,
+            ts2,
+            peer_info2.id.clone(),
+            peer_info2.client.clone(),
+        );
+
+        assert_eq!(
+            cache.get_block_delays(block_root, slot_start_time).observed,
+            Some(ts1)
+        );
+        assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default());
+
+        // Third observation with lower timestamp should override everything.
+        cache.set_time_observed(
+            block_root,
+            slot,
+            ts3,
+            peer_info3.id.clone(),
+            peer_info3.client.clone(),
+        );
+
+        assert_eq!(
+            cache.get_block_delays(block_root, slot_start_time).observed,
+            Some(ts3)
+        );
+        assert_eq!(cache.get_peer_info(block_root), peer_info3);
+    }
+}
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index ef7f1b339..e8df5b811 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -23,6 +23,7 @@
//!            |
//!            ▼
//!     SignedBeaconBlock
+//!            |
//!            |---------------
//!            |              |
//!            |              ▼
@@ -47,6 +48,11 @@
//  returned alongside.
#![allow(clippy::result_large_err)]

+use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
+use crate::block_verification_types::{
+    AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock,
+};
+use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock};
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::execution_payload::{
    is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
@@ -64,15 +70,17 @@ use crate::{
    metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use derivative::Derivative;
-use eth2::types::EventKind;
+use eth2::types::{EventKind, PublishBlockRequest};
use execution_layer::PayloadStatus;
-use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
+pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard;
use proto_array::Block as ProtoBlock;
use safe_arith::ArithError;
use slog::{debug, error, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
+use ssz_derive::{Decode, Encode};
+use ssz_types::VariableList;
use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
use state_processing::{
    block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
@@ -82,6 +90,7 @@ use state_processing::{
    StateProcessingStrategy, VerifyBlockRoot,
};
use std::borrow::Cow;
+use std::fmt::Debug;
use std::fs;
use std::io::Write;
use std::sync::Arc;
@@ -89,12 +98,12 @@ use std::time::Duration;
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
use task_executor::JoinHandle;
use tree_hash::TreeHash;
-use types::ExecPayload;
use types::{
-    BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch,
-    EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes,
-    RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
+    BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec,
+    ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch,
+    SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
};
+use types::{BlobSidecar, ExecPayload};

pub const POS_PANDA_BANNER: &str = r#"
    ,,,         ,,,                                               ,,,         ,,,
@@ -141,7 +150,7 @@ pub enum BlockError<T: EthSpec> {
    ///
    /// It's unclear if this block is valid, but it cannot be processed without already knowing
    /// its parent.
-    ParentUnknown(Arc<SignedBeaconBlock<T>>),
+    ParentUnknown(RpcBlock<T>),
    /// The block slot is greater than the present slot.
    ///
    /// ## Peer scoring
@@ -215,7 +224,7 @@ pub enum BlockError<T: EthSpec> {
    ///
    /// The block is invalid and the peer is faulty.
    InvalidSignature,
-    /// The provided block is from an later slot than its parent.
+    /// The provided block is not from a later slot than its parent.
    ///
    /// ## Peer scoring
    ///
@@ -284,6 +293,27 @@ pub enum BlockError<T: EthSpec> {
    /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
    /// we penalise them with a mid-tolerance error.
    Slashable,
+    /// The block and blob together failed validation.
+    ///
+    /// ## Peer scoring
+    ///
+    /// This error implies that the block satisfied all block validity conditions except consistency
+    /// with the corresponding blob that we received over gossip/rpc. This is because availability
+    /// checks are always done after all other checks are completed.
+    /// This implies that either:
+    /// 1. The block proposer is faulty
+    /// 2.
We received the blob over rpc and it is invalid (inconsistent w.r.t the block). + /// 3. It is an internal error + /// For all these cases, we cannot penalize the peer that gave us the block. + /// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob. + /// https://github.com/sigp/lighthouse/issues/4546 + AvailabilityCheck(AvailabilityCheckError), +} + +impl From for BlockError { + fn from(e: AvailabilityCheckError) -> Self { + Self::AvailabilityCheck(e) + } } /// Returned when block validation failed due to some issue verifying @@ -459,6 +489,7 @@ impl From for BlockError { } /// Stores information about verifying a payload against an execution engine. +#[derive(Debug, PartialEq, Clone, Encode, Decode)] pub struct PayloadVerificationOutcome { pub payload_verification_status: PayloadVerificationStatus, pub is_valid_merge_transition_block: bool, @@ -476,7 +507,7 @@ pub enum BlockSlashInfo { } impl BlockSlashInfo> { - pub fn from_early_error(header: SignedBeaconBlockHeader, e: BlockError) -> Self { + pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError) -> Self { match e { BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), // `InvalidSignature` could indicate any signature in the block, so we want @@ -486,17 +517,28 @@ impl BlockSlashInfo> { } } +impl BlockSlashInfo> { + pub fn from_early_error_blob(header: SignedBeaconBlockHeader, e: GossipBlobError) -> Self { + match e { + GossipBlobError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), + // `InvalidSignature` could indicate any signature in the block, so we want + // to recheck the proposer signature alone. + _ => BlockSlashInfo::SignatureNotChecked(header, e), + } + } +} + /// Process invalid blocks to see if they are suitable for the slasher. /// /// If no slasher is configured, this is a no-op. -fn process_block_slash_info( +pub(crate) fn process_block_slash_info( chain: &BeaconChain, - slash_info: BlockSlashInfo>, -) -> BlockError { + slash_info: BlockSlashInfo, +) -> TErr { if let Some(slasher) = chain.slasher.as_ref() { let (verified_header, error) = match slash_info { BlockSlashInfo::SignatureNotChecked(header, e) => { - if verify_header_signature(chain, &header).is_ok() { + if verify_header_signature::<_, TErr>(chain, &header).is_ok() { (header, e) } else { return e; @@ -528,7 +570,7 @@ fn process_block_slash_info( /// The given `chain_segment` must contain only blocks from the same epoch, otherwise an error /// will be returned. 
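// Editor's note (illustrative): `process_block_slash_info` is now generic over the error
// type, which callers select at the call site, e.g. (usage shape taken from later in this
// diff):
//
//     .map_err(|slash_info| {
//         process_block_slash_info::<_, BlockError<T::EthSpec>>(chain, slash_info)
//     })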
pub fn signature_verify_chain_segment( - mut chain_segment: Vec<(Hash256, Arc>)>, + mut chain_segment: Vec<(Hash256, RpcBlock)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -545,32 +587,40 @@ pub fn signature_verify_chain_segment( .map(|(_, block)| block.slot()) .unwrap_or_else(|| slot); - let state = cheap_state_advance_to_obtain_committees( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, highest_slot, &chain.spec, )?; + // unzip chain segment and verify kzg in bulk + let (roots, blocks): (Vec<_>, Vec<_>) = chain_segment.into_iter().unzip(); + let maybe_available_blocks = chain + .data_availability_checker + .verify_kzg_for_rpc_blocks(blocks)?; + // zip it back up + let mut signature_verified_blocks = roots + .into_iter() + .zip(maybe_available_blocks) + .map(|(block_root, maybe_available_block)| { + let consensus_context = ConsensusContext::new(maybe_available_block.slot()) + .set_current_block_root(block_root); + SignatureVerifiedBlock { + block: maybe_available_block, + block_root, + parent: None, + consensus_context, + } + }) + .collect::>(); + + // verify signatures let pubkey_cache = get_validator_pubkey_cache(chain)?; let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - - let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len()); - - for (block_root, block) in &chain_segment { - let mut consensus_context = - ConsensusContext::new(block.slot()).set_current_block_root(*block_root); - - signature_verifier.include_all_signatures(block, &mut consensus_context)?; - - // Save the block and its consensus context. The context will have had its proposer index - // and attesting indices filled in, which can be used to accelerate later block processing. - signature_verified_blocks.push(SignatureVerifiedBlock { - block: block.clone(), - block_root: *block_root, - parent: None, - consensus_context, - }); + for svb in &mut signature_verified_blocks { + signature_verifier + .include_all_signatures(svb.block.as_block(), &mut svb.consensus_context)?; } if signature_verifier.verify().is_err() { @@ -600,7 +650,7 @@ pub struct GossipVerifiedBlock { /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit /// signatures) have been verified. pub struct SignatureVerifiedBlock { - block: Arc>, + block: MaybeAvailableBlock, block_root: Hash256, parent: Option>, consensus_context: ConsensusContext, @@ -617,52 +667,68 @@ type PayloadVerificationHandle = /// - Signatures /// - State root check /// - Per block processing +/// - Blobs sidecar has been validated if present /// /// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid /// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the /// `BeaconChain` immediately after it is instantiated. 
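// Editor's illustration of the unzip -> bulk-verify -> zip shape used in
// `signature_verify_chain_segment` above (standalone toy, not the real API):
//
//     let pairs = vec![(1u64, "a".to_string()), (2, "b".to_string())];
//     let (roots, blocks): (Vec<_>, Vec<_>) = pairs.into_iter().unzip();
//     let verified = blocks; // imagine a single bulk KZG + signature pass here
//     let rezipped: Vec<_> = roots.into_iter().zip(verified).collect();
//     assert_eq!(rezipped.len(), 2);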
pub struct ExecutionPendingBlock { - pub block: Arc>, - pub block_root: Hash256, - pub state: BeaconState, - pub parent_block: SignedBeaconBlock>, - pub parent_eth1_finalization_data: Eth1FinalizationData, - pub confirmed_state_roots: Vec, - pub consensus_context: ConsensusContext, + pub block: MaybeAvailableBlock, + pub import_data: BlockImportData, pub payload_verification_handle: PayloadVerificationHandle, } -pub trait IntoGossipVerifiedBlock: Sized { +pub trait IntoGossipVerifiedBlockContents: Sized { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockError>; - fn inner(&self) -> Arc>; + ) -> Result, BlockContentsError>; + fn inner_block(&self) -> &SignedBeaconBlock; } -impl IntoGossipVerifiedBlock for GossipVerifiedBlock { +impl IntoGossipVerifiedBlockContents for GossipVerifiedBlockContents { fn into_gossip_verified_block( self, _chain: &BeaconChain, - ) -> Result, BlockError> { + ) -> Result, BlockContentsError> { Ok(self) } - - fn inner(&self) -> Arc> { - self.block.clone() + fn inner_block(&self) -> &SignedBeaconBlock { + self.0.block.as_block() } } -impl IntoGossipVerifiedBlock for Arc> { +impl IntoGossipVerifiedBlockContents for PublishBlockRequest { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockError> { - GossipVerifiedBlock::new(self, chain) + ) -> Result, BlockContentsError> { + let (block, blobs) = self.deconstruct(); + + let gossip_verified_blobs = blobs + .map(|(kzg_proofs, blobs)| { + let mut gossip_verified_blobs = vec![]; + for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { + let _timer = + metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION); + let blob = BlobSidecar::new(i, blob, &block, *kzg_proof) + .map_err(BlockContentsError::SidecarError)?; + drop(_timer); + let gossip_verified_blob = + GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?; + gossip_verified_blobs.push(gossip_verified_blob); + } + let gossip_verified_blobs = VariableList::from(gossip_verified_blobs); + Ok::<_, BlockContentsError>(gossip_verified_blobs) + }) + .transpose()?; + let gossip_verified_block = GossipVerifiedBlock::new(block, chain)?; + + Ok((gossip_verified_block, gossip_verified_blobs)) } - fn inner(&self) -> Arc> { - self.clone() + fn inner_block(&self) -> &SignedBeaconBlock { + self.signed_block() } } @@ -684,7 +750,9 @@ pub trait IntoExecutionPendingBlock: Sized { } execution_pending }) - .map_err(|slash_info| process_block_slash_info(chain, slash_info)) + .map_err(|slash_info| { + process_block_slash_info::<_, BlockError>(chain, slash_info) + }) } /// Convert the block to fully-verified form while producing data to aid checking slashability. @@ -712,14 +780,21 @@ impl GossipVerifiedBlock { // it to the slasher if an error occurs, because that's the end of this block's journey, // and it could be a repeat proposal (a likely cause for slashing!). let header = block.signed_block_header(); - Self::new_without_slasher_checks(block, chain).map_err(|e| { - process_block_slash_info(chain, BlockSlashInfo::from_early_error(header, e)) + // The `SignedBeaconBlock` and `SignedBeaconBlockHeader` have the same canonical root, + // but it's way quicker to calculate root of the header since the hash of the tree rooted + // at `BeaconBlockBody` is already computed in the header. 
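+        // Editor's note (illustrative): the equivalence relied on here is
+        //
+        //     get_block_root(&block) == get_block_header_root(&block.signed_block_header())
+        //
+        // since the header already carries the cached `body_root` (both helpers are defined
+        // later in this file).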
+ Self::new_without_slasher_checks(block, &header, chain).map_err(|e| { + process_block_slash_info::<_, BlockError>( + chain, + BlockSlashInfo::from_early_error_block(header, e), + ) }) } /// As for new, but doesn't pass the block to the slasher. fn new_without_slasher_checks( block: Arc>, + block_header: &SignedBeaconBlockHeader, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. @@ -739,7 +814,7 @@ impl GossipVerifiedBlock { }); } - let block_root = get_block_root(&block); + let block_root = get_block_header_root(block_header); // Disallow blocks that conflict with the anchor (weak subjectivity checkpoint), if any. check_block_against_anchor_slot(block.message(), chain)?; @@ -762,11 +837,16 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - check_block_is_finalized_checkpoint_or_descendant(chain, &fork_choice_read_lock, &block)?; - drop(fork_choice_read_lock); + let block = check_block_is_finalized_checkpoint_or_descendant( + chain, + &fork_choice_read_lock, + block, + )?; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - let (parent_block, block) = verify_parent_block_is_known(chain, block)?; + let (parent_block, block) = + verify_parent_block_is_known::(block_root, &fork_choice_read_lock, block)?; + drop(fork_choice_read_lock); // Track the number of skip slots between the block and its parent. metrics::set_gauge( @@ -825,7 +905,7 @@ impl GossipVerifiedBlock { ); // The state produced is only valid for determining proposer/attester shuffling indices. - let state = cheap_state_advance_to_obtain_committees( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -866,6 +946,11 @@ impl GossipVerifiedBlock { return Err(BlockError::ProposalSignatureInvalid); } + chain + .observed_slashable + .write() + .observe_slashable(block.slot(), block.message().proposer_index(), block_root) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; // Now the signature is valid, store the proposal so we don't accept another from this // validator and slot. // @@ -877,7 +962,9 @@ impl GossipVerifiedBlock { .observe_proposal(block_root, block.message()) .map_err(|e| BlockError::BeaconChainError(e.into()))? { - SeenBlock::Slashable => return Err(BlockError::Slashable), + SeenBlock::Slashable => { + return Err(BlockError::Slashable); + } SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown), SeenBlock::UniqueNonSlashable => {} }; @@ -895,7 +982,7 @@ impl GossipVerifiedBlock { // Having checked the proposer index and the block root we can cache them. let consensus_context = ConsensusContext::new(block.slot()) .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); + .set_proposer_index(block.as_block().message().proposer_index()); Ok(Self { block, @@ -928,7 +1015,7 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock &SignedBeaconBlock { - &self.block + self.block.as_block() } } @@ -938,12 +1025,13 @@ impl SignatureVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn new( - block: Arc>, + block: MaybeAvailableBlock, block_root: Hash256, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. 
block + .as_block() .fork_name(&chain.spec) .map_err(BlockError::InconsistentFork)?; @@ -952,7 +1040,7 @@ impl SignatureVerifiedBlock { let (mut parent, block) = load_parent(block_root, block, chain)?; - let state = cheap_state_advance_to_obtain_committees( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -966,7 +1054,7 @@ impl SignatureVerifiedBlock { let mut consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); - signature_verifier.include_all_signatures(&block, &mut consensus_context)?; + signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { @@ -982,12 +1070,13 @@ impl SignatureVerifiedBlock { /// As for `new` above but producing `BlockSlashInfo`. pub fn check_slashable( - block: Arc>, + block: MaybeAvailableBlock, block_root: Hash256, chain: &BeaconChain, ) -> Result>> { let header = block.signed_block_header(); - Self::new(block, block_root, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e)) + Self::new(block, block_root, chain) + .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) } /// Finishes signature verification on the provided `GossipVerifedBlock`. Does not re-verify @@ -1002,7 +1091,7 @@ impl SignatureVerifiedBlock { load_parent(from.block_root, from.block, chain)? }; - let state = cheap_state_advance_to_obtain_committees( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -1017,11 +1106,14 @@ impl SignatureVerifiedBlock { // signature. let mut consensus_context = from.consensus_context; signature_verifier - .include_all_signatures_except_proposal(&block, &mut consensus_context)?; + .include_all_signatures_except_proposal(block.as_ref(), &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { - block, + block: MaybeAvailableBlock::AvailabilityPending { + block_root: from.block_root, + block, + }, block_root: from.block_root, parent: Some(parent), consensus_context, @@ -1038,7 +1130,7 @@ impl SignatureVerifiedBlock { ) -> Result>> { let header = from.block.signed_block_header(); Self::from_gossip_verified_block(from, chain) - .map_err(|e| BlockSlashInfo::from_early_error(header, e)) + .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) } pub fn block_root(&self) -> Hash256 { @@ -1074,7 +1166,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc } fn block(&self) -> &SignedBeaconBlock { - &self.block + self.block.as_block() } } @@ -1090,8 +1182,16 @@ impl IntoExecutionPendingBlock for Arc IntoExecutionPendingBlock for Arc IntoExecutionPendingBlock for RpcBlock { + /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. + fn into_execution_pending_block_slashable( + self, + block_root: Hash256, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, BlockSlashInfo>> { + // Perform an early check to prevent wasting time on irrelevant blocks. 
+ let block_root = check_block_relevancy(self.as_block(), block_root, chain) + .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; + let maybe_available = chain + .data_availability_checker + .verify_kzg_for_rpc_block(self.clone()) + .map_err(|e| { + BlockSlashInfo::SignatureNotChecked( + self.signed_block_header(), + BlockError::AvailabilityCheck(e), + ) + })?; + SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)? + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) + } + + fn block(&self) -> &SignedBeaconBlock { + self.as_block() + } +} + impl ExecutionPendingBlock { /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See /// the struct-level documentation for more information. @@ -1109,13 +1239,19 @@ impl ExecutionPendingBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn from_signature_verified_components( - block: Arc>, + block: MaybeAvailableBlock, block_root: Hash256, parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { + chain + .observed_slashable + .write() + .observe_slashable(block.slot(), block.message().proposer_index(), block_root) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + chain .observed_block_producers .write() @@ -1145,14 +1281,14 @@ impl ExecutionPendingBlock { // because it will revert finalization. Note that the finalized block is stored in fork // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). - return Err(BlockError::ParentUnknown(block)); + return Err(BlockError::ParentUnknown(block.into_rpc_block())); } /* * Perform cursory checks to see if the block is even worth processing. */ - check_block_relevancy(&block, block_root, chain)?; + check_block_relevancy(block.as_block(), block_root, chain)?; // Define a future that will verify the execution payload with an execution engine. // @@ -1160,7 +1296,7 @@ impl ExecutionPendingBlock { // with the payload verification. let payload_notifier = PayloadNotifier::new( chain.clone(), - block.clone(), + block.block_cloned(), &parent.pre_state, notify_execution_layer, )?; @@ -1310,7 +1446,9 @@ impl ExecutionPendingBlock { StoreOp::PutStateTemporaryFlag(state_root), ] }; - chain.store.do_atomically(state_batch)?; + chain + .store + .do_atomically_with_block_and_blobs_cache(state_batch)?; drop(txn_lock); confirmed_state_roots.push(state_root); @@ -1401,13 +1539,13 @@ impl ExecutionPendingBlock { &state, &chain.log, ); - write_block(&block, block_root, &chain.log); + write_block(block.as_block(), block_root, &chain.log); let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); if let Err(err) = per_block_processing( &mut state, - &block, + block.as_block(), // Signatures were verified earlier in this function. 
BlockSignatureStrategy::NoVerification, StateProcessingStrategy::Accurate, @@ -1491,12 +1629,14 @@ impl ExecutionPendingBlock { Ok(Self { block, - block_root, - state, - parent_block: parent.beacon_block, - parent_eth1_finalization_data, - confirmed_state_roots, - consensus_context, + import_data: BlockImportData { + block_root, + state, + parent_block: parent.beacon_block, + parent_eth1_finalization_data, + confirmed_state_roots, + consensus_context, + }, payload_verification_handle, }) } @@ -1551,13 +1691,16 @@ fn check_block_against_finalized_slot( /// ## Warning /// /// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. -pub fn check_block_is_finalized_checkpoint_or_descendant( +pub fn check_block_is_finalized_checkpoint_or_descendant< + T: BeaconChainTypes, + B: AsBlock, +>( chain: &BeaconChain, fork_choice: &BeaconForkChoice, - block: &Arc>, -) -> Result<(), BlockError> { + block: B, +) -> Result> { if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { - Ok(()) + Ok(block) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, // then there are two more cases: @@ -1576,7 +1719,7 @@ pub fn check_block_is_finalized_checkpoint_or_descendant( block_parent_root: block.parent_root(), }) } else { - Err(BlockError::ParentUnknown(block.clone())) + Err(BlockError::ParentUnknown(block.into_rpc_block())) } } } @@ -1643,21 +1786,34 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { block_root } +/// Returns the canonical root of the given `block_header`. +/// +/// Use this function to ensure that we report the block hashing time Prometheus metric. +pub fn get_block_header_root(block_header: &SignedBeaconBlockHeader) -> Hash256 { + let block_root_timer = metrics::start_timer(&metrics::BLOCK_HEADER_PROCESSING_BLOCK_ROOT); + + let block_root = block_header.message.canonical_root(); + + metrics::stop_timer(block_root_timer); + + block_root +} + /// Verify the parent of `block` is known, returning some information about the parent block from /// fork choice. #[allow(clippy::type_complexity)] fn verify_parent_block_is_known( - chain: &BeaconChain, + block_root: Hash256, + fork_choice_read_lock: &RwLockReadGuard>, block: Arc>, ) -> Result<(ProtoBlock, Arc>), BlockError> { - if let Some(proto_block) = chain - .canonical_head - .fork_choice_read_lock() - .get_block(&block.message().parent_root()) - { + if let Some(proto_block) = fork_choice_read_lock.get_block(&block.parent_root()) { Ok((proto_block, block)) } else { - Err(BlockError::ParentUnknown(block)) + Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs( + Some(block_root), + block, + ))) } } @@ -1666,17 +1822,11 @@ fn verify_parent_block_is_known( /// Returns `Err(BlockError::ParentUnknown)` if the parent is not found, or if an error occurs /// whilst attempting the operation. #[allow(clippy::type_complexity)] -fn load_parent( +fn load_parent>( block_root: Hash256, - block: Arc>, + block: B, chain: &BeaconChain, -) -> Result< - ( - PreProcessingSnapshot, - Arc>, - ), - BlockError, -> { +) -> Result<(PreProcessingSnapshot, B), BlockError> { let spec = &chain.spec; // Reject any block if its parent is not known to fork choice. 
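// Editor's note (illustrative, not part of this change): `load_parent` is now generic
// over the block representation `B: AsBlock<T::EthSpec>`, so both existing call shapes
// type-check, e.g.
//
//     let (parent, block) = load_parent(block_root, arc_block, chain)?;       // B = Arc<SignedBeaconBlock<_>>
//     let (parent, block) = load_parent(block_root, maybe_available, chain)?; // B = MaybeAvailableBlock<_>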
@@ -1694,7 +1844,7 @@ fn load_parent(
        .fork_choice_read_lock()
        .contains_block(&block.parent_root())
    {
-        return Err(BlockError::ParentUnknown(block));
+        return Err(BlockError::ParentUnknown(block.into_rpc_block()));
    }

    let block_delay = chain
@@ -1794,6 +1944,47 @@ fn load_parent(
    result
}

+/// This trait is used to unify `BlockError` and `GossipBlobError`.
+pub trait BlockBlobError: From<BeaconChainError> + From<BeaconStateError> + Debug {
+    fn not_later_than_parent_error(block_slot: Slot, state_slot: Slot) -> Self;
+    fn unknown_validator_error(validator_index: u64) -> Self;
+    fn proposer_signature_invalid() -> Self;
+}
+
+impl<E: EthSpec> BlockBlobError for BlockError<E> {
+    fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self {
+        BlockError::BlockIsNotLaterThanParent {
+            block_slot,
+            parent_slot,
+        }
+    }
+
+    fn unknown_validator_error(validator_index: u64) -> Self {
+        BlockError::UnknownValidator(validator_index)
+    }
+
+    fn proposer_signature_invalid() -> Self {
+        BlockError::ProposalSignatureInvalid
+    }
+}
+
+impl<E: EthSpec> BlockBlobError for GossipBlobError<E> {
+    fn not_later_than_parent_error(blob_slot: Slot, parent_slot: Slot) -> Self {
+        GossipBlobError::BlobIsNotLaterThanParent {
+            blob_slot,
+            parent_slot,
+        }
+    }
+
+    fn unknown_validator_error(validator_index: u64) -> Self {
+        GossipBlobError::UnknownValidator(validator_index)
+    }
+
+    fn proposer_signature_invalid() -> Self {
+        GossipBlobError::ProposalSignatureInvalid
+    }
+}
+
/// Performs a cheap (time-efficient) state advancement so the committees and proposer shuffling for
/// `slot` can be obtained from `state`.
///
@@ -1805,12 +1996,12 @@ fn load_parent(
/// and `Cow::Borrowed(state)` will be returned. Otherwise, the state will be cloned, cheaply
/// advanced and then returned as a `Cow::Owned`. The end result is that the given `state` is never
/// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build).
-fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
+pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobError>(
    state: &'a mut BeaconState<E>,
    state_root_opt: Option<Hash256>,
    block_slot: Slot,
    spec: &ChainSpec,
-) -> Result<Cow<'a, BeaconState<E>>, BlockError<E>> {
+) -> Result<Cow<'a, BeaconState<E>>, Err> {
    let block_epoch = block_slot.epoch(E::slots_per_epoch());

    if state.current_epoch() == block_epoch {
@@ -1821,10 +2012,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(

        Ok(Cow::Borrowed(state))
    } else if state.slot() > block_slot {
-        Err(BlockError::BlockIsNotLaterThanParent {
-            block_slot,
-            parent_slot: state.slot(),
-        })
+        Err(Err::not_later_than_parent_error(block_slot, state.slot()))
    } else {
        let mut state = state.clone_with(CloneConfig::committee_caches_only());
        let target_slot = block_epoch.start_slot(E::slots_per_epoch());
@@ -1832,7 +2020,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
        // Advance the state into the same epoch as the block. Use the "partial" method since state
        // roots are not important for proposer/attester shuffling.
        partial_state_advance(&mut state, state_root_opt, target_slot, spec)
-            .map_err(|e| BlockError::BeaconChainError(BeaconChainError::from(e)))?;
+            .map_err(BeaconChainError::from)?;

        state.build_committee_cache(RelativeEpoch::Previous, spec)?;
        state.build_committee_cache(RelativeEpoch::Current, spec)?;
@@ -1844,12 +2032,11 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(

/// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`.
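// Editor's sketch (hypothetical, illustrating the `BlockBlobError` trait above): a third
// error type could reuse the shared verification helpers like so (the
// `From<BeaconChainError>`/`From<BeaconStateError>` impls required by the trait bound are
// omitted for brevity):
//
//     #[derive(Debug)]
//     enum MyVerifyError {
//         NotLater { block_slot: Slot, parent_slot: Slot },
//         UnknownValidator(u64),
//         BadSignature,
//     }
//
//     impl BlockBlobError for MyVerifyError {
//         fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self {
//             Self::NotLater { block_slot, parent_slot }
//         }
//         fn unknown_validator_error(validator_index: u64) -> Self {
//             Self::UnknownValidator(validator_index)
//         }
//         fn proposer_signature_invalid() -> Self {
//             Self::BadSignature
//         }
//     }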
pub fn get_validator_pubkey_cache( chain: &BeaconChain, -) -> Result>, BlockError> { +) -> Result>, BeaconChainError> { chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) - .map_err(BlockError::BeaconChainError) } /// Produces an _empty_ `BlockSignatureVerifier`. @@ -1890,14 +2077,14 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>( /// Verify that `header` was signed with a valid signature from its proposer. /// /// Return `Ok(())` if the signature is valid, and an `Err` otherwise. -fn verify_header_signature( +pub fn verify_header_signature( chain: &BeaconChain, header: &SignedBeaconBlockHeader, -) -> Result<(), BlockError> { +) -> Result<(), Err> { let proposer_pubkey = get_validator_pubkey_cache(chain)? .get(header.message.proposer_index as usize) .cloned() - .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; + .ok_or(Err::unknown_validator_error(header.message.proposer_index))?; let head_fork = chain.canonical_head.cached_head().head_fork(); if header.verify_signature::( @@ -1908,7 +2095,7 @@ fn verify_header_signature( ) { Ok(()) } else { - Err(BlockError::ProposalSignatureInvalid) + Err(Err::proposer_signature_invalid()) } } diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs new file mode 100644 index 000000000..a6840ed76 --- /dev/null +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -0,0 +1,533 @@ +use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList}; +use crate::block_verification::BlockError; +use crate::data_availability_checker::AvailabilityCheckError; +pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; +use crate::eth1_finalization_cache::Eth1FinalizationData; +use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome}; +use derivative::Derivative; +use ssz_types::VariableList; +use state_processing::ConsensusContext; +use std::sync::Arc; +use types::blob_sidecar::{BlobIdentifier, BlobSidecarError, FixedBlobSidecarList}; +use types::{ + BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, +}; + +/// A block that has been received over RPC. It has 2 internal variants: +/// +/// 1. `BlockAndBlobs`: A fully available post deneb block with all the blobs available. This variant +/// is only constructed after making consistency checks between blocks and blobs. +/// Hence, it is fully self contained w.r.t verification. i.e. this block has all the required +/// data to get verified and imported into fork choice. +/// +/// 2. `Block`: This can be a fully available pre-deneb block **or** a post-deneb block that may or may +/// not require blobs to be considered fully available. +/// +/// Note: We make a distinction over blocks received over gossip because +/// in a post-deneb world, the blobs corresponding to a given block that are received +/// over rpc do not contain the proposer signature for dos resistance. 
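+// Editor's illustration (hypothetical call sites; the constructors are defined below):
+//
+//     // post-Deneb: consistency-checked construction from a block plus its blobs
+//     let with_blobs = RpcBlock::new(None, block, Some(blobs))?;
+//
+//     // pre-Deneb, or blobs not required: no consistency checks needed
+//     let without_blobs = RpcBlock::new_without_blobs(None, block);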
+#[derive(Debug, Clone, Derivative)] +#[derivative(Hash(bound = "E: EthSpec"))] +pub struct RpcBlock { + block_root: Hash256, + block: RpcBlockInner, +} + +impl RpcBlock { + pub fn block_root(&self) -> Hash256 { + self.block_root + } + + pub fn as_block(&self) -> &SignedBeaconBlock { + match &self.block { + RpcBlockInner::Block(block) => block, + RpcBlockInner::BlockAndBlobs(block, _) => block, + } + } + + pub fn blobs(&self) -> Option<&BlobSidecarList> { + match &self.block { + RpcBlockInner::Block(_) => None, + RpcBlockInner::BlockAndBlobs(_, blobs) => Some(blobs), + } + } +} + +/// Note: This variant is intentionally private because we want to safely construct the +/// internal variants after applying consistency checks to ensure that the block and blobs +/// are consistent with respect to each other. +#[derive(Debug, Clone, Derivative)] +#[derivative(Hash(bound = "E: EthSpec"))] +enum RpcBlockInner { + /// Single block lookup response. This should potentially hit the data availability cache. + Block(Arc>), + /// This variant is used with parent lookups and by-range responses. It should have all blobs + /// ordered, all block roots matching, and the correct number of blobs for this block. + BlockAndBlobs(Arc>, BlobSidecarList), +} + +impl RpcBlock { + /// Constructs a `Block` variant. + pub fn new_without_blobs( + block_root: Option, + block: Arc>, + ) -> Self { + let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + + Self { + block_root, + block: RpcBlockInner::Block(block), + } + } + + /// Constructs a new `BlockAndBlobs` variant after making consistency + /// checks between the provided blocks and blobs. + pub fn new( + block_root: Option, + block: Arc>, + blobs: Option>, + ) -> Result { + let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + + if let (Some(blobs), Ok(block_commitments)) = ( + blobs.as_ref(), + block.message().body().blob_kzg_commitments(), + ) { + if blobs.len() != block_commitments.len() { + return Err(AvailabilityCheckError::MissingBlobs); + } + for (blob, &block_commitment) in blobs.iter().zip(block_commitments.iter()) { + let blob_commitment = blob.kzg_commitment; + if blob_commitment != block_commitment { + return Err(AvailabilityCheckError::KzgCommitmentMismatch { + block_commitment, + blob_commitment, + }); + } + } + } + let inner = match blobs { + Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs), + None => RpcBlockInner::Block(block), + }; + Ok(Self { + block_root, + block: inner, + }) + } + + pub fn new_from_fixed( + block_root: Hash256, + block: Arc>, + blobs: FixedBlobSidecarList, + ) -> Result { + let filtered = blobs + .into_iter() + .filter_map(|b| b.clone()) + .collect::>(); + let blobs = if filtered.is_empty() { + None + } else { + Some(VariableList::from(filtered)) + }; + Self::new(Some(block_root), block, blobs) + } + + pub fn deconstruct( + self, + ) -> ( + Hash256, + Arc>, + Option>, + ) { + let block_root = self.block_root(); + match self.block { + RpcBlockInner::Block(block) => (block_root, block, None), + RpcBlockInner::BlockAndBlobs(block, blobs) => (block_root, block, Some(blobs)), + } + } + pub fn n_blobs(&self) -> usize { + match &self.block { + RpcBlockInner::Block(_) => 0, + RpcBlockInner::BlockAndBlobs(_, blobs) => blobs.len(), + } + } +} + +/// A block that has gone through all pre-deneb block processing checks including block processing +/// and execution by an EL client. This block hasn't necessarily completed data availability checks. +/// +/// +/// It contains 2 variants: +/// 1. 
`Available`: This block has been executed and also contains all data to consider it a +/// fully available block. i.e. for post-deneb, this implies that this contains all the +/// required blobs. +/// 2. `AvailabilityPending`: This block hasn't received all required blobs to consider it a +/// fully available block. +pub enum ExecutedBlock { + Available(AvailableExecutedBlock), + AvailabilityPending(AvailabilityPendingExecutedBlock), +} + +impl ExecutedBlock { + pub fn new( + block: MaybeAvailableBlock, + import_data: BlockImportData, + payload_verification_outcome: PayloadVerificationOutcome, + ) -> Self { + match block { + MaybeAvailableBlock::Available(available_block) => { + Self::Available(AvailableExecutedBlock::new( + available_block, + import_data, + payload_verification_outcome, + )) + } + MaybeAvailableBlock::AvailabilityPending { + block_root: _, + block: pending_block, + } => Self::AvailabilityPending(AvailabilityPendingExecutedBlock::new( + pending_block, + import_data, + payload_verification_outcome, + )), + } + } + + pub fn as_block(&self) -> &SignedBeaconBlock { + match self { + Self::Available(available) => available.block.block(), + Self::AvailabilityPending(pending) => &pending.block, + } + } + + pub fn block_root(&self) -> Hash256 { + match self { + ExecutedBlock::AvailabilityPending(pending) => pending.import_data.block_root, + ExecutedBlock::Available(available) => available.import_data.block_root, + } + } +} + +/// A block that has completed all pre-deneb block processing checks including verification +/// by an EL client **and** has all requisite blob data to be imported into fork choice. +#[derive(PartialEq)] +pub struct AvailableExecutedBlock { + pub block: AvailableBlock, + pub import_data: BlockImportData, + pub payload_verification_outcome: PayloadVerificationOutcome, +} + +impl AvailableExecutedBlock { + pub fn new( + block: AvailableBlock, + import_data: BlockImportData, + payload_verification_outcome: PayloadVerificationOutcome, + ) -> Self { + Self { + block, + import_data, + payload_verification_outcome, + } + } + + pub fn get_all_blob_ids(&self) -> Vec { + let num_blobs_expected = self + .block + .message() + .body() + .blob_kzg_commitments() + .map_or(0, |commitments| commitments.len()); + let mut blob_ids = Vec::with_capacity(num_blobs_expected); + for i in 0..num_blobs_expected { + blob_ids.push(BlobIdentifier { + block_root: self.import_data.block_root, + index: i as u64, + }); + } + blob_ids + } +} + +/// A block that has completed all pre-deneb block processing checks, verification +/// by an EL client but does not have all requisite blob data to get imported into +/// fork choice. 
+pub struct AvailabilityPendingExecutedBlock { + pub block: Arc>, + pub import_data: BlockImportData, + pub payload_verification_outcome: PayloadVerificationOutcome, +} + +impl AvailabilityPendingExecutedBlock { + pub fn new( + block: Arc>, + import_data: BlockImportData, + payload_verification_outcome: PayloadVerificationOutcome, + ) -> Self { + Self { + block, + import_data, + payload_verification_outcome, + } + } + + pub fn as_block(&self) -> &SignedBeaconBlock { + &self.block + } + + pub fn num_blobs_expected(&self) -> usize { + self.block + .message() + .body() + .blob_kzg_commitments() + .map_or(0, |commitments| commitments.len()) + } +} + +#[derive(Debug, PartialEq)] +pub struct BlockImportData { + pub block_root: Hash256, + pub state: BeaconState, + pub parent_block: SignedBeaconBlock>, + pub parent_eth1_finalization_data: Eth1FinalizationData, + pub confirmed_state_roots: Vec, + pub consensus_context: ConsensusContext, +} + +pub type GossipVerifiedBlockContents = + (GossipVerifiedBlock, Option>); + +#[derive(Debug)] +pub enum BlockContentsError { + BlockError(BlockError), + BlobError(GossipBlobError), + SidecarError(BlobSidecarError), +} + +impl From> for BlockContentsError { + fn from(value: BlockError) -> Self { + Self::BlockError(value) + } +} + +impl From> for BlockContentsError { + fn from(value: GossipBlobError) -> Self { + Self::BlobError(value) + } +} + +impl std::fmt::Display for BlockContentsError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlockContentsError::BlockError(err) => { + write!(f, "BlockError({})", err) + } + BlockContentsError::BlobError(err) => { + write!(f, "BlobError({})", err) + } + BlockContentsError::SidecarError(err) => { + write!(f, "SidecarError({:?})", err) + } + } + } +} + +/// Trait for common block operations. 
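+// Editor's sketch (hypothetical helper) of writing code that is generic over the block
+// representation via the trait below:
+//
+//     fn describe<E: EthSpec, B: AsBlock<E>>(block: &B) -> String {
+//         format!("slot {} root {:?}", block.slot(), block.canonical_root())
+//     }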
+pub trait AsBlock { + fn slot(&self) -> Slot; + fn epoch(&self) -> Epoch; + fn parent_root(&self) -> Hash256; + fn state_root(&self) -> Hash256; + fn signed_block_header(&self) -> SignedBeaconBlockHeader; + fn message(&self) -> BeaconBlockRef; + fn as_block(&self) -> &SignedBeaconBlock; + fn block_cloned(&self) -> Arc>; + fn canonical_root(&self) -> Hash256; + fn into_rpc_block(self) -> RpcBlock; +} + +impl AsBlock for Arc> { + fn slot(&self) -> Slot { + SignedBeaconBlock::slot(self) + } + + fn epoch(&self) -> Epoch { + SignedBeaconBlock::epoch(self) + } + + fn parent_root(&self) -> Hash256 { + SignedBeaconBlock::parent_root(self) + } + + fn state_root(&self) -> Hash256 { + SignedBeaconBlock::state_root(self) + } + + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + SignedBeaconBlock::signed_block_header(self) + } + + fn message(&self) -> BeaconBlockRef { + SignedBeaconBlock::message(self) + } + + fn as_block(&self) -> &SignedBeaconBlock { + self + } + + fn block_cloned(&self) -> Arc> { + Arc::>::clone(self) + } + + fn canonical_root(&self) -> Hash256 { + SignedBeaconBlock::canonical_root(self) + } + + fn into_rpc_block(self) -> RpcBlock { + RpcBlock::new_without_blobs(None, self) + } +} + +impl AsBlock for MaybeAvailableBlock { + fn slot(&self) -> Slot { + self.as_block().slot() + } + fn epoch(&self) -> Epoch { + self.as_block().epoch() + } + fn parent_root(&self) -> Hash256 { + self.as_block().parent_root() + } + fn state_root(&self) -> Hash256 { + self.as_block().state_root() + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.as_block().signed_block_header() + } + fn message(&self) -> BeaconBlockRef { + self.as_block().message() + } + fn as_block(&self) -> &SignedBeaconBlock { + match &self { + MaybeAvailableBlock::Available(block) => block.as_block(), + MaybeAvailableBlock::AvailabilityPending { + block_root: _, + block, + } => block, + } + } + fn block_cloned(&self) -> Arc> { + match &self { + MaybeAvailableBlock::Available(block) => block.block_cloned(), + MaybeAvailableBlock::AvailabilityPending { + block_root: _, + block, + } => block.clone(), + } + } + fn canonical_root(&self) -> Hash256 { + self.as_block().canonical_root() + } + + fn into_rpc_block(self) -> RpcBlock { + match self { + MaybeAvailableBlock::Available(available_block) => available_block.into_rpc_block(), + MaybeAvailableBlock::AvailabilityPending { block_root, block } => { + RpcBlock::new_without_blobs(Some(block_root), block) + } + } + } +} + +impl AsBlock for AvailableBlock { + fn slot(&self) -> Slot { + self.block().slot() + } + + fn epoch(&self) -> Epoch { + self.block().epoch() + } + + fn parent_root(&self) -> Hash256 { + self.block().parent_root() + } + + fn state_root(&self) -> Hash256 { + self.block().state_root() + } + + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.block().signed_block_header() + } + + fn message(&self) -> BeaconBlockRef { + self.block().message() + } + + fn as_block(&self) -> &SignedBeaconBlock { + self.block() + } + + fn block_cloned(&self) -> Arc> { + AvailableBlock::block_cloned(self) + } + + fn canonical_root(&self) -> Hash256 { + self.block().canonical_root() + } + + fn into_rpc_block(self) -> RpcBlock { + let (block_root, block, blobs_opt) = self.deconstruct(); + // Circumvent the constructor here, because an Available block will have already had + // consistency checks performed. 
+ let inner = match blobs_opt { + None => RpcBlockInner::Block(block), + Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs), + }; + RpcBlock { + block_root, + block: inner, + } + } +} + +impl AsBlock for RpcBlock { + fn slot(&self) -> Slot { + self.as_block().slot() + } + fn epoch(&self) -> Epoch { + self.as_block().epoch() + } + fn parent_root(&self) -> Hash256 { + self.as_block().parent_root() + } + fn state_root(&self) -> Hash256 { + self.as_block().state_root() + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.as_block().signed_block_header() + } + fn message(&self) -> BeaconBlockRef { + self.as_block().message() + } + fn as_block(&self) -> &SignedBeaconBlock { + match &self.block { + RpcBlockInner::Block(block) => block, + RpcBlockInner::BlockAndBlobs(block, _) => block, + } + } + fn block_cloned(&self) -> Arc> { + match &self.block { + RpcBlockInner::Block(block) => block.clone(), + RpcBlockInner::BlockAndBlobs(block, _) => block.clone(), + } + } + fn canonical_root(&self) -> Hash256 { + self.as_block().canonical_root() + } + + fn into_rpc_block(self) -> RpcBlock { + self + } +} diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 54739f2b8..330036d43 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,6 @@ use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_proposer_cache::BeaconProposerCache; +use crate::data_availability_checker::DataAvailabilityChecker; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; @@ -9,7 +11,7 @@ use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; use crate::timeout_rw_lock::TimeoutRwLock; -use crate::validator_monitor::ValidatorMonitor; +use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::ChainConfig; use crate::{ @@ -20,11 +22,12 @@ use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; +use kzg::{Kzg, TrustedSetup}; use operation_pool::{OperationPool, PersistedOperationPool}; -use parking_lot::RwLock; +use parking_lot::{Mutex, RwLock}; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; -use slog::{crit, debug, error, info, Logger}; +use slog::{crit, debug, error, info, o, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_slot_processing; use std::marker::PhantomData; @@ -33,8 +36,8 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, - PublicKeyBytes, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, Signature, + SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing @@ -91,11 +94,12 @@ pub struct BeaconChainBuilder { log: Option, graffiti: Graffiti, slasher: Option>>, - validator_monitor: Option>, // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. pending_io_batch: Vec, + trusted_setup: Option, task_executor: Option, + validator_monitor_config: Option, } impl @@ -132,9 +136,10 @@ where log: None, graffiti: Graffiti::default(), slasher: None, - validator_monitor: None, pending_io_batch: vec![], + trusted_setup: None, task_executor: None, + validator_monitor_config: None, } } @@ -392,6 +397,11 @@ where .init_anchor_info(genesis.beacon_block.message(), retain_historic_states) .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?, ); + self.pending_io_batch.push( + store + .init_blob_info(genesis.beacon_block.slot()) + .map_err(|e| format!("Failed to initialize genesis blob info: {:?}", e))?, + ); let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; @@ -515,6 +525,11 @@ where .init_anchor_info(weak_subj_block.message(), retain_historic_states) .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, ); + self.pending_io_batch.push( + store + .init_blob_info(weak_subj_block.slot()) + .map_err(|e| format!("Failed to initialize blob info: {:?}", e))?, + ); // Store pruning checkpoint to prevent attempting to prune before the anchor state. self.pending_io_batch @@ -609,19 +624,13 @@ where /// Register some validators for additional monitoring. /// /// `validators` is a comma-separated string of 0x-formatted BLS pubkeys. - pub fn monitor_validators( - mut self, - auto_register: bool, - validators: Vec, - individual_metrics_threshold: usize, - log: Logger, - ) -> Self { - self.validator_monitor = Some(ValidatorMonitor::new( - validators, - auto_register, - individual_metrics_threshold, - log.clone(), - )); + pub fn validator_monitor_config(mut self, config: ValidatorMonitorConfig) -> Self { + self.validator_monitor_config = Some(config); + self + } + + pub fn trusted_setup(mut self, trusted_setup: TrustedSetup) -> Self { + self.trusted_setup = Some(trusted_setup); self } @@ -652,11 +661,16 @@ where let genesis_state_root = self .genesis_state_root .ok_or("Cannot build without a genesis state root")?; - let mut validator_monitor = self - .validator_monitor - .ok_or("Cannot build without a validator monitor")?; + let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); let head_tracker = Arc::new(self.head_tracker.unwrap_or_default()); + let beacon_proposer_cache: Arc> = <_>::default(); + let mut validator_monitor = ValidatorMonitor::new( + validator_monitor_config, + beacon_proposer_cache.clone(), + log.new(o!("service" => "val_mon")), + ); + let current_slot = if slot_clock .is_prior_to_genesis() .ok_or("Unable to read slot clock")? @@ -666,6 +680,15 @@ where slot_clock.now().ok_or("Unable to read slot")? 
}; + let kzg = if let Some(trusted_setup) = self.trusted_setup { + let kzg = Kzg::new_from_trusted_setup(trusted_setup) + .map_err(|e| format!("Failed to load trusted setup: {:?}", e))?; + let kzg_arc = Arc::new(kzg); + Some(kzg_arc) + } else { + None + }; + let initial_head_block_root = fork_choice .get_head(current_slot, &self.spec) .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; @@ -763,6 +786,7 @@ where validator_monitor.process_valid_state( slot.epoch(TEthSpec::slots_per_epoch()), &head_snapshot.beacon_state, + &self.spec, ); } @@ -781,10 +805,11 @@ where // // This *must* be stored before constructing the `BeaconChain`, so that its `Drop` instance // doesn't write a `PersistedBeaconChain` without the rest of the batch. + let head_tracker_reader = head_tracker.0.read(); self.pending_io_batch.push(BeaconChain::< Witness, >::persist_head_in_batch_standalone( - genesis_block_root, &head_tracker + genesis_block_root, &head_tracker_reader )); self.pending_io_batch.push(BeaconChain::< Witness, @@ -795,6 +820,7 @@ where .hot_db .do_atomically(self.pending_io_batch) .map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?; + drop(head_tracker_reader); let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); let genesis_time = head_snapshot.beacon_state.genesis_time(); @@ -826,14 +852,14 @@ where }; let beacon_chain = BeaconChain { - spec: self.spec, + spec: self.spec.clone(), config: self.chain_config, - store, + store: store.clone(), task_executor: self .task_executor .ok_or("Cannot build without task executor")?, store_migrator, - slot_clock, + slot_clock: slot_clock.clone(), op_pool: self.op_pool.ok_or("Cannot build without op pool")?, // TODO: allow for persisting and loading the pool from disk. naive_aggregation_pool: <_>::default(), @@ -855,6 +881,8 @@ where observed_sync_aggregators: <_>::default(), // TODO: allow for persisting and loading the pool from disk. observed_block_producers: <_>::default(), + observed_blob_sidecars: <_>::default(), + observed_slashable: <_>::default(), observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), @@ -882,7 +910,7 @@ where log.clone(), )), eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), - beacon_proposer_cache: <_>::default(), + beacon_proposer_cache, block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), @@ -896,6 +924,12 @@ where slasher: self.slasher.clone(), validator_monitor: RwLock::new(validator_monitor), genesis_backfill_slot, + data_availability_checker: Arc::new( + DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, &log, self.spec) + .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?, + ), + kzg, + block_production_state: Arc::new(Mutex::new(None)), }; let head = beacon_chain.head_snapshot(); @@ -958,6 +992,13 @@ where ); } + // Prune blobs older than the blob data availability boundary in the background. 
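+            // Editor's note (approximation, not from this diff): the boundary is roughly
+            //
+            //     max(deneb_fork_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS)
+            //
+            // i.e. blobs older than the retention window no longer need to be served over
+            // RPC and can be pruned.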
+ if let Some(data_availability_boundary) = beacon_chain.data_availability_boundary() { + beacon_chain + .store_migrator + .process_prune_blobs(data_availability_boundary); + } + Ok(beacon_chain) } } @@ -1055,7 +1096,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { #[cfg(test)] mod test { use super::*; - use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; + use crate::test_utils::EphemeralHarnessType; use ethereum_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, @@ -1069,6 +1110,7 @@ mod test { use types::{EthSpec, MinimalEthSpec, Slot}; type TestEthSpec = MinimalEthSpec; + type Builder = BeaconChainBuilder>; fn get_logger() -> Logger { let builder = NullLoggerBuilder; @@ -1101,7 +1143,7 @@ mod test { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let runtime = TestRuntime::default(); - let chain = BeaconChainBuilder::new(MinimalEthSpec) + let chain = Builder::new(MinimalEthSpec) .logger(log.clone()) .store(Arc::new(store)) .task_executor(runtime.task_executor.clone()) @@ -1112,12 +1154,6 @@ mod test { .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) - .monitor_validators( - true, - vec![], - DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, - log.clone(), - ) .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 7fa5b0152..ced4eda05 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -984,6 +984,20 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_blob_sidecars.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + + self.observed_slashable.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .map(|mut snapshot_cache| { @@ -1051,6 +1065,12 @@ impl BeaconChain { self.head_tracker.clone(), )?; + // Prune blobs in the background. + if let Some(data_availability_boundary) = self.data_availability_boundary() { + self.store_migrator + .process_prune_blobs(data_availability_boundary); + } + // Take a write-lock on the canonical head and signal for it to prune. self.canonical_head.fork_choice_write_lock().prune()?; diff --git a/beacon_node/beacon_chain/src/capella_readiness.rs b/beacon_node/beacon_chain/src/capella_readiness.rs index bb729d899..cde71d462 100644 --- a/beacon_node/beacon_chain/src/capella_readiness.rs +++ b/beacon_node/beacon_chain/src/capella_readiness.rs @@ -1,5 +1,4 @@ -//! Provides tools for checking if a node is ready for the Capella upgrade and following merge -//! transition. +//! Provides tools for checking if a node is ready for the Capella upgrade. 
use crate::{BeaconChain, BeaconChainTypes}; use execution_layer::http::{ diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index bccc3732c..7bcb764ab 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -112,7 +112,7 @@ impl Default for ChainConfig { shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, genesis_backfill: false, always_prepare_payload: false, - progressive_balances_mode: ProgressiveBalancesMode::Checked, + progressive_balances_mode: ProgressiveBalancesMode::Fast, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs new file mode 100644 index 000000000..21cac9a26 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -0,0 +1,644 @@ +use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, KzgVerifiedBlobList}; +use crate::block_verification_types::{ + AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, +}; +pub use crate::data_availability_checker::availability_view::{ + AvailabilityView, GetCommitment, GetCommitments, +}; +pub use crate::data_availability_checker::child_components::ChildComponents; +use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache; +use crate::data_availability_checker::processing_cache::ProcessingCache; +use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; +use kzg::Kzg; +use parking_lot::RwLock; +pub use processing_cache::ProcessingComponents; +use slasher::test_utils::E; +use slog::{debug, error, Logger}; +use slot_clock::SlotClock; +use std::fmt; +use std::fmt::Debug; +use std::num::NonZeroUsize; +use std::sync::Arc; +use task_executor::TaskExecutor; +use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; +use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; +use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; + +mod availability_view; +mod child_components; +mod error; +mod overflow_lru_cache; +mod processing_cache; +mod state_lru_cache; + +pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; +use types::non_zero_usize::new_non_zero_usize; + +/// The LRU Cache stores `PendingComponents` which can store up to +/// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So +/// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this +/// to 1024 means the maximum size of the cache is ~ 0.8 GB. But the cache +/// will target a size of less than 75% of capacity. +pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(1024); +/// Until tree-states is implemented, we can't store very many states in memory :( +pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(2); +pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); + +/// This includes a cache for any blocks or blobs that have been received over gossip or RPC +/// and are awaiting more components before they can be imported. Additionally, the +/// `DataAvailabilityChecker` is responsible for KZG verification of block components as well as +/// checking whether an "availability check" is required at all.
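Before the struct itself, a rough sketch of how a caller is expected to drive the checker during gossip processing (hedged: `import_block` and `request_more` are hypothetical stand-ins for the caller's logic, and error handling is elided):

// Hypothetical call site: hand a gossip-verified blob to the checker and act
// on whether the block's component set is now complete.
match checker.put_gossip_blob(gossip_blob)? {
    // Every KZG commitment in the block is matched by a verified blob and the
    // block itself has been seen: ready for import into fork choice.
    Availability::Available(executed_block) => import_block(*executed_block),
    // Still waiting on the block and/or further blobs for this root.
    Availability::MissingComponents(block_root) => request_more(block_root),
}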
+pub struct DataAvailabilityChecker { + processing_cache: RwLock>, + availability_cache: Arc>, + slot_clock: T::SlotClock, + kzg: Option>, + log: Logger, + spec: ChainSpec, +} + +/// This type is returned after adding a block / blob to the `DataAvailabilityChecker`. +/// +/// Indicates if the block is fully `Available` or if we need blobs or blocks +/// to "complete" the requirements for an `AvailableBlock`. +#[derive(PartialEq)] +pub enum Availability { + MissingComponents(Hash256), + Available(Box>), +} + +impl Debug for Availability { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::MissingComponents(block_root) => { + write!(f, "MissingComponents({})", block_root) + } + Self::Available(block) => write!(f, "Available({:?})", block.import_data.block_root), + } + } +} + +impl DataAvailabilityChecker { + pub fn new( + slot_clock: T::SlotClock, + kzg: Option>, + store: BeaconStore, + log: &Logger, + spec: ChainSpec, + ) -> Result { + let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?; + Ok(Self { + processing_cache: <_>::default(), + availability_cache: Arc::new(overflow_cache), + slot_clock, + log: log.clone(), + kzg, + spec, + }) + } + + /// Checks if the given block root is cached. + pub fn has_block(&self, block_root: &Hash256) -> bool { + self.processing_cache.read().has_block(block_root) + } + + /// Get the processing info for a block. + pub fn get_processing_components( + &self, + block_root: Hash256, + ) -> Option> { + self.processing_cache.read().get(&block_root).cloned() + } + + /// A `None` indicates blobs are not required. + /// + /// If there's no block, all possible ids will be returned that don't exist in the given blobs. + /// If there are no blobs, all possible ids will be returned. + pub fn get_missing_blob_ids>( + &self, + block_root: Hash256, + availability_view: &V, + ) -> MissingBlobs { + let Some(current_slot) = self.slot_clock.now_or_genesis() else { + error!( + self.log, + "Failed to read slot clock when checking for missing blob ids" + ); + return MissingBlobs::BlobsNotRequired; + }; + + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + if self.da_check_required_for_epoch(current_epoch) { + match availability_view.get_cached_block() { + Some(cached_block) => { + let block_commitments = cached_block.get_commitments(); + let blob_commitments = availability_view.get_cached_blobs(); + + let num_blobs_expected = block_commitments.len(); + let mut blob_ids = Vec::with_capacity(num_blobs_expected); + + // Zip here will always limit the number of iterations to the size of + // `block_commitments` because `blob_commitments` will always be populated + // with `Option` values up to `MAX_BLOBS_PER_BLOCK`. + for (index, (block_commitment, blob_commitment_opt)) in block_commitments + .into_iter() + .zip(blob_commitments.iter()) + .enumerate() + { + // Always add a missing blob. + let Some(blob_commitment) = blob_commitment_opt else { + blob_ids.push(BlobIdentifier { + block_root, + index: index as u64, + }); + continue; + }; + + let blob_commitment = *blob_commitment.get_commitment(); + + // Check for consistency, but this shouldn't happen; an availability view + // should guarantee consistency.
+ if blob_commitment != block_commitment { + error!(self.log, + "Inconsistent availability view"; + "block_root" => ?block_root, + "block_commitment" => ?block_commitment, + "blob_commitment" => ?blob_commitment, + "index" => index + ); + blob_ids.push(BlobIdentifier { + block_root, + index: index as u64, + }); + } + } + MissingBlobs::KnownMissing(blob_ids) + } + None => { + MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::(block_root)) + } + } + } else { + MissingBlobs::BlobsNotRequired + } + } + + /// Get a blob from the availability cache. + pub fn get_blob( + &self, + blob_id: &BlobIdentifier, + ) -> Result>>, AvailabilityCheckError> { + self.availability_cache.peek_blob(blob_id) + } + + /// Put a list of blobs received via RPC into the availability cache. This performs KZG + /// verification on the blobs in the list. + pub fn put_rpc_blobs( + &self, + block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> Result, AvailabilityCheckError> { + let Some(kzg) = self.kzg.as_ref() else { + return Err(AvailabilityCheckError::KzgNotInitialized); + }; + + let verified_blobs = KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg) + .map_err(AvailabilityCheckError::Kzg)?; + + self.availability_cache + .put_kzg_verified_blobs(block_root, verified_blobs) + } + + /// Check if we've cached other blobs for this block. If it completes a set and we also + /// have a block cached, return the `Availability` variant triggering block import. + /// Otherwise cache the blob sidecar. + /// + /// This should only accept gossip-verified blobs, so we should not have to worry about dupes. + pub fn put_gossip_blob( + &self, + gossip_blob: GossipVerifiedBlob, + ) -> Result, AvailabilityCheckError> { + self.availability_cache + .put_kzg_verified_blobs(gossip_blob.block_root(), vec![gossip_blob.into_inner()]) + } + + /// Check if we have all the blobs for a block. Returns `Availability` which has information + /// about whether all components have been received or more are required. + pub fn put_pending_executed_block( + &self, + executed_block: AvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + self.availability_cache + .put_pending_executed_block(executed_block) + } + + /// Verifies KZG commitments for an `RpcBlock` and returns a `MaybeAvailableBlock` that may + /// include the fully available block. + /// + /// WARNING: This function assumes all required blobs are already present; it does NOT + /// check if there are any missing blobs. + pub fn verify_kzg_for_rpc_block( + &self, + block: RpcBlock, + ) -> Result, AvailabilityCheckError> { + let (block_root, block, blobs) = block.deconstruct(); + match blobs { + None => { + if self.blobs_required_for_block(&block) { + Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block }) + } else { + Ok(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: None, + })) + } + } + Some(blob_list) => { + let verified_blobs = if self.blobs_required_for_block(&block) { + let kzg = self + .kzg + .as_ref() + .ok_or(AvailabilityCheckError::KzgNotInitialized)?; + verify_kzg_for_blob_list(blob_list.iter(), kzg) + .map_err(AvailabilityCheckError::Kzg)?; + Some(blob_list) + } else { + None + }; + Ok(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: verified_blobs, + })) + } + } + } + + /// Checks whether a vector of blocks is available.
Returns a vector of `MaybeAvailableBlock`s. + /// This is more efficient than calling `verify_kzg_for_rpc_block` in a loop, as it does + /// all KZG verification at once. + /// + /// WARNING: This function assumes all required blobs are already present; it does NOT + /// check if there are any missing blobs. + pub fn verify_kzg_for_rpc_blocks( + &self, + blocks: Vec>, + ) -> Result>, AvailabilityCheckError> { + let mut results = Vec::with_capacity(blocks.len()); + let all_blobs: BlobSidecarList = blocks + .iter() + .filter(|block| self.blobs_required_for_block(block.as_block())) + // this clone is cheap as it's cloning an Arc + .filter_map(|block| block.blobs().cloned()) + .flatten() + .collect::>() + .into(); + + // verify kzg for all blobs at once + if !all_blobs.is_empty() { + let kzg = self + .kzg + .as_ref() + .ok_or(AvailabilityCheckError::KzgNotInitialized)?; + verify_kzg_for_blob_list(all_blobs.iter(), kzg)?; + } + + for block in blocks { + let (block_root, block, blobs) = block.deconstruct(); + match blobs { + None => { + if self.blobs_required_for_block(&block) { + results.push(MaybeAvailableBlock::AvailabilityPending { block_root, block }) + } else { + results.push(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: None, + })) + } + } + Some(blob_list) => { + let verified_blobs = if self.blobs_required_for_block(&block) { + Some(blob_list) + } else { + None + }; + // already verified kzg for all blobs + results.push(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: verified_blobs, + })) + } + } + } + + Ok(results) + } + + /// Determines the blob requirements for a block. If the block is pre-Deneb, no blobs are required. + /// If the block's epoch is prior to the data availability boundary, no blobs are required. + fn blobs_required_for_block(&self, block: &SignedBeaconBlock) -> bool { + block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch()) + } + + /// Adds block commitments to the processing cache. These commitments are unverified but caching + /// them here is useful to avoid duplicate downloads of blocks, as well as understanding + /// our blob download requirements. + pub fn notify_block_commitments( + &self, + slot: Slot, + block_root: Hash256, + commitments: KzgCommitments, + ) { + self.processing_cache + .write() + .entry(block_root) + .or_insert_with(|| ProcessingComponents::new(slot)) + .merge_block(commitments); + } + + /// Adds a single blob commitment to the processing cache. This commitment is unverified but caching + /// it here is useful to avoid duplicate downloads of blobs, as well as understanding + /// our block and blob download requirements. + pub fn notify_gossip_blob( + &self, + slot: Slot, + block_root: Hash256, + blob: &GossipVerifiedBlob, + ) { + let index = blob.index(); + let commitment = blob.kzg_commitment(); + self.processing_cache + .write() + .entry(block_root) + .or_insert_with(|| ProcessingComponents::new(slot)) + .merge_single_blob(index as usize, commitment); + } + + /// Adds blob commitments to the processing cache. These commitments are unverified but caching + /// them here is useful to avoid duplicate downloads of blobs, as well as understanding + /// our block and blob download requirements.
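An aside on `verify_kzg_for_rpc_blocks` above: collecting every blob in the batch into a single `verify_kzg_for_blob_list` call lets the KZG library use batch verification, which can be considerably cheaper than verifying each block's blobs separately. A hedged sketch of the intended call site, where `rpc_blocks`, `process` and `request_blobs` are hypothetical:

// Hypothetical range-sync caller: one verification pass for the whole batch.
for maybe_available in checker.verify_kzg_for_rpc_blocks(rpc_blocks)? {
    match maybe_available {
        MaybeAvailableBlock::Available(block) => process(block),
        // Blobs are required but were not attached; fetch them by root.
        MaybeAvailableBlock::AvailabilityPending { block_root, .. } => request_blobs(block_root),
    }
}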
+ pub fn notify_rpc_blobs( + &self, + slot: Slot, + block_root: Hash256, + blobs: &FixedBlobSidecarList, + ) { + let mut commitments = KzgCommitmentOpts::::default(); + for blob in blobs.iter().flatten() { + if let Some(commitment) = commitments.get_mut(blob.index as usize) { + *commitment = Some(blob.kzg_commitment); + } + } + self.processing_cache + .write() + .entry(block_root) + .or_insert_with(|| ProcessingComponents::new(slot)) + .merge_blobs(commitments); + } + + /// Clears the block and all blobs from the processing cache for a given root, if they exist. + pub fn remove_notified(&self, block_root: &Hash256) { + self.processing_cache.write().remove(block_root) + } + + /// Gather all block roots for which we are not currently processing all components for the + /// given slot. + pub fn incomplete_processing_components(&self, slot: Slot) -> Vec { + self.processing_cache + .read() + .incomplete_processing_components(slot) + } + + /// The epoch at which we require a data availability check in block processing. + /// `None` if the `Deneb` fork is disabled. + pub fn data_availability_boundary(&self) -> Option { + self.spec.deneb_fork_epoch.and_then(|fork_epoch| { + self.slot_clock + .now() + .map(|slot|
slot.epoch(T::EthSpec::slots_per_epoch())) + else { + continue; + }; + + if current_epoch < deneb_fork_epoch { + // we are not in deneb yet + continue; + } + + let finalized_epoch = chain + .canonical_head + .fork_choice_read_lock() + .finalized_checkpoint() + .epoch; + // any data belonging to an epoch before this should be pruned + let cutoff_epoch = std::cmp::max( + finalized_epoch + 1, + std::cmp::max( + current_epoch + .saturating_sub(chain.spec.min_epochs_for_blob_sidecars_requests), + deneb_fork_epoch, + ), + ); + + if let Err(e) = overflow_cache.do_maintenance(cutoff_epoch) { + error!(chain.log, "Failed to maintain availability cache"; "error" => ?e); + } + } + None => { + error!(chain.log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + tokio::time::sleep(chain.slot_clock.slot_duration()).await; + } + }; + } +} + +/// A fully available block that is ready to be imported into fork choice. +#[derive(Clone, Debug, PartialEq)] +pub struct AvailableBlock { + block_root: Hash256, + block: Arc>, + blobs: Option>, +} + +impl AvailableBlock { + pub fn block(&self) -> &SignedBeaconBlock { + &self.block + } + pub fn block_cloned(&self) -> Arc> { + self.block.clone() + } + + pub fn blobs(&self) -> Option<&BlobSidecarList> { + self.blobs.as_ref() + } + + pub fn deconstruct( + self, + ) -> ( + Hash256, + Arc>, + Option>, + ) { + let AvailableBlock { + block_root, + block, + blobs, + } = self; + (block_root, block, blobs) + } +} + +#[derive(Debug, Clone)] +pub enum MaybeAvailableBlock { + /// This variant is fully available. + /// i.e. for pre-Deneb blocks, it contains a (`SignedBeaconBlock`, `Blobs::None`) and for + /// post-Deneb blocks, it contains a `SignedBeaconBlock` and a `Blobs` variant other than `Blobs::None`. + Available(AvailableBlock), + /// This variant is not fully available and requires blobs to become fully available. + AvailabilityPending { + block_root: Hash256, + block: Arc>, + }, +} + +#[derive(Debug, Clone)] +pub enum MissingBlobs { + /// We know for certain these blobs are missing. + KnownMissing(Vec), + /// We think these blobs might be missing. + PossibleMissing(Vec), + /// Blobs are not required.
+ BlobsNotRequired, +} + +impl MissingBlobs { + pub fn new_without_block(block_root: Hash256, is_deneb: bool) -> Self { + if is_deneb { + MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::(block_root)) + } else { + MissingBlobs::BlobsNotRequired + } + } + pub fn is_empty(&self) -> bool { + match self { + MissingBlobs::KnownMissing(v) => v.is_empty(), + MissingBlobs::PossibleMissing(v) => v.is_empty(), + MissingBlobs::BlobsNotRequired => true, + } + } + pub fn contains(&self, blob_id: &BlobIdentifier) -> bool { + match self { + MissingBlobs::KnownMissing(v) => v.contains(blob_id), + MissingBlobs::PossibleMissing(v) => v.contains(blob_id), + MissingBlobs::BlobsNotRequired => false, + } + } + pub fn remove(&mut self, blob_id: &BlobIdentifier) { + match self { + MissingBlobs::KnownMissing(v) => v.retain(|id| id != blob_id), + MissingBlobs::PossibleMissing(v) => v.retain(|id| id != blob_id), + MissingBlobs::BlobsNotRequired => {} + } + } + pub fn indices(&self) -> Vec { + match self { + MissingBlobs::KnownMissing(v) => v.iter().map(|id| id.index).collect(), + MissingBlobs::PossibleMissing(v) => v.iter().map(|id| id.index).collect(), + MissingBlobs::BlobsNotRequired => vec![], + } + } +} + +impl Into> for MissingBlobs { + fn into(self) -> Vec { + match self { + MissingBlobs::KnownMissing(v) => v, + MissingBlobs::PossibleMissing(v) => v, + MissingBlobs::BlobsNotRequired => vec![], + } + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs new file mode 100644 index 000000000..776f81ee5 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs @@ -0,0 +1,553 @@ +use super::child_components::ChildComponents; +use super::state_lru_cache::DietAvailabilityPendingExecutedBlock; +use crate::blob_verification::KzgVerifiedBlob; +use crate::block_verification_types::AsBlock; +use crate::data_availability_checker::overflow_lru_cache::PendingComponents; +use crate::data_availability_checker::ProcessingComponents; +use kzg::KzgCommitment; +use ssz_types::FixedVector; +use std::sync::Arc; +use types::beacon_block_body::KzgCommitments; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; + +/// Defines an interface for managing data availability with two key invariants: +/// +/// 1. If we haven't seen a block yet, we will insert the first blob for a given (block_root, index) +/// but we won't insert subsequent blobs for the same (block_root, index) if they have a different +/// commitment. +/// 2. On block insertion, any non-matching blob commitments are evicted. +/// +/// Types implementing this trait can be used for validating and managing availability +/// of blocks and blobs in a cache-like data structure. +pub trait AvailabilityView { + /// The type representing a block in the implementation. + type BlockType: GetCommitments; + + /// The type representing a blob in the implementation. Must implement `Clone`. + type BlobType: Clone + GetCommitment; + + /// Returns an immutable reference to the cached block. + fn get_cached_block(&self) -> &Option; + + /// Returns an immutable reference to the fixed vector of cached blobs. + fn get_cached_blobs(&self) -> &FixedVector, E::MaxBlobsPerBlock>; + + /// Returns a mutable reference to the cached block. + fn get_cached_block_mut(&mut self) -> &mut Option; + + /// Returns a mutable reference to the fixed vector of cached blobs. 
+ fn get_cached_blobs_mut( + &mut self, + ) -> &mut FixedVector, E::MaxBlobsPerBlock>; + + /// Checks if a block exists in the cache. + /// + /// Returns: + /// - `true` if a block exists. + /// - `false` otherwise. + fn block_exists(&self) -> bool { + self.get_cached_block().is_some() + } + + /// Checks if a blob exists at the given index in the cache. + /// + /// Returns: + /// - `true` if a blob exists at the given index. + /// - `false` otherwise. + fn blob_exists(&self, blob_index: usize) -> bool { + self.get_cached_blobs() + .get(blob_index) + .map(|b| b.is_some()) + .unwrap_or(false) + } + + /// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a + /// block. + /// + /// This corresponds to the number of commitments that are present in a block. + fn num_expected_blobs(&self) -> Option { + self.get_cached_block() + .as_ref() + .map(|b| b.get_commitments().len()) + } + + /// Returns the number of blobs that have been received and are stored in the cache. + fn num_received_blobs(&self) -> usize { + self.get_cached_blobs().iter().flatten().count() + } + + /// Inserts a block into the cache. + fn insert_block(&mut self, block: Self::BlockType) { + *self.get_cached_block_mut() = Some(block) + } + + /// Inserts a blob at a specific index in the cache. + /// + /// Existing blob at the index will be replaced. + fn insert_blob_at_index(&mut self, blob_index: usize, blob: Self::BlobType) { + if let Some(b) = self.get_cached_blobs_mut().get_mut(blob_index) { + *b = Some(blob); + } + } + + /// Merges a given set of blobs into the cache. + /// + /// Blobs are only inserted if: + /// 1. The blob entry at the index is empty and no block exists. + /// 2. The block exists and its commitment matches the blob's commitment. + fn merge_blobs(&mut self, blobs: FixedVector, E::MaxBlobsPerBlock>) { + for (index, blob) in blobs.iter().cloned().enumerate() { + let Some(blob) = blob else { continue }; + self.merge_single_blob(index, blob); + } + } + + /// Merges a single blob into the cache. + /// + /// Blobs are only inserted if: + /// 1. The blob entry at the index is empty and no block exists, or + /// 2. The block exists and its commitment matches the blob's commitment. + fn merge_single_blob(&mut self, index: usize, blob: Self::BlobType) { + let commitment = *blob.get_commitment(); + if let Some(cached_block) = self.get_cached_block() { + let block_commitment_opt = cached_block.get_commitments().get(index).copied(); + if let Some(block_commitment) = block_commitment_opt { + if block_commitment == commitment { + self.insert_blob_at_index(index, blob) + } + } + } else if !self.blob_exists(index) { + self.insert_blob_at_index(index, blob) + } + } + + /// Inserts a new block and revalidates the existing blobs against it. + /// + /// Blobs that don't match the new block's commitments are evicted. + fn merge_block(&mut self, block: Self::BlockType) { + self.insert_block(block); + let reinsert = std::mem::take(self.get_cached_blobs_mut()); + self.merge_blobs(reinsert); + } + + /// Checks if the block and all of its expected blobs are available in the cache. + /// + /// Returns `true` if both the block exists and the number of received blobs matches the number + /// of expected blobs. + fn is_available(&self) -> bool { + if let Some(num_expected_blobs) = self.num_expected_blobs() { + num_expected_blobs == self.num_received_blobs() + } else { + false + } + } +} + +/// Implements the `AvailabilityView` trait for a given struct. 
+/// +/// - `$struct_name`: The name of the struct for which to implement `AvailabilityView`. +/// - `$block_type`: The type to use for `BlockType` in the `AvailabilityView` trait. +/// - `$blob_type`: The type to use for `BlobType` in the `AvailabilityView` trait. +/// - `$block_field`: The field name in the struct that holds the cached block. +/// - `$blob_field`: The field name in the struct that holds the cached blobs. +#[macro_export] +macro_rules! impl_availability_view { + ($struct_name:ident, $block_type:ty, $blob_type:ty, $block_field:ident, $blob_field:ident) => { + impl AvailabilityView for $struct_name { + type BlockType = $block_type; + type BlobType = $blob_type; + + fn get_cached_block(&self) -> &Option { + &self.$block_field + } + + fn get_cached_blobs( + &self, + ) -> &FixedVector, E::MaxBlobsPerBlock> { + &self.$blob_field + } + + fn get_cached_block_mut(&mut self) -> &mut Option { + &mut self.$block_field + } + + fn get_cached_blobs_mut( + &mut self, + ) -> &mut FixedVector, E::MaxBlobsPerBlock> { + &mut self.$blob_field + } + } + }; +} + +impl_availability_view!( + ProcessingComponents, + KzgCommitments, + KzgCommitment, + block_commitments, + blob_commitments +); + +impl_availability_view!( + PendingComponents, + DietAvailabilityPendingExecutedBlock, + KzgVerifiedBlob, + executed_block, + verified_blobs +); + +impl_availability_view!( + ChildComponents, + Arc>, + Arc>, + downloaded_block, + downloaded_blobs +); + +pub trait GetCommitments { + fn get_commitments(&self) -> KzgCommitments; +} + +pub trait GetCommitment { + fn get_commitment(&self) -> &KzgCommitment; +} + +// These implementations are required to implement `AvailabilityView` for `ProcessingView`. +impl GetCommitments for KzgCommitments { + fn get_commitments(&self) -> KzgCommitments { + self.clone() + } +} +impl GetCommitment for KzgCommitment { + fn get_commitment(&self) -> &KzgCommitment { + self + } +} + +// These implementations are required to implement `AvailabilityView` for `PendingComponents`. +impl GetCommitments for DietAvailabilityPendingExecutedBlock { + fn get_commitments(&self) -> KzgCommitments { + self.as_block() + .message() + .body() + .blob_kzg_commitments() + .cloned() + .unwrap_or_default() + } +} + +impl GetCommitment for KzgVerifiedBlob { + fn get_commitment(&self) -> &KzgCommitment { + &self.as_blob().kzg_commitment + } +} + +// These implementations are required to implement `AvailabilityView` for `ChildComponents`. 
+impl GetCommitments for Arc> { + fn get_commitments(&self) -> KzgCommitments { + self.message() + .body() + .blob_kzg_commitments() + .ok() + .cloned() + .unwrap_or_default() + } +} +impl GetCommitment for Arc> { + fn get_commitment(&self) -> &KzgCommitment { + &self.kzg_commitment + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::block_verification_types::BlockImportData; + use crate::eth1_finalization_cache::Eth1FinalizationData; + use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use crate::AvailabilityPendingExecutedBlock; + use crate::PayloadVerificationOutcome; + use fork_choice::PayloadVerificationStatus; + use rand::rngs::StdRng; + use rand::SeedableRng; + use state_processing::ConsensusContext; + use types::test_utils::TestRandom; + use types::{BeaconState, ChainSpec, ForkName, MainnetEthSpec, Slot}; + + type E = MainnetEthSpec; + + type Setup = ( + SignedBeaconBlock, + FixedVector>>, ::MaxBlobsPerBlock>, + FixedVector>>, ::MaxBlobsPerBlock>, + ); + + pub fn pre_setup() -> Setup { + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let (block, blobs_vec) = + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); + let mut blobs: FixedVector<_, ::MaxBlobsPerBlock> = FixedVector::default(); + + for blob in blobs_vec { + if let Some(b) = blobs.get_mut(blob.index as usize) { + *b = Some(Arc::new(blob)); + } + } + + let mut invalid_blobs: FixedVector< + Option>>, + ::MaxBlobsPerBlock, + > = FixedVector::default(); + for (index, blob) in blobs.iter().enumerate() { + if let Some(invalid_blob) = blob { + let mut blob_copy = invalid_blob.as_ref().clone(); + blob_copy.kzg_commitment = KzgCommitment::random_for_test(&mut rng); + *invalid_blobs.get_mut(index).unwrap() = Some(Arc::new(blob_copy)); + } + } + + (block, blobs, invalid_blobs) + } + + type ProcessingViewSetup = ( + KzgCommitments, + FixedVector, ::MaxBlobsPerBlock>, + FixedVector, ::MaxBlobsPerBlock>, + ); + + pub fn setup_processing_components( + block: SignedBeaconBlock, + valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + ) -> ProcessingViewSetup { + let commitments = block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .clone(); + let blobs = FixedVector::from( + valid_blobs + .iter() + .map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment)) + .collect::>(), + ); + let invalid_blobs = FixedVector::from( + invalid_blobs + .iter() + .map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment)) + .collect::>(), + ); + (commitments, blobs, invalid_blobs) + } + + type PendingComponentsSetup = ( + DietAvailabilityPendingExecutedBlock, + FixedVector>, ::MaxBlobsPerBlock>, + FixedVector>, ::MaxBlobsPerBlock>, + ); + + pub fn setup_pending_components( + block: SignedBeaconBlock, + valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + ) -> PendingComponentsSetup { + let blobs = FixedVector::from( + valid_blobs + .iter() + .map(|blob_opt| { + blob_opt + .as_ref() + .map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone())) + }) + .collect::>(), + ); + let invalid_blobs = FixedVector::from( + invalid_blobs + .iter() + .map(|blob_opt| { + blob_opt + .as_ref() + .map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone())) + }) + .collect::>(), + ); + let dummy_parent = block.clone_as_blinded(); + let block = AvailabilityPendingExecutedBlock { + block: Arc::new(block), + import_data: BlockImportData { + block_root: Default::default(), + 
state: BeaconState::new(0, Default::default(), &ChainSpec::minimal()), + parent_block: dummy_parent, + parent_eth1_finalization_data: Eth1FinalizationData { + eth1_data: Default::default(), + eth1_deposit_index: 0, + }, + confirmed_state_roots: vec![], + consensus_context: ConsensusContext::new(Slot::new(0)), + }, + payload_verification_outcome: PayloadVerificationOutcome { + payload_verification_status: PayloadVerificationStatus::Verified, + is_valid_merge_transition_block: false, + }, + }; + (block.into(), blobs, invalid_blobs) + } + + type ChildComponentsSetup = ( + Arc>, + FixedVector>>, ::MaxBlobsPerBlock>, + FixedVector>>, ::MaxBlobsPerBlock>, + ); + + pub fn setup_child_components( + block: SignedBeaconBlock, + valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + ) -> ChildComponentsSetup { + let blobs = FixedVector::from(valid_blobs.into_iter().cloned().collect::>()); + let invalid_blobs = + FixedVector::from(invalid_blobs.into_iter().cloned().collect::>()); + (Arc::new(block), blobs, invalid_blobs) + } + + pub fn assert_cache_consistent>(cache: V) { + if let Some(cached_block) = cache.get_cached_block() { + let cached_block_commitments = cached_block.get_commitments(); + for index in 0..E::max_blobs_per_block() { + let block_commitment = cached_block_commitments.get(index).copied(); + let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap(); + let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment()); + assert_eq!(block_commitment, blob_commitment); + } + } else { + panic!("No cached block") + } + } + + pub fn assert_empty_blob_cache>(cache: V) { + for blob in cache.get_cached_blobs().iter() { + assert!(blob.is_none()); + } + } + + #[macro_export] + macro_rules! 
generate_tests { + ($module_name:ident, $type_name:ty, $block_field:ident, $blob_field:ident, $setup_fn:ident) => { + mod $module_name { + use super::*; + use types::Hash256; + + #[test] + fn valid_block_invalid_blobs_valid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_block(block_commitments); + cache.merge_blobs(random_blobs); + cache.merge_blobs(blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn invalid_blobs_block_valid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_blobs(random_blobs); + cache.merge_block(block_commitments); + cache.merge_blobs(blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn invalid_blobs_valid_blobs_block() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_blobs(random_blobs); + cache.merge_blobs(blobs); + cache.merge_block(block_commitments); + + assert_empty_blob_cache(cache); + } + + #[test] + fn block_valid_blobs_invalid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_block(block_commitments); + cache.merge_blobs(blobs); + cache.merge_blobs(random_blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn valid_blobs_block_invalid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_blobs(blobs); + cache.merge_block(block_commitments); + cache.merge_blobs(random_blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn valid_blobs_invalid_blobs_block() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_blobs(blobs); + cache.merge_blobs(random_blobs); + cache.merge_block(block_commitments); + + assert_cache_consistent(cache); + } + } + }; + } + + generate_tests!( + processing_components_tests, + ProcessingComponents::, + kzg_commitments, + processing_blobs, + setup_processing_components + ); + generate_tests!( + pending_components_tests, + PendingComponents, + executed_block, + verified_blobs, + setup_pending_components + ); + generate_tests!( + child_component_tests, + ChildComponents::, + downloaded_block, + downloaded_blobs, + setup_child_components + ); +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs b/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs new file mode 100644 index 000000000..028bf9d67 --- /dev/null +++ 
b/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs @@ -0,0 +1,54 @@ +use crate::block_verification_types::RpcBlock; +use crate::data_availability_checker::AvailabilityView; +use bls::Hash256; +use std::sync::Arc; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{EthSpec, SignedBeaconBlock}; + +/// For requests triggered by an `UnknownBlockParent` or `UnknownBlobParent`, this struct +/// is used to cache components as they are sent to the network service. We can't use the +/// data availability cache currently because any blocks or blobs without parents +/// won't pass validation and therefore won't make it into the cache. +pub struct ChildComponents { + pub block_root: Hash256, + pub downloaded_block: Option>>, + pub downloaded_blobs: FixedBlobSidecarList, +} + +impl From> for ChildComponents { + fn from(value: RpcBlock) -> Self { + let (block_root, block, blobs) = value.deconstruct(); + let fixed_blobs = blobs.map(|blobs| { + FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()) + }); + Self::new(block_root, Some(block), fixed_blobs) + } +} + +impl ChildComponents { + pub fn empty(block_root: Hash256) -> Self { + Self { + block_root, + downloaded_block: None, + downloaded_blobs: <_>::default(), + } + } + pub fn new( + block_root: Hash256, + block: Option>>, + blobs: Option>, + ) -> Self { + let mut cache = Self::empty(block_root); + if let Some(block) = block { + cache.merge_block(block); + } + if let Some(blobs) = blobs { + cache.merge_blobs(blobs); + } + cache + } + + pub fn clear_blobs(&mut self) { + self.downloaded_blobs = FixedBlobSidecarList::default(); + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs new file mode 100644 index 000000000..0804fe3b9 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -0,0 +1,79 @@ +use kzg::{Error as KzgError, KzgCommitment}; +use types::{BeaconStateError, Hash256}; + +#[derive(Debug)] +pub enum Error { + Kzg(KzgError), + KzgNotInitialized, + KzgVerificationFailed, + KzgCommitmentMismatch { + blob_commitment: KzgCommitment, + block_commitment: KzgCommitment, + }, + Unexpected, + SszTypes(ssz_types::Error), + MissingBlobs, + BlobIndexInvalid(u64), + StoreError(store::Error), + DecodeError(ssz::DecodeError), + ParentStateMissing(Hash256), + BlockReplayError(state_processing::BlockReplayError), + RebuildingStateCaches(BeaconStateError), +} + +pub enum ErrorCategory { + /// Internal Errors (not caused by peers) + Internal, + /// Errors caused by faulty / malicious peers + Malicious, +} + +impl Error { + pub fn category(&self) -> ErrorCategory { + match self { + Error::KzgNotInitialized + | Error::SszTypes(_) + | Error::MissingBlobs + | Error::StoreError(_) + | Error::DecodeError(_) + | Error::Unexpected + | Error::ParentStateMissing(_) + | Error::BlockReplayError(_) + | Error::RebuildingStateCaches(_) => ErrorCategory::Internal, + Error::Kzg(_) + | Error::BlobIndexInvalid(_) + | Error::KzgCommitmentMismatch { .. 
} + | Error::KzgVerificationFailed => ErrorCategory::Malicious, + } + } +} + +impl From for Error { + fn from(value: ssz_types::Error) -> Self { + Self::SszTypes(value) + } +} + +impl From for Error { + fn from(value: store::Error) -> Self { + Self::StoreError(value) + } +} + +impl From for Error { + fn from(value: ssz::DecodeError) -> Self { + Self::DecodeError(value) + } +} + +impl From for Error { + fn from(value: state_processing::BlockReplayError) -> Self { + Self::BlockReplayError(value) + } +} + +impl From for Error { + fn from(value: KzgError) -> Self { + Self::Kzg(value) + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs new file mode 100644 index 000000000..34c9bc76f --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -0,0 +1,1655 @@ +//! This module implements a LRU cache for storing partially available blocks and blobs. +//! When the cache overflows, the least recently used items are persisted to the database. +//! This prevents lighthouse from using too much memory storing unfinalized blocks and blobs +//! if the chain were to lose finality. +//! +//! ## Deadlock safety +//! +//! The main object in this module is the `OverflowLruCache`. It contains two locks: +//! +//! - `self.critical` is an `RwLock` that protects content stored in memory. +//! - `self.maintenance_lock` is held when moving data between memory and disk. +//! +//! You mostly need to ensure that you don't try to hold the critical lock more than once +//! +//! ## Basic Algorithm +//! +//! As blocks and blobs come in from the network, their components are stored in memory in +//! this cache. When a block becomes fully available, it is removed from the cache and +//! imported into fork-choice. Blocks/blobs that remain unavailable will linger in the +//! cache until they are older than the finalized epoch or older than the data availability +//! cutoff. In the event the chain is not finalizing, the cache will eventually overflow and +//! the least recently used items will be persisted to disk. When this happens, we will still +//! store the hash of the block in memory so we always know we have data for that block +//! without needing to check the database. +//! +//! When the client is shut down, all pending components are persisted in the database. +//! On startup, the keys of these components are stored in memory and will be loaded in +//! the cache when they are accessed. 
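The structure described above is a two-tier cache: an in-memory LRU backed by an on-disk overflow, plus a set of roots recording what has been spilled. A minimal sketch of the spill discipline under simplified types (`u64` roots, `String` values, infallible persistence; the real cache uses `Hash256` roots and the hot database):

use lru::LruCache;
use std::collections::HashSet;
use std::num::NonZeroUsize;

struct TwoTierCache {
    in_memory: LruCache<u64, String>,
    store_keys: HashSet<u64>, // roots known to live on disk
}

impl TwoTierCache {
    fn new(cap: NonZeroUsize) -> Self {
        Self { in_memory: LruCache::new(cap), store_keys: HashSet::new() }
    }

    fn put(&mut self, root: u64, value: String, persist: impl Fn(u64, &String)) {
        // At capacity, spill the least recently used entry to disk first and
        // remember its key so reads know to fall through to the store.
        if self.in_memory.len() == self.in_memory.cap().get() {
            if let Some((lru_root, lru_value)) = self.in_memory.pop_lru() {
                persist(lru_root, &lru_value);
                self.store_keys.insert(lru_root);
            }
        }
        self.in_memory.put(root, value);
    }
}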
+ +use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; +use crate::beacon_chain::BeaconStore; +use crate::blob_verification::KzgVerifiedBlob; +use crate::block_verification_types::{ + AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock, +}; +use crate::data_availability_checker::availability_view::AvailabilityView; +use crate::data_availability_checker::{Availability, AvailabilityCheckError}; +use crate::store::{DBColumn, KeyValueStore}; +use crate::BeaconChainTypes; +use lru::LruCache; +use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use std::num::NonZeroUsize; +use std::{collections::HashSet, sync::Arc}; +use types::blob_sidecar::BlobIdentifier; +use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256}; + +/// This represents the components of a partially available block. +/// +/// The blobs are all gossip- and KZG-verified. +/// The block has completed all verifications except the availability check. +#[derive(Encode, Decode, Clone)] +pub struct PendingComponents { + pub block_root: Hash256, + pub verified_blobs: FixedVector>, T::MaxBlobsPerBlock>, + pub executed_block: Option>, +} + +impl PendingComponents { + pub fn empty(block_root: Hash256) -> Self { + Self { + block_root, + verified_blobs: FixedVector::default(), + executed_block: None, + } + } + + /// Verifies a `SignedBeaconBlock` against a set of KZG-verified blobs. + /// This does not check whether a block *should* have blobs; those checks should have been + /// completed when producing the `AvailabilityPendingBlock`. + /// + /// WARNING: This function can potentially take a lot of time if the state needs to be + /// reconstructed from disk. Ensure you are not holding any write locks while calling this. + pub fn make_available(self, recover: R) -> Result, AvailabilityCheckError> + where + R: FnOnce( + DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError>, + { + let Self { + block_root, + verified_blobs, + executed_block, + } = self; + + let Some(diet_executed_block) = executed_block else { + return Err(AvailabilityCheckError::Unexpected); + }; + let num_blobs_expected = diet_executed_block.num_blobs_expected(); + let Some(verified_blobs) = verified_blobs + .into_iter() + .cloned() + .map(|b| b.map(|b| b.to_blob())) + .take(num_blobs_expected) + .collect::>>() + else { + return Err(AvailabilityCheckError::Unexpected); + }; + let verified_blobs = VariableList::new(verified_blobs)?; + + let executed_block = recover(diet_executed_block)?; + + let AvailabilityPendingExecutedBlock { + block, + import_data, + payload_verification_outcome, + } = executed_block; + + let available_block = AvailableBlock { + block_root, + block, + blobs: Some(verified_blobs), + }; + Ok(Availability::Available(Box::new( + AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome), + ))) + } + + pub fn epoch(&self) -> Option { + self.executed_block + .as_ref() + .map(|pending_block| pending_block.as_block().epoch()) + .or_else(|| { + for maybe_blob in self.verified_blobs.iter() { + if maybe_blob.is_some() { + return maybe_blob.as_ref().map(|kzg_verified_blob| { + kzg_verified_blob + .as_blob() + .slot() + .epoch(T::slots_per_epoch()) + }); + } + } + None + }) + } +} + +/// Blocks and blobs are stored in the database sequentially so that it's +/// fast to iterate over all the data for a particular block.
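Concretely, as the `ssz::Encode`/`ssz::Decode` impls at the bottom of this file show, every key is the 32-byte block root followed by one suffix byte: `0` marks the block and `index + 1` marks blob `index`. An illustrative sketch of that 33-byte layout (not the module's actual API):

// `root` stands in for a 32-byte block root.
fn block_key(root: [u8; 32]) -> [u8; 33] {
    let mut key = [0u8; 33];
    key[..32].copy_from_slice(&root);
    key[32] = 0; // block marker
    key
}

fn blob_key(root: [u8; 32], index: u8) -> [u8; 33] {
    let mut key = [0u8; 33];
    key[..32].copy_from_slice(&root);
    // Blob indices are shifted by one so 0 stays reserved for the block;
    // all entries for a root therefore sort adjacently in the column. Indices
    // are tiny (bounded by MAX_BLOBS_PER_BLOCK), so the +1 cannot overflow.
    key[32] = index + 1;
    key
}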
+#[derive(Debug, PartialEq)] +enum OverflowKey { + Block(Hash256), + Blob(Hash256, u8), +} + +impl OverflowKey { + pub fn from_block_root(block_root: Hash256) -> Self { + Self::Block(block_root) + } + + pub fn from_blob_id( + blob_id: BlobIdentifier, + ) -> Result { + if blob_id.index > E::max_blobs_per_block() as u64 || blob_id.index > u8::MAX as u64 { + return Err(AvailabilityCheckError::BlobIndexInvalid(blob_id.index)); + } + Ok(Self::Blob(blob_id.block_root, blob_id.index as u8)) + } + + pub fn root(&self) -> &Hash256 { + match self { + Self::Block(root) => root, + Self::Blob(root, _) => root, + } + } +} + +/// A wrapper around BeaconStore that implements various +/// methods used for saving and retrieving blocks / blobs +/// from the store (for organization) +struct OverflowStore(BeaconStore); + +impl OverflowStore { + /// Store pending components in the database + pub fn persist_pending_components( + &self, + block_root: Hash256, + mut pending_components: PendingComponents, + ) -> Result<(), AvailabilityCheckError> { + let col = DBColumn::OverflowLRUCache; + + if let Some(block) = pending_components.executed_block.take() { + let key = OverflowKey::from_block_root(block_root); + self.0 + .hot_db + .put_bytes(col.as_str(), &key.as_ssz_bytes(), &block.as_ssz_bytes())? + } + + for blob in Vec::from(pending_components.verified_blobs) + .into_iter() + .flatten() + { + let key = OverflowKey::from_blob_id::(BlobIdentifier { + block_root, + index: blob.blob_index(), + })?; + + self.0 + .hot_db + .put_bytes(col.as_str(), &key.as_ssz_bytes(), &blob.as_ssz_bytes())? + } + + Ok(()) + } + + /// Load the pending components that we have in the database for a given block root + pub fn load_pending_components( + &self, + block_root: Hash256, + ) -> Result>, AvailabilityCheckError> { + // read everything from disk and reconstruct + let mut maybe_pending_components = None; + for res in self + .0 + .hot_db + .iter_raw_entries(DBColumn::OverflowLRUCache, block_root.as_bytes()) + { + let (key_bytes, value_bytes) = res?; + match OverflowKey::from_ssz_bytes(&key_bytes)? { + OverflowKey::Block(_) => { + maybe_pending_components + .get_or_insert_with(|| PendingComponents::empty(block_root)) + .executed_block = + Some(DietAvailabilityPendingExecutedBlock::from_ssz_bytes( + value_bytes.as_slice(), + )?); + } + OverflowKey::Blob(_, index) => { + *maybe_pending_components + .get_or_insert_with(|| PendingComponents::empty(block_root)) + .verified_blobs + .get_mut(index as usize) + .ok_or(AvailabilityCheckError::BlobIndexInvalid(index as u64))? = + Some(KzgVerifiedBlob::from_ssz_bytes(value_bytes.as_slice())?); + } + } + } + + Ok(maybe_pending_components) + } + + /// Returns the hashes of all the blocks we have any data for on disk + pub fn read_keys_on_disk(&self) -> Result, AvailabilityCheckError> { + let mut disk_keys = HashSet::new(); + for res in self.0.hot_db.iter_raw_keys(DBColumn::OverflowLRUCache, &[]) { + let key_bytes = res?; + disk_keys.insert(*OverflowKey::from_ssz_bytes(&key_bytes)?.root()); + } + Ok(disk_keys) + } + + /// Load a single blob from the database + pub fn load_blob( + &self, + blob_id: &BlobIdentifier, + ) -> Result>>, AvailabilityCheckError> { + let key = OverflowKey::from_blob_id::(*blob_id)?; + + self.0 + .hot_db + .get_bytes(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())? 
+ .map(|blob_bytes| Arc::>::from_ssz_bytes(blob_bytes.as_slice())) + .transpose() + .map_err(|e| e.into()) + } + + /// Delete a set of keys from the database + pub fn delete_keys(&self, keys: &Vec) -> Result<(), AvailabilityCheckError> { + for key in keys { + self.0 + .hot_db + .key_delete(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())?; + } + Ok(()) + } +} + +/// This data stores the *critical* data that we need to keep in memory +/// protected by the RWLock +struct Critical { + /// This is the LRU cache of pending components + pub in_memory: LruCache>, + /// This holds all the roots of the blocks for which we have + /// `PendingComponents` in the database. + pub store_keys: HashSet, +} + +impl Critical { + pub fn new(capacity: NonZeroUsize) -> Self { + Self { + in_memory: LruCache::new(capacity), + store_keys: HashSet::new(), + } + } + + pub fn reload_store_keys( + &mut self, + overflow_store: &OverflowStore, + ) -> Result<(), AvailabilityCheckError> { + let disk_keys = overflow_store.read_keys_on_disk()?; + self.store_keys = disk_keys; + Ok(()) + } + + /// This only checks for the blobs in memory + pub fn peek_blob( + &self, + blob_id: &BlobIdentifier, + ) -> Result>>, AvailabilityCheckError> { + if let Some(pending_components) = self.in_memory.peek(&blob_id.block_root) { + Ok(pending_components + .verified_blobs + .get(blob_id.index as usize) + .ok_or(AvailabilityCheckError::BlobIndexInvalid(blob_id.index))? + .as_ref() + .map(|blob| blob.clone_blob())) + } else { + Ok(None) + } + } + + /// Puts the pending components in the LRU cache. If the cache + /// is at capacity, the LRU entry is written to the store first + pub fn put_pending_components( + &mut self, + block_root: Hash256, + pending_components: PendingComponents, + overflow_store: &OverflowStore, + ) -> Result<(), AvailabilityCheckError> { + if self.in_memory.len() == self.in_memory.cap().get() { + // cache will overflow, must write lru entry to disk + if let Some((lru_key, lru_value)) = self.in_memory.pop_lru() { + overflow_store.persist_pending_components(lru_key, lru_value)?; + self.store_keys.insert(lru_key); + } + } + self.in_memory.put(block_root, pending_components); + Ok(()) + } + + /// Removes and returns the pending_components corresponding to + /// the `block_root` or `None` if it does not exist + pub fn pop_pending_components( + &mut self, + block_root: Hash256, + store: &OverflowStore, + ) -> Result>, AvailabilityCheckError> { + match self.in_memory.pop_entry(&block_root) { + Some((_, pending_components)) => Ok(Some(pending_components)), + None => { + // not in memory, is it in the store? + if self.store_keys.remove(&block_root) { + // We don't need to remove the data from the store as we have removed it from + // `store_keys` so we won't go looking for it on disk. The maintenance thread + // will remove it from disk the next time it runs. + store.load_pending_components(block_root) + } else { + Ok(None) + } + } + } + } +} + +/// This is the main struct for this module. Outside methods should +/// interact with the cache through this. +pub struct OverflowLRUCache { + /// Contains all the data we keep in memory, protected by an RwLock + critical: RwLock>, + /// This is how we read and write components to the disk + overflow_store: OverflowStore, + /// This cache holds a limited number of states in memory and reconstructs them + /// from disk when necessary. 
This is necessary until we merge tree-states + state_cache: StateLRUCache, + /// Mutex to guard maintenance methods which move data between disk and memory + maintenance_lock: Mutex<()>, + /// The capacity of the LRU cache + capacity: NonZeroUsize, +} + +impl OverflowLRUCache { + pub fn new( + capacity: NonZeroUsize, + beacon_store: BeaconStore, + spec: ChainSpec, + ) -> Result { + let overflow_store = OverflowStore(beacon_store.clone()); + let mut critical = Critical::new(capacity); + critical.reload_store_keys(&overflow_store)?; + Ok(Self { + critical: RwLock::new(critical), + overflow_store, + state_cache: StateLRUCache::new(beacon_store, spec), + maintenance_lock: Mutex::new(()), + capacity, + }) + } + + /// Fetch a blob from the cache without affecting the LRU ordering + pub fn peek_blob( + &self, + blob_id: &BlobIdentifier, + ) -> Result>>, AvailabilityCheckError> { + let read_lock = self.critical.read(); + if let Some(blob) = read_lock.peek_blob(blob_id)? { + Ok(Some(blob)) + } else if read_lock.store_keys.contains(&blob_id.block_root) { + drop(read_lock); + self.overflow_store.load_blob(blob_id) + } else { + Ok(None) + } + } + + pub fn put_kzg_verified_blobs>>( + &self, + block_root: Hash256, + kzg_verified_blobs: I, + ) -> Result, AvailabilityCheckError> { + let mut fixed_blobs = FixedVector::default(); + + for blob in kzg_verified_blobs { + if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) { + *blob_opt = Some(blob); + } + } + + let mut write_lock = self.critical.write(); + + // Grab existing entry or create a new entry. + let mut pending_components = write_lock + .pop_pending_components(block_root, &self.overflow_store)? + .unwrap_or_else(|| PendingComponents::empty(block_root)); + + // Merge in the blobs. + pending_components.merge_blobs(fixed_blobs); + + if pending_components.is_available() { + // No need to hold the write lock anymore + drop(write_lock); + pending_components.make_available(|diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) + } else { + write_lock.put_pending_components( + block_root, + pending_components, + &self.overflow_store, + )?; + Ok(Availability::MissingComponents(block_root)) + } + } + + /// Check if we have all the blobs for a block. If we do, return the Availability variant that + /// triggers import of the block. + pub fn put_pending_executed_block( + &self, + executed_block: AvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let mut write_lock = self.critical.write(); + let block_root = executed_block.import_data.block_root; + + // register the block to get the diet block + let diet_executed_block = self + .state_cache + .register_pending_executed_block(executed_block); + + // Grab existing entry or create a new entry. + let mut pending_components = write_lock + .pop_pending_components(block_root, &self.overflow_store)? + .unwrap_or_else(|| PendingComponents::empty(block_root)); + + // Merge in the block. + pending_components.merge_block(diet_executed_block); + + // Check if we have all components and entire set is consistent. 
+ if pending_components.is_available() { + // No need to hold the write lock anymore + drop(write_lock); + pending_components.make_available(|diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) + } else { + write_lock.put_pending_components( + block_root, + pending_components, + &self.overflow_store, + )?; + Ok(Availability::MissingComponents(block_root)) + } + } + + /// write all in memory objects to disk + pub fn write_all_to_disk(&self) -> Result<(), AvailabilityCheckError> { + let maintenance_lock = self.maintenance_lock.lock(); + let mut critical_lock = self.critical.write(); + + let mut swap_lru = LruCache::new(self.capacity); + std::mem::swap(&mut swap_lru, &mut critical_lock.in_memory); + + for (root, pending_components) in swap_lru.into_iter() { + self.overflow_store + .persist_pending_components(root, pending_components)?; + critical_lock.store_keys.insert(root); + } + + drop(critical_lock); + drop(maintenance_lock); + Ok(()) + } + + /// maintain the cache + pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { + // ensure memory usage is below threshold + let threshold = self.capacity.get() * 3 / 4; + self.maintain_threshold(threshold, cutoff_epoch)?; + // clean up any keys on the disk that shouldn't be there + self.prune_disk(cutoff_epoch)?; + // clean up any lingering states in the state cache + self.state_cache.do_maintenance(cutoff_epoch); + Ok(()) + } + + /// Enforce that the size of the cache is below a given threshold by + /// moving the least recently used items to disk. + fn maintain_threshold( + &self, + threshold: usize, + cutoff_epoch: Epoch, + ) -> Result<(), AvailabilityCheckError> { + // ensure only one thread at a time can be deleting things from the disk or + // moving things between memory and storage + let maintenance_lock = self.maintenance_lock.lock(); + + let mut stored = self.critical.read().in_memory.len(); + while stored > threshold { + let read_lock = self.critical.upgradable_read(); + let lru_entry = read_lock + .in_memory + .peek_lru() + .map(|(key, value)| (*key, value.clone())); + + let Some((lru_root, lru_pending_components)) = lru_entry else { + break; + }; + + if lru_pending_components + .epoch() + .map(|epoch| epoch < cutoff_epoch) + .unwrap_or(true) + { + // this data is no longer needed -> delete it + let mut write_lock = RwLockUpgradableReadGuard::upgrade(read_lock); + write_lock.in_memory.pop_entry(&lru_root); + stored = write_lock.in_memory.len(); + continue; + } else { + drop(read_lock); + } + + // write the lru entry to disk (we aren't holding any critical locks while we do this) + self.overflow_store + .persist_pending_components(lru_root, lru_pending_components)?; + // now that we've written to disk, grab the critical write lock + let mut write_lock = self.critical.write(); + if let Some((new_lru_root_ref, _)) = write_lock.in_memory.peek_lru() { + // need to ensure the entry we just wrote to disk wasn't updated + // while we were writing and is still the LRU entry + if *new_lru_root_ref == lru_root { + // it is still LRU entry -> delete it from memory & record that it's on disk + write_lock.in_memory.pop_entry(&lru_root); + write_lock.store_keys.insert(lru_root); + } + } + stored = write_lock.in_memory.len(); + drop(write_lock); + } + + drop(maintenance_lock); + Ok(()) + } + + /// Delete any data on disk that shouldn't be there. This can happen if + /// 1. The entry has been moved back to memory (or become fully available) + /// 2. 
The entry belongs to a block beyond the cutoff epoch + fn prune_disk(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { + // ensure only one thread at a time can be deleting things from the disk or + // moving things between memory and storage + let maintenance_lock = self.maintenance_lock.lock(); + + struct BlockData { + keys: Vec, + root: Hash256, + epoch: Epoch, + } + + let delete_if_outdated = |cache: &OverflowLRUCache, + block_data: Option| + -> Result<(), AvailabilityCheckError> { + let Some(block_data) = block_data else { + return Ok(()); + }; + let not_in_store_keys = !cache.critical.read().store_keys.contains(&block_data.root); + if not_in_store_keys { + // these keys aren't supposed to be on disk + cache.overflow_store.delete_keys(&block_data.keys)?; + } else { + // check this data is still relevant + if block_data.epoch < cutoff_epoch { + // this data is no longer needed -> delete it + self.overflow_store.delete_keys(&block_data.keys)?; + } + } + Ok(()) + }; + + let mut current_block_data: Option = None; + for res in self + .overflow_store + .0 + .hot_db + .iter_raw_entries(DBColumn::OverflowLRUCache, &[]) + { + let (key_bytes, value_bytes) = res?; + let overflow_key = OverflowKey::from_ssz_bytes(&key_bytes)?; + let current_root = *overflow_key.root(); + + match &mut current_block_data { + Some(block_data) if block_data.root == current_root => { + // still dealing with the same block + block_data.keys.push(overflow_key); + } + _ => { + // first time encountering data for this block + delete_if_outdated(self, current_block_data)?; + let current_epoch = match &overflow_key { + OverflowKey::Block(_) => { + DietAvailabilityPendingExecutedBlock::::from_ssz_bytes( + value_bytes.as_slice(), + )? + .as_block() + .epoch() + } + OverflowKey::Blob(_, _) => { + KzgVerifiedBlob::::from_ssz_bytes(value_bytes.as_slice())? 
+ .as_blob() + .slot() + .epoch(T::EthSpec::slots_per_epoch()) + } + }; + current_block_data = Some(BlockData { + keys: vec![overflow_key], + root: current_root, + epoch: current_epoch, + }); + } + } + } + // can't fall off the end + delete_if_outdated(self, current_block_data)?; + + drop(maintenance_lock); + Ok(()) + } + + #[cfg(test)] + /// get the state cache for inspection (used only for tests) + pub fn state_lru_cache(&self) -> &StateLRUCache { + &self.state_cache + } +} + +impl ssz::Encode for OverflowKey { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_append(&self, buf: &mut Vec) { + match self { + OverflowKey::Block(block_hash) => { + block_hash.ssz_append(buf); + buf.push(0u8) + } + OverflowKey::Blob(block_hash, index) => { + block_hash.ssz_append(buf); + buf.push(*index + 1) + } + } + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + 1 + } + + fn ssz_bytes_len(&self) -> usize { + match self { + Self::Block(root) => root.ssz_bytes_len() + 1, + Self::Blob(root, _) => root.ssz_bytes_len() + 1, + } + } +} + +impl ssz::Decode for OverflowKey { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + 1 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let h256_len = ::ssz_fixed_len(); + let expected = h256_len + 1; + + if len != expected { + Err(ssz::DecodeError::InvalidByteLength { len, expected }) + } else { + let root_bytes = bytes + .get(..h256_len) + .ok_or(ssz::DecodeError::OutOfBoundsByte { i: 0 })?; + let block_root = Hash256::from_ssz_bytes(root_bytes)?; + let id_byte = *bytes + .get(h256_len) + .ok_or(ssz::DecodeError::OutOfBoundsByte { i: h256_len })?; + match id_byte { + 0 => Ok(OverflowKey::Block(block_root)), + n => Ok(OverflowKey::Blob(block_root, n - 1)), + } + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + blob_verification::GossipVerifiedBlob, + block_verification::PayloadVerificationOutcome, + block_verification_types::{AsBlock, BlockImportData}, + data_availability_checker::STATE_LRU_CAPACITY, + eth1_finalization_cache::Eth1FinalizationData, + test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, + }; + use fork_choice::PayloadVerificationStatus; + use logging::test_logger; + use slog::{info, Logger}; + use state_processing::ConsensusContext; + use std::collections::{BTreeMap, HashMap, VecDeque}; + use std::ops::AddAssign; + use store::{HotColdDB, ItemStore, LevelDB, StoreConfig}; + use tempfile::{tempdir, TempDir}; + use types::non_zero_usize::new_non_zero_usize; + use types::{ChainSpec, ExecPayload, MinimalEthSpec}; + + const LOW_VALIDATOR_COUNT: usize = 32; + + fn get_store_with_spec( + db_path: &TempDir, + spec: ChainSpec, + log: Logger, + ) -> Arc, LevelDB>> { + let hot_path = db_path.path().join("hot_db"); + let cold_path = db_path.path().join("cold_db"); + let blobs_path = db_path.path().join("blobs_db"); + let config = StoreConfig::default(); + + HotColdDB::open( + &hot_path, + &cold_path, + &blobs_path, + |_, _, _| Ok(()), + config, + spec, + log, + ) + .expect("disk store should initialize") + } + + // get a beacon chain harness advanced to just before deneb fork + async fn get_deneb_chain( + log: Logger, + db_path: &TempDir, + ) -> BeaconChainHarness> { + let altair_fork_epoch = Epoch::new(1); + let bellatrix_fork_epoch = Epoch::new(2); + let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let capella_fork_epoch = Epoch::new(3); + let deneb_fork_epoch = Epoch::new(4); + let deneb_fork_slot = 
deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + spec.deneb_fork_epoch = Some(deneb_fork_epoch); + + let chain_store = get_store_with_spec::(db_path, spec.clone(), log.clone()); + let validators_keypairs = + types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.clone()) + .logger(log.clone()) + .keypairs(validators_keypairs) + .fresh_disk_store(chain_store) + .mock_execution_layer() + .build(); + + // go to bellatrix slot + harness.extend_to_slot(bellatrix_fork_slot).await; + let merge_head = &harness.chain.head_snapshot().beacon_block; + assert!(merge_head.as_merge().is_ok()); + assert_eq!(merge_head.slot(), bellatrix_fork_slot); + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" + ); + // Trigger the terminal PoW block. + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + // go right before deneb slot + harness.extend_to_slot(deneb_fork_slot - 1).await; + + harness + } + + #[test] + fn overflow_key_encode_decode_equality() { + type E = types::MainnetEthSpec; + let key_block = OverflowKey::Block(Hash256::random()); + let key_blob_0 = OverflowKey::from_blob_id::(BlobIdentifier { + block_root: Hash256::random(), + index: 0, + }) + .expect("should create overflow key 0"); + let key_blob_1 = OverflowKey::from_blob_id::(BlobIdentifier { + block_root: Hash256::random(), + index: 1, + }) + .expect("should create overflow key 1"); + let key_blob_2 = OverflowKey::from_blob_id::(BlobIdentifier { + block_root: Hash256::random(), + index: 2, + }) + .expect("should create overflow key 2"); + let key_blob_3 = OverflowKey::from_blob_id::(BlobIdentifier { + block_root: Hash256::random(), + index: 3, + }) + .expect("should create overflow key 3"); + + let keys = vec![key_block, key_blob_0, key_blob_1, key_blob_2, key_blob_3]; + for key in keys { + let encoded = key.as_ssz_bytes(); + let decoded = OverflowKey::from_ssz_bytes(&encoded).expect("should decode"); + assert_eq!(key, decoded, "Encoded and decoded keys should be equal"); + } + } + + async fn availability_pending_block( + harness: &BeaconChainHarness>, + ) -> ( + AvailabilityPendingExecutedBlock, + Vec>>, + ) + where + E: EthSpec, + Hot: ItemStore, + Cold: ItemStore, + { + let chain = &harness.chain; + let log = chain.log.clone(); + let head = chain.head_snapshot(); + let parent_state = head.beacon_state.clone_with_only_committee_caches(); + + let target_slot = chain.slot().expect("should get slot") + 1; + let parent_root = head.beacon_block_root; + let parent_block = chain + .get_blinded_block(&parent_root) + .expect("should get block") + .expect("should have block"); + + let parent_eth1_finalization_data = Eth1FinalizationData { + eth1_data: parent_block.message().body().eth1_data().clone(), + eth1_deposit_index: 0, + }; + + let (signed_beacon_block_hash, (block, maybe_blobs), state) = harness + .add_block_at_slot(target_slot, parent_state) + .await + .expect("should add block"); + let block_root = signed_beacon_block_hash.into(); + assert_eq!( + block_root, + block.canonical_root(), + "block root should match" + ); + + // log kzg commitments + info!(log, "printing kzg commitments"); + for comm in Vec::from( + block + .message() 
+ .body() + .blob_kzg_commitments() + .expect("should be deneb fork") + .clone(), + ) { + info!(log, "kzg commitment"; "commitment" => ?comm); + } + info!(log, "done printing kzg commitments"); + + let gossip_verified_blobs = if let Some((kzg_proofs, blobs)) = maybe_blobs { + let sidecars = BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap(); + Vec::from(sidecars) + .into_iter() + .map(|sidecar| { + let subnet = sidecar.index; + GossipVerifiedBlob::new(sidecar, subnet, &harness.chain) + .expect("should validate blob") + }) + .collect() + } else { + vec![] + }; + + let slot = block.slot(); + let consensus_context = ConsensusContext::::new(slot); + let import_data: BlockImportData = BlockImportData { + block_root, + state, + parent_block, + parent_eth1_finalization_data, + confirmed_state_roots: vec![], + consensus_context, + }; + + let payload_verification_outcome = PayloadVerificationOutcome { + payload_verification_status: PayloadVerificationStatus::Verified, + is_valid_merge_transition_block: false, + }; + + let availability_pending_block = AvailabilityPendingExecutedBlock { + block, + import_data, + payload_verification_outcome, + }; + + (availability_pending_block, gossip_verified_blobs) + } + + async fn setup_harness_and_cache( + capacity: usize, + ) -> ( + BeaconChainHarness>, + Arc>, + TempDir, + ) + where + E: EthSpec, + T: BeaconChainTypes, ColdStore = LevelDB, EthSpec = E>, + { + let log = test_logger(); + let chain_db_path = tempdir().expect("should get temp dir"); + let harness = get_deneb_chain(log.clone(), &chain_db_path).await; + let spec = harness.spec.clone(); + let test_store = harness.chain.store.clone(); + let capacity_non_zero = new_non_zero_usize(capacity); + let cache = Arc::new( + OverflowLRUCache::::new(capacity_non_zero, test_store, spec.clone()) + .expect("should create cache"), + ); + (harness, cache, chain_db_path) + } + + #[tokio::test] + async fn overflow_cache_test_insert_components() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = 4; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let (pending_block, blobs) = availability_pending_block(&harness).await; + let root = pending_block.import_data.block_root; + + let blobs_expected = pending_block.num_blobs_expected(); + assert_eq!( + blobs.len(), + blobs_expected, + "should have expected number of blobs" + ); + assert!( + cache.critical.read().in_memory.is_empty(), + "cache should be empty" + ); + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + if blobs_expected == 0 { + assert!( + matches!(availability, Availability::Available(_)), + "block doesn't have blobs, should be available" + ); + assert_eq!( + cache.critical.read().in_memory.len(), + 0, + "cache should be empty because we don't have blobs" + ); + } else { + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should be pending blobs" + ); + assert_eq!( + cache.critical.read().in_memory.len(), + 1, + "cache should have one block" + ); + assert!( + cache.critical.read().in_memory.peek(&root).is_some(), + "newly inserted block should exist in memory" + ); + } + + let mut kzg_verified_blobs = Vec::new(); + for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { + kzg_verified_blobs.push(gossip_blob.into_inner()); + let availability = cache + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) + .expect("should put blob"); + if blob_index == blobs_expected - 1 { + assert!(matches!(availability, 
Availability::Available(_))); + } else { + assert!(matches!(availability, Availability::MissingComponents(_))); + assert_eq!(cache.critical.read().in_memory.len(), 1); + } + } + assert!( + cache.critical.read().in_memory.is_empty(), + "cache should be empty now that all components available" + ); + + let (pending_block, blobs) = availability_pending_block(&harness).await; + let blobs_expected = pending_block.num_blobs_expected(); + assert_eq!( + blobs.len(), + blobs_expected, + "should have expected number of blobs" + ); + let root = pending_block.import_data.block_root; + let mut kzg_verified_blobs = vec![]; + for gossip_blob in blobs { + kzg_verified_blobs.push(gossip_blob.into_inner()); + let availability = cache + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) + .expect("should put blob"); + assert_eq!( + availability, + Availability::MissingComponents(root), + "should be pending block" + ); + assert_eq!(cache.critical.read().in_memory.len(), 1); + } + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::Available(_)), + "block should be available: {:?}", + availability + ); + assert!( + cache.critical.read().in_memory.is_empty(), + "cache should be empty now that all components available" + ); + } + + #[tokio::test] + async fn overflow_cache_test_overflow() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = 4; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let mut pending_blocks = VecDeque::new(); + let mut pending_blobs = VecDeque::new(); + let mut roots = VecDeque::new(); + while pending_blobs.len() < capacity + 1 { + let (pending_block, blobs) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let root = pending_block.block.canonical_root(); + pending_blocks.push_back(pending_block); + pending_blobs.push_back(blobs); + roots.push_back(root); + } + + for i in 0..capacity { + cache + .put_pending_executed_block(pending_blocks.pop_front().expect("should have block")) + .expect("should put block"); + assert_eq!(cache.critical.read().in_memory.len(), i + 1); + } + for root in roots.iter().take(capacity) { + assert!(cache.critical.read().in_memory.peek(root).is_some()); + } + assert_eq!( + cache.critical.read().in_memory.len(), + capacity, + "cache should be full" + ); + // the first block should be the lru entry + assert_eq!( + *cache + .critical + .read() + .in_memory + .peek_lru() + .expect("should exist") + .0, + roots[0], + "first block should be lru" + ); + + cache + .put_pending_executed_block(pending_blocks.pop_front().expect("should have block")) + .expect("should put block"); + assert_eq!( + cache.critical.read().in_memory.len(), + capacity, + "cache should be full" + ); + assert!( + cache.critical.read().in_memory.peek(&roots[0]).is_none(), + "first block should be evicted" + ); + assert_eq!( + *cache + .critical + .read() + .in_memory + .peek_lru() + .expect("should exist") + .0, + roots[1], + "second block should be lru" + ); + + assert!(cache + .overflow_store + .load_pending_components(roots[0]) + .expect("should exist") + .is_some()); + + let threshold = capacity * 3 / 4; + cache + .maintain_threshold(threshold, Epoch::new(0)) + .expect("should maintain threshold"); + assert_eq!( + cache.critical.read().in_memory.len(), + threshold, + "cache should have been maintained" + ); + + let store_keys = cache + .overflow_store + 
.read_keys_on_disk() + .expect("should read keys"); + assert_eq!(store_keys.len(), 2); + assert!(store_keys.contains(&roots[0])); + assert!(store_keys.contains(&roots[1])); + assert!(cache.critical.read().store_keys.contains(&roots[0])); + assert!(cache.critical.read().store_keys.contains(&roots[1])); + + let blobs_0 = pending_blobs.pop_front().expect("should have blobs"); + let expected_blobs = blobs_0.len(); + let mut kzg_verified_blobs = vec![]; + for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() { + kzg_verified_blobs.push(gossip_blob.into_inner()); + let availability = cache + .put_kzg_verified_blobs(roots[0], kzg_verified_blobs.clone()) + .expect("should put blob"); + if blob_index == expected_blobs - 1 { + assert!(matches!(availability, Availability::Available(_))); + } else { + // the first block should be brought back into memory + assert!( + cache.critical.read().in_memory.peek(&roots[0]).is_some(), + "first block should be in memory" + ); + assert!(matches!(availability, Availability::MissingComponents(_))); + } + } + assert_eq!( + cache.critical.read().in_memory.len(), + threshold, + "cache should no longer have the first block" + ); + cache.prune_disk(Epoch::new(0)).expect("should prune disk"); + assert!( + cache + .overflow_store + .load_pending_components(roots[1]) + .expect("no error") + .is_some(), + "second block should still be on disk" + ); + assert!( + cache + .overflow_store + .load_pending_components(roots[0]) + .expect("no error") + .is_none(), + "first block should not be on disk" + ); + } + + #[tokio::test] + async fn overflow_cache_test_maintenance() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = E::slots_per_epoch() as usize; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let n_epochs = 4; + let mut pending_blocks = VecDeque::new(); + let mut pending_blobs = VecDeque::new(); + let mut epoch_count = BTreeMap::new(); + while pending_blobs.len() < n_epochs * capacity { + let (pending_block, blobs) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let epoch = pending_block + .block + .as_block() + .slot() + .epoch(E::slots_per_epoch()); + epoch_count.entry(epoch).or_insert_with(|| 0).add_assign(1); + + pending_blocks.push_back(pending_block); + pending_blobs.push_back(blobs); + } + + for _ in 0..(n_epochs * capacity) { + let pending_block = pending_blocks.pop_front().expect("should have block"); + let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs"); + let block_root = pending_block.block.as_block().canonical_root(); + let expected_blobs = pending_block.num_blobs_expected(); + if expected_blobs > 1 { + // might as well add a blob too + let one_blob = pending_block_blobs + .pop() + .expect("should have at least one blob"); + let kzg_verified_blobs = vec![one_blob.into_inner()]; + // generate random boolean + let block_first = (rand::random::() % 2) == 0; + if block_first { + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should have pending blobs" + ); + let availability = cache + .put_kzg_verified_blobs(block_root, kzg_verified_blobs) + .expect("should put blob"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "availabilty should be pending blobs: {:?}", + availability + ); + } else { + let availability = cache + 
.put_kzg_verified_blobs(block_root, kzg_verified_blobs) + .expect("should put blob"); + let root = pending_block.block.as_block().canonical_root(); + assert_eq!( + availability, + Availability::MissingComponents(root), + "should be pending block" + ); + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should have pending blobs" + ); + } + } else { + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should be pending blobs" + ); + } + } + + // now we should have a full cache spanning multiple epochs + // run the maintenance routine for increasing epochs and ensure that the cache is pruned + assert_eq!( + cache.critical.read().in_memory.len(), + capacity, + "cache memory should be full" + ); + let store_keys = cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys"); + assert_eq!( + store_keys.len(), + capacity * (n_epochs - 1), + "cache disk should have the rest" + ); + let mut expected_length = n_epochs * capacity; + for (epoch, count) in epoch_count { + cache + .do_maintenance(epoch + 1) + .expect("should run maintenance"); + let disk_keys = cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys") + .len(); + let mem_keys = cache.critical.read().in_memory.len(); + expected_length -= count; + info!( + harness.chain.log, + "EPOCH: {} DISK KEYS: {} MEM KEYS: {} TOTAL: {} EXPECTED: {}", + epoch, + disk_keys, + mem_keys, + (disk_keys + mem_keys), + std::cmp::max(expected_length, capacity * 3 / 4), + ); + assert_eq!( + (disk_keys + mem_keys), + std::cmp::max(expected_length, capacity * 3 / 4), + "cache should be pruned" + ); + } + } + + #[tokio::test] + async fn overflow_cache_test_persist_recover() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = E::slots_per_epoch() as usize; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let n_epochs = 4; + let mut pending_blocks = VecDeque::new(); + let mut pending_blobs = VecDeque::new(); + let mut epoch_count = BTreeMap::new(); + while pending_blobs.len() < n_epochs * capacity { + let (pending_block, blobs) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let epoch = pending_block + .block + .as_block() + .slot() + .epoch(E::slots_per_epoch()); + epoch_count.entry(epoch).or_insert_with(|| 0).add_assign(1); + + pending_blocks.push_back(pending_block); + pending_blobs.push_back(blobs); + } + + let mut remaining_blobs = HashMap::new(); + for _ in 0..(n_epochs * capacity) { + let pending_block = pending_blocks.pop_front().expect("should have block"); + let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs"); + let block_root = pending_block.block.as_block().canonical_root(); + let expected_blobs = pending_block.num_blobs_expected(); + if expected_blobs > 1 { + // might as well add a blob too + let one_blob = pending_block_blobs + .pop() + .expect("should have at least one blob"); + let kzg_verified_blobs = vec![one_blob.into_inner()]; + // generate random boolean + let block_first = (rand::random::() % 2) == 0; + if block_first { + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, 
Availability::MissingComponents(_)), + "should have pending blobs" + ); + let availability = cache + .put_kzg_verified_blobs(block_root, kzg_verified_blobs) + .expect("should put blob"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "availabilty should be pending blobs: {:?}", + availability + ); + } else { + let availability = cache + .put_kzg_verified_blobs(block_root, kzg_verified_blobs) + .expect("should put blob"); + let root = pending_block.block.as_block().canonical_root(); + assert_eq!( + availability, + Availability::MissingComponents(root), + "should be pending block" + ); + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should have pending blobs" + ); + } + } else { + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should be pending blobs" + ); + } + remaining_blobs.insert(block_root, pending_block_blobs); + } + + // now we should have a full cache spanning multiple epochs + // cache should be at capacity + assert_eq!( + cache.critical.read().in_memory.len(), + capacity, + "cache memory should be full" + ); + // write all components to disk + cache.write_all_to_disk().expect("should write all to disk"); + // everything should be on disk now + assert_eq!( + cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys") + .len(), + capacity * n_epochs, + "cache disk should have the rest" + ); + assert_eq!( + cache.critical.read().in_memory.len(), + 0, + "cache memory should be empty" + ); + assert_eq!( + cache.critical.read().store_keys.len(), + n_epochs * capacity, + "cache store should have the rest" + ); + drop(cache); + + // create a new cache with the same store + let recovered_cache = OverflowLRUCache::::new( + new_non_zero_usize(capacity), + harness.chain.store.clone(), + harness.chain.spec.clone(), + ) + .expect("should recover cache"); + // again, everything should be on disk + assert_eq!( + recovered_cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys") + .len(), + capacity * n_epochs, + "cache disk should have the rest" + ); + assert_eq!( + recovered_cache.critical.read().in_memory.len(), + 0, + "cache memory should be empty" + ); + assert_eq!( + recovered_cache.critical.read().store_keys.len(), + n_epochs * capacity, + "cache store should have the rest" + ); + + // now lets insert the remaining blobs until the cache is empty + for (root, blobs) in remaining_blobs { + let additional_blobs = blobs.len(); + let mut kzg_verified_blobs = vec![]; + for (i, gossip_blob) in blobs.into_iter().enumerate() { + kzg_verified_blobs.push(gossip_blob.into_inner()); + let availability = recovered_cache + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) + .expect("should put blob"); + if i == additional_blobs - 1 { + assert!(matches!(availability, Availability::Available(_))) + } else { + assert!(matches!(availability, Availability::MissingComponents(_))); + } + } + } + } + + #[tokio::test] + // ensure the state cache keeps memory usage low and that it can properly recover states + // THIS TEST CAN BE DELETED ONCE TREE STATES IS MERGED AND WE RIP OUT THE STATE CACHE + async fn overflow_cache_test_state_cache() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = STATE_LRU_CAPACITY * 2; + let (harness, cache, _path) = 
setup_harness_and_cache::(capacity).await; + + let mut pending_blocks = VecDeque::new(); + let mut states = Vec::new(); + let mut state_roots = Vec::new(); + // Get enough blocks to fill the cache to capacity, ensuring all blocks have blobs + while pending_blocks.len() < capacity { + let (pending_block, _) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let state_root = pending_block.import_data.state.canonical_root(); + states.push(pending_block.import_data.state.clone()); + pending_blocks.push_back(pending_block); + state_roots.push(state_root); + } + + let state_cache = cache.state_lru_cache().lru_cache(); + let mut pushed_diet_blocks = VecDeque::new(); + + for i in 0..capacity { + let pending_block = pending_blocks.pop_front().expect("should have block"); + let block_root = pending_block.as_block().canonical_root(); + + assert_eq!( + state_cache.read().len(), + std::cmp::min(i, STATE_LRU_CAPACITY), + "state cache should be empty at start" + ); + + if i >= STATE_LRU_CAPACITY { + let lru_root = state_roots[i - STATE_LRU_CAPACITY]; + assert_eq!( + state_cache.read().peek_lru().map(|(root, _)| root), + Some(&lru_root), + "lru block should be in cache" + ); + } + + // put the block in the cache + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + + // grab the diet block from the cache for later testing + let diet_block = cache + .critical + .read() + .in_memory + .peek(&block_root) + .map(|pending_components| { + pending_components + .executed_block + .clone() + .expect("should exist") + }) + .expect("should exist"); + pushed_diet_blocks.push_back(diet_block); + + // should be unavailable since we made sure all blocks had blobs + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should be pending blobs" + ); + + if i >= STATE_LRU_CAPACITY { + let evicted_index = i - STATE_LRU_CAPACITY; + let evicted_root = state_roots[evicted_index]; + assert!( + state_cache.read().peek(&evicted_root).is_none(), + "lru root should be evicted" + ); + // get the diet block via direct conversion (testing only) + let diet_block = pushed_diet_blocks.pop_front().expect("should have block"); + // reconstruct the pending block by replaying the block on the parent state + let recovered_pending_block = cache + .state_lru_cache() + .reconstruct_pending_executed_block(diet_block) + .expect("should reconstruct pending block"); + + // assert the recovered state is the same as the original + assert_eq!( + recovered_pending_block.import_data.state, states[evicted_index], + "recovered state should be the same as the original" + ); + } + } + + // now check the last block + let last_block = pushed_diet_blocks.pop_back().expect("should exist").clone(); + // the state should still be in the cache + assert!( + state_cache + .read() + .peek(&last_block.as_block().state_root()) + .is_some(), + "last block state should still be in cache" + ); + // get the diet block via direct conversion (testing only) + let diet_block = last_block.clone(); + // recover the pending block from the cache + let recovered_pending_block = cache + .state_lru_cache() + .recover_pending_executed_block(diet_block) + .expect("should reconstruct pending block"); + // assert the recovered state is the same as the original + assert_eq!( + Some(&recovered_pending_block.import_data.state), + states.last(), + "recovered state should be the same as the original" + ); + // the state should no 
longer be in the cache + assert!( + state_cache + .read() + .peek(&last_block.as_block().state_root()) + .is_none(), + "last block state should no longer be in cache" + ); + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs new file mode 100644 index 000000000..969034c65 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs @@ -0,0 +1,74 @@ +use crate::data_availability_checker::AvailabilityView; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; +use types::{EthSpec, Hash256, Slot}; + +/// This cache is used only for gossip blocks/blobs and single block/blob lookups, to give req/resp +/// a view of what we have and what we require. This cache serves a slightly different purpose than +/// gossip caches because it allows us to process duplicate blobs that are valid in gossip. +/// See `AvailabilityView`'s trait definition. +#[derive(Default)] +pub struct ProcessingCache { + processing_cache: HashMap>, +} + +impl ProcessingCache { + pub fn get(&self, block_root: &Hash256) -> Option<&ProcessingComponents> { + self.processing_cache.get(block_root) + } + pub fn entry(&mut self, block_root: Hash256) -> Entry<'_, Hash256, ProcessingComponents> { + self.processing_cache.entry(block_root) + } + pub fn remove(&mut self, block_root: &Hash256) { + self.processing_cache.remove(block_root); + } + pub fn has_block(&self, block_root: &Hash256) -> bool { + self.processing_cache + .get(block_root) + .map_or(false, |b| b.block_exists()) + } + pub fn incomplete_processing_components(&self, slot: Slot) -> Vec { + let mut roots_missing_components = vec![]; + for (&block_root, info) in self.processing_cache.iter() { + if info.slot == slot && !info.is_available() { + roots_missing_components.push(block_root); + } + } + roots_missing_components + } +} + +#[derive(Debug, Clone)] +pub struct ProcessingComponents { + slot: Slot, + /// Blobs required for a block can only be known if we have seen the block. So `Some` here + /// means we've seen it, a `None` means we haven't. The `kzg_commitments` value helps us figure + /// out whether incoming blobs actually match the block. + pub block_commitments: Option>, + /// `KzgCommitments` for blobs are always known, even if we haven't seen the block. See + /// `AvailabilityView`'s trait definition for more details. + pub blob_commitments: KzgCommitmentOpts, +} + +impl ProcessingComponents { + pub fn new(slot: Slot) -> Self { + Self { + slot, + block_commitments: None, + blob_commitments: KzgCommitmentOpts::::default(), + } + } +} + +// Not safe for use outside of tests as this always required a slot. 
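+// A hypothetical usage sketch from the gossip path (the surrounding names are
+// illustrative and not part of this diff): when a block is first seen, its
+// commitments are recorded so that later blobs can be matched against them:
+//
+//     processing_cache
+//         .entry(block_root)
+//         .or_insert_with(|| ProcessingComponents::new(slot))
+//         .block_commitments = Some(block_commitments);
+//
+// The test-only constructor below exists because `new` always requires a slot.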
+#[cfg(test)] +impl ProcessingComponents { + pub fn empty(_block_root: Hash256) -> Self { + Self { + slot: Slot::new(0), + block_commitments: None, + blob_commitments: KzgCommitmentOpts::::default(), + } + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs new file mode 100644 index 000000000..bd125a7f4 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -0,0 +1,230 @@ +use crate::block_verification_types::AsBlock; +use crate::{ + block_verification_types::BlockImportData, + data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO}, + eth1_finalization_cache::Eth1FinalizationData, + AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, +}; +use lru::LruCache; +use parking_lot::RwLock; +use ssz_derive::{Decode, Encode}; +use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy}; +use std::sync::Arc; +use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; +use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; + +/// This mirrors everything in the `AvailabilityPendingExecutedBlock`, except +/// that it is much smaller because it contains only a state root instead of +/// a full `BeaconState`. +#[derive(Encode, Decode, Clone)] +pub struct DietAvailabilityPendingExecutedBlock { + #[ssz(with = "ssz_tagged_signed_beacon_block_arc")] + block: Arc>, + state_root: Hash256, + #[ssz(with = "ssz_tagged_signed_beacon_block")] + parent_block: SignedBeaconBlock>, + parent_eth1_finalization_data: Eth1FinalizationData, + confirmed_state_roots: Vec, + consensus_context: ConsensusContext, + payload_verification_outcome: PayloadVerificationOutcome, +} + +/// just implementing the same methods as `AvailabilityPendingExecutedBlock` +impl DietAvailabilityPendingExecutedBlock { + pub fn as_block(&self) -> &SignedBeaconBlock { + &self.block + } + + pub fn num_blobs_expected(&self) -> usize { + self.block + .message() + .body() + .blob_kzg_commitments() + .map_or(0, |commitments| commitments.len()) + } +} + +/// This LRU cache holds BeaconStates used for block import. If the cache overflows, +/// the least recently used state will be dropped. If the dropped state is needed +/// later on, it will be recovered from the parent state and replaying the block. +/// +/// WARNING: This cache assumes the parent block of any `AvailabilityPendingExecutedBlock` +/// has already been imported into ForkChoice. If this is not the case, the cache +/// will fail to recover the state when the cache overflows because it can't load +/// the parent state! +pub struct StateLRUCache { + states: RwLock>>, + store: BeaconStore, + spec: ChainSpec, +} + +impl StateLRUCache { + pub fn new(store: BeaconStore, spec: ChainSpec) -> Self { + Self { + states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY_NON_ZERO)), + store, + spec, + } + } + + /// This will store the state in the LRU cache and return a + /// `DietAvailabilityPendingExecutedBlock` which is much cheaper to + /// keep around in memory. 
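+    ///
+    /// A minimal round-trip sketch (illustrative only; `state_cache` and
+    /// `executed_block` are assumed to be in scope):
+    ///
+    /// ```ignore
+    /// let diet_block = state_cache.register_pending_executed_block(executed_block);
+    /// // ...once every blob has arrived, recover the full block:
+    /// let executed_block = state_cache.recover_pending_executed_block(diet_block)?;
+    /// ```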
+ pub fn register_pending_executed_block( + &self, + executed_block: AvailabilityPendingExecutedBlock, + ) -> DietAvailabilityPendingExecutedBlock { + let state = executed_block.import_data.state; + let state_root = executed_block.block.state_root(); + self.states.write().put(state_root, state); + + DietAvailabilityPendingExecutedBlock { + block: executed_block.block, + state_root, + parent_block: executed_block.import_data.parent_block, + parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, + confirmed_state_roots: executed_block.import_data.confirmed_state_roots, + consensus_context: executed_block.import_data.consensus_context, + payload_verification_outcome: executed_block.payload_verification_outcome, + } + } + + /// Recover the `AvailabilityPendingExecutedBlock` from the diet version. + /// This method will first check the cache and if the state is not found + /// it will reconstruct the state by loading the parent state from disk and + /// replaying the block. + pub fn recover_pending_executed_block( + &self, + diet_executed_block: DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let maybe_state = self.states.write().pop(&diet_executed_block.state_root); + if let Some(state) = maybe_state { + let block_root = diet_executed_block.block.canonical_root(); + Ok(AvailabilityPendingExecutedBlock { + block: diet_executed_block.block, + import_data: BlockImportData { + block_root, + state, + parent_block: diet_executed_block.parent_block, + parent_eth1_finalization_data: diet_executed_block + .parent_eth1_finalization_data, + confirmed_state_roots: diet_executed_block.confirmed_state_roots, + consensus_context: diet_executed_block.consensus_context, + }, + payload_verification_outcome: diet_executed_block.payload_verification_outcome, + }) + } else { + self.reconstruct_pending_executed_block(diet_executed_block) + } + } + + /// Reconstruct the `AvailabilityPendingExecutedBlock` by loading the parent + /// state from disk and replaying the block. This function does NOT check the + /// LRU cache. + pub fn reconstruct_pending_executed_block( + &self, + diet_executed_block: DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let block_root = diet_executed_block.block.canonical_root(); + let state = self.reconstruct_state(&diet_executed_block)?; + Ok(AvailabilityPendingExecutedBlock { + block: diet_executed_block.block, + import_data: BlockImportData { + block_root, + state, + parent_block: diet_executed_block.parent_block, + parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, + confirmed_state_roots: diet_executed_block.confirmed_state_roots, + consensus_context: diet_executed_block.consensus_context, + }, + payload_verification_outcome: diet_executed_block.payload_verification_outcome, + }) + } + + /// Reconstruct the state by loading the parent state from disk and replaying + /// the block. + fn reconstruct_state( + &self, + diet_executed_block: &DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let parent_block_root = diet_executed_block.parent_block.canonical_root(); + let parent_block_state_root = diet_executed_block.parent_block.state_root(); + let (parent_state_root, parent_state) = self + .store + .get_advanced_hot_state( + parent_block_root, + diet_executed_block.parent_block.slot(), + parent_block_state_root, + ) + .map_err(AvailabilityCheckError::StoreError)? 
+ .ok_or(AvailabilityCheckError::ParentStateMissing( + parent_block_state_root, + ))?; + + let state_roots = vec![ + Ok((parent_state_root, diet_executed_block.parent_block.slot())), + Ok(( + diet_executed_block.state_root, + diet_executed_block.block.slot(), + )), + ]; + + let block_replayer: BlockReplayer<'_, T::EthSpec, AvailabilityCheckError, _> = + BlockReplayer::new(parent_state, &self.spec) + .no_signature_verification() + .state_processing_strategy(StateProcessingStrategy::Accurate) + .state_root_iter(state_roots.into_iter()) + .minimal_block_root_verification(); + + block_replayer + .apply_blocks(vec![diet_executed_block.block.clone_as_blinded()], None) + .map(|block_replayer| block_replayer.into_state()) + .and_then(|mut state| { + state + .build_exit_cache(&self.spec) + .map_err(AvailabilityCheckError::RebuildingStateCaches)?; + state + .update_tree_hash_cache() + .map_err(AvailabilityCheckError::RebuildingStateCaches)?; + Ok(state) + }) + } + + /// returns the state cache for inspection in tests + #[cfg(test)] + pub fn lru_cache(&self) -> &RwLock>> { + &self.states + } + + /// remove any states from the cache from before the given epoch + pub fn do_maintenance(&self, cutoff_epoch: Epoch) { + let mut write_lock = self.states.write(); + while let Some((_, state)) = write_lock.peek_lru() { + if state.slot().epoch(T::EthSpec::slots_per_epoch()) < cutoff_epoch { + write_lock.pop_lru(); + } else { + break; + } + } + } +} + +/// This can only be used during testing. The intended way to +/// obtain a `DietAvailabilityPendingExecutedBlock` is to call +/// `register_pending_executed_block` on the `StateLRUCache`. +#[cfg(test)] +impl From> + for DietAvailabilityPendingExecutedBlock +{ + fn from(value: AvailabilityPendingExecutedBlock) -> Self { + Self { + block: value.block, + state_root: value.import_data.state.canonical_root(), + parent_block: value.import_data.parent_block, + parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, + confirmed_state_roots: value.import_data.confirmed_state_roots, + consensus_context: value.import_data.consensus_context, + payload_verification_outcome: value.payload_verification_outcome, + } + } +} diff --git a/beacon_node/beacon_chain/src/deneb_readiness.rs b/beacon_node/beacon_chain/src/deneb_readiness.rs new file mode 100644 index 000000000..1ba6fe3ea --- /dev/null +++ b/beacon_node/beacon_chain/src/deneb_readiness.rs @@ -0,0 +1,121 @@ +//! Provides tools for checking if a node is ready for the Deneb upgrade. + +use crate::{BeaconChain, BeaconChainTypes}; +use execution_layer::http::{ + ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V3, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::time::Duration; +use types::*; + +/// The time before the Deneb fork when we will start issuing warnings about preparation. +use super::merge_readiness::SECONDS_IN_A_WEEK; +pub const DENEB_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; +pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum DenebReadiness { + /// The execution engine is deneb-enabled (as far as we can tell) + Ready, + /// We are connected to an execution engine which doesn't support the V3 engine api methods + V3MethodsNotSupported { error: String }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. 
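+    /// (With the `snake_case`/`tag = "type"` serde attributes above, this
+    /// variant serializes as, e.g.,
+    /// `{"type":"exchange_capabilities_failed","error":"..."}`.)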
+ ExchangeCapabilitiesFailed { error: String }, + /// The user has not configured an execution endpoint + NoExecutionEndpoint, +} + +impl fmt::Display for DenebReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DenebReadiness::Ready => { + write!(f, "This node appears ready for Deneb.") + } + DenebReadiness::ExchangeCapabilitiesFailed { error } => write!( + f, + "Could not exchange capabilities with the \ + execution endpoint: {}", + error + ), + DenebReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement post-merge" + ), + DenebReadiness::V3MethodsNotSupported { error } => write!( + f, + "Execution endpoint does not support Deneb methods: {}", + error + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if deneb epoch is set and Deneb fork has occurred or will + /// occur within `DENEB_READINESS_PREPARATION_SECONDS` + pub fn is_time_to_prepare_for_deneb(&self, current_slot: Slot) -> bool { + if let Some(deneb_epoch) = self.spec.deneb_fork_epoch { + let deneb_slot = deneb_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let deneb_readiness_preparation_slots = + DENEB_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + // Return `true` if Deneb has happened or is within the preparation time. + current_slot + deneb_readiness_preparation_slots > deneb_slot + } else { + // The Deneb fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for capella. + pub async fn check_deneb_readiness(&self) -> DenebReadiness { + if let Some(el) = self.execution_layer.as_ref() { + match el + .get_engine_capabilities(Some(Duration::from_secs( + ENGINE_CAPABILITIES_REFRESH_INTERVAL, + ))) + .await + { + Err(e) => { + // The EL was either unreachable or responded with an error + DenebReadiness::ExchangeCapabilitiesFailed { + error: format!("{:?}", e), + } + } + Ok(capabilities) => { + let mut missing_methods = String::from("Required Methods Unsupported:"); + let mut all_good = true; + if !capabilities.get_payload_v3 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V3); + all_good = false; + } + if !capabilities.forkchoice_updated_v3 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V3); + all_good = false; + } + if !capabilities.new_payload_v3 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V3); + all_good = false; + } + + if all_good { + DenebReadiness::Ready + } else { + DenebReadiness::V3MethodsNotSupported { + error: missing_methods, + } + } + } + } + } else { + DenebReadiness::NoExecutionEndpoint + } + } +} diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 1ddbe1324..da3c2c8a1 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -1,3 +1,4 @@ +use crate::data_availability_checker::AvailableBlock; use crate::{ attester_cache::{CommitteeLengths, Error}, metrics, @@ -5,6 +6,7 @@ use crate::{ use parking_lot::RwLock; use proto_array::Block as ProtoBlock; use std::sync::Arc; +use types::blob_sidecar::BlobSidecarList; use types::*; pub struct CacheItem { @@ -20,6 +22,7 @@ pub struct CacheItem { * Values used to make the block available. 
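+     * With Deneb a block is only available once its blob sidecars have been
+     * verified too, so the blobs are now cached alongside the block.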
*/ block: Arc>, + blobs: Option>, proto_block: ProtoBlock, } @@ -49,7 +52,7 @@ impl EarlyAttesterCache { pub fn add_head_block( &self, beacon_block_root: Hash256, - block: Arc>, + block: AvailableBlock, proto_block: ProtoBlock, state: &BeaconState, spec: &ChainSpec, @@ -67,6 +70,7 @@ impl EarlyAttesterCache { }, }; + let (_, block, blobs) = block.deconstruct(); let item = CacheItem { epoch, committee_lengths, @@ -74,6 +78,7 @@ impl EarlyAttesterCache { source, target, block, + blobs, proto_block, }; @@ -94,9 +99,7 @@ impl EarlyAttesterCache { spec: &ChainSpec, ) -> Result>, Error> { let lock = self.item.read(); - let item = if let Some(item) = lock.as_ref() { - item - } else { + let Some(item) = lock.as_ref() else { return Ok(None); }; @@ -155,6 +158,15 @@ impl EarlyAttesterCache { .map(|item| item.block.clone()) } + /// Returns the blobs, if `block_root` matches the cached item. + pub fn get_blobs(&self, block_root: Hash256) -> Option> { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .and_then(|item| item.blobs.clone()) + } + /// Returns the proto-array block, if `block_root` matches the cached item. pub fn get_proto_block(&self, block_root: Hash256) -> Option { self.item diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 073761b0f..9c1ba06f8 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -2,12 +2,14 @@ use crate::attester_cache::Error as AttesterCacheError; use crate::beacon_block_streamer::Error as BlockStreamerError; use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; +use crate::data_availability_checker::AvailabilityCheckError; use crate::eth1_chain::Error as Eth1ChainError; use crate::historical_blocks::HistoricalBlockError; use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; +use crate::observed_blob_sidecars::Error as ObservedBlobSidecarsError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; @@ -102,6 +104,7 @@ pub enum BeaconChainError { ObservedAttestationsError(ObservedAttestationsError), ObservedAttestersError(ObservedAttestersError), ObservedBlockProducersError(ObservedBlockProducersError), + ObservedBlobSidecarsError(ObservedBlobSidecarsError), AttesterCacheError(AttesterCacheError), PruningError(PruningError), ArithError(ArithError), @@ -217,6 +220,9 @@ pub enum BeaconChainError { InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), UnableToPublish, + AvailabilityCheckError(AvailabilityCheckError), + LightClientError(LightClientError), + UnsupportedFork, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -233,6 +239,7 @@ easy_from_to!(NaiveAggregationError, BeaconChainError); easy_from_to!(ObservedAttestationsError, BeaconChainError); easy_from_to!(ObservedAttestersError, BeaconChainError); easy_from_to!(ObservedBlockProducersError, BeaconChainError); +easy_from_to!(ObservedBlobSidecarsError, BeaconChainError); easy_from_to!(AttesterCacheError, BeaconChainError); easy_from_to!(BlockSignatureVerifierError, BeaconChainError); easy_from_to!(PruningError, BeaconChainError); @@ -242,6 +249,7 @@ easy_from_to!(HistoricalBlockError, BeaconChainError); 
easy_from_to!(StateAdvanceError, BeaconChainError); easy_from_to!(BlockReplayError, BeaconChainError); easy_from_to!(InconsistentFork, BeaconChainError); +easy_from_to!(AvailabilityCheckError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { @@ -270,11 +278,17 @@ pub enum BlockProductionError { MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ShuttingDown, + MissingBlobs, MissingSyncAggregate, MissingExecutionPayload, - TokioJoin(tokio::task::JoinError), + MissingKzgCommitment(String), + TokioJoin(JoinError), BeaconChain(BeaconChainError), InvalidPayloadFork, + TrustedSetupNotInitialized, + InvalidBlockVariant(String), + KzgError(kzg::Error), + FailedToBuildBlobSidecars(String), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs index 7cf805a12..24b6542ea 100644 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -1,4 +1,5 @@ use slog::{debug, Logger}; +use ssz_derive::{Decode, Encode}; use std::cmp; use std::collections::BTreeMap; use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root}; @@ -10,7 +11,7 @@ pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5; /// These fields are named the same as the corresponding fields in the `BeaconState` /// as this structure stores these values from the `BeaconState` at a `Checkpoint` -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Encode, Decode)] pub struct Eth1FinalizationData { pub eth1_data: Eth1Data, pub eth1_deposit_index: u64, @@ -66,7 +67,7 @@ impl CheckpointMap { pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { self.store .entry(checkpoint.epoch) - .or_insert_with(Vec::new) + .or_default() .push((checkpoint.root, eth1_finalization_data)); // faster to reduce size after the fact than do pre-checking to see diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index b267cc853..0e5dfc805 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -9,6 +9,7 @@ const DEFAULT_CHANNEL_CAPACITY: usize = 16; pub struct ServerSentEventHandler { attestation_tx: Sender>, block_tx: Sender>, + blob_sidecar_tx: Sender>, finalized_tx: Sender>, head_tx: Sender>, exit_tx: Sender>, @@ -16,6 +17,8 @@ pub struct ServerSentEventHandler { contribution_tx: Sender>, payload_attributes_tx: Sender>, late_head: Sender>, + light_client_finality_update_tx: Sender>, + light_client_optimistic_update_tx: Sender>, block_reward_tx: Sender>, log: Logger, } @@ -31,6 +34,7 @@ impl ServerSentEventHandler { pub fn new_with_capacity(log: Logger, capacity: usize) -> Self { let (attestation_tx, _) = broadcast::channel(capacity); let (block_tx, _) = broadcast::channel(capacity); + let (blob_sidecar_tx, _) = broadcast::channel(capacity); let (finalized_tx, _) = broadcast::channel(capacity); let (head_tx, _) = broadcast::channel(capacity); let (exit_tx, _) = broadcast::channel(capacity); @@ -38,11 +42,14 @@ impl ServerSentEventHandler { let (contribution_tx, _) = broadcast::channel(capacity); let (payload_attributes_tx, _) = broadcast::channel(capacity); let (late_head, _) = broadcast::channel(capacity); + let (light_client_finality_update_tx, _) = broadcast::channel(capacity); + let (light_client_optimistic_update_tx, _) = broadcast::channel(capacity); let (block_reward_tx, _) = broadcast::channel(capacity); Self { attestation_tx, 
block_tx, + blob_sidecar_tx, finalized_tx, head_tx, exit_tx, @@ -50,6 +57,8 @@ impl ServerSentEventHandler { contribution_tx, payload_attributes_tx, late_head, + light_client_finality_update_tx, + light_client_optimistic_update_tx, block_reward_tx, log, } @@ -73,6 +82,10 @@ impl ServerSentEventHandler { .block_tx .send(kind) .map(|count| log_count("block", count)), + EventKind::BlobSidecar(_) => self + .blob_sidecar_tx + .send(kind) + .map(|count| log_count("blob sidecar", count)), EventKind::FinalizedCheckpoint(_) => self .finalized_tx .send(kind) @@ -101,6 +114,14 @@ impl ServerSentEventHandler { .late_head .send(kind) .map(|count| log_count("late head", count)), + EventKind::LightClientFinalityUpdate(_) => self + .light_client_finality_update_tx + .send(kind) + .map(|count| log_count("light client finality update", count)), + EventKind::LightClientOptimisticUpdate(_) => self + .light_client_optimistic_update_tx + .send(kind) + .map(|count| log_count("light client optimistic update", count)), EventKind::BlockReward(_) => self .block_reward_tx .send(kind) @@ -119,6 +140,10 @@ impl ServerSentEventHandler { self.block_tx.subscribe() } + pub fn subscribe_blob_sidecar(&self) -> Receiver> { + self.blob_sidecar_tx.subscribe() + } + pub fn subscribe_finalized(&self) -> Receiver> { self.finalized_tx.subscribe() } @@ -147,6 +172,14 @@ impl ServerSentEventHandler { self.late_head.subscribe() } + pub fn subscribe_light_client_finality_update(&self) -> Receiver> { + self.light_client_finality_update_tx.subscribe() + } + + pub fn subscribe_light_client_optimistic_update(&self) -> Receiver> { + self.light_client_optimistic_update_tx.subscribe() + } + pub fn subscribe_block_reward(&self) -> Receiver> { self.block_reward_tx.subscribe() } @@ -159,6 +192,10 @@ impl ServerSentEventHandler { self.block_tx.receiver_count() > 0 } + pub fn has_blob_sidecar_subscribers(&self) -> bool { + self.blob_sidecar_tx.receiver_count() > 0 + } + pub fn has_finalized_subscribers(&self) -> bool { self.finalized_tx.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 1ac7229cc..e25976c2a 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,7 +12,10 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus}; +use execution_layer::{ + BlockProposalContents, BlockProposalContentsType, BuilderParams, NewPayloadRequest, + PayloadAttributes, PayloadStatus, +}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::{debug, warn}; @@ -24,11 +27,11 @@ use state_processing::per_block_processing::{ use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; +use types::payload::BlockProductionVersion; use types::*; -pub type PreparePayloadResult = - Result, BlockProductionError>; -pub type PreparePayloadHandle = JoinHandle>>; +pub type PreparePayloadResult = Result, BlockProductionError>; +pub type PreparePayloadHandle = JoinHandle>>; #[derive(PartialEq)] pub enum AllowOptimisticImport { @@ -68,11 +71,10 @@ impl PayloadNotifier { // the block as optimistically imported. This is particularly relevant in the case // where we do not send the block to the EL at all. 
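+            // Note: `partially_verify_execution_payload` is now handed the whole
+            // block body rather than the bare payload, presumably so the Deneb
+            // fields (e.g. `blob_kzg_commitments`) are in scope for verification.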
let block_message = block.message(); - let payload = block_message.execution_payload()?; partially_verify_execution_payload::<_, FullPayload<_>>( state, block.slot(), - payload, + block_message.body(), &chain.spec, ) .map_err(BlockError::PerBlockProcessingError)?; @@ -86,13 +88,11 @@ impl PayloadNotifier { .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - if let Err(e) = - execution_layer.verify_payload_block_hash(payload.execution_payload_ref()) - { + if let Err(e) = execution_layer.verify_payload_block_hash(block_message) { warn!( chain.log, "Falling back to slow block hash verification"; - "block_number" => payload.block_number(), + "block_number" => ?block_message.execution_payload().map(|payload| payload.block_number()), "info" => "you can silence this warning with --disable-optimistic-finalized-sync", "error" => ?e, ); @@ -138,15 +138,15 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( chain: &Arc>, block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result> { - let execution_payload = block.execution_payload()?; - let execution_layer = chain .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; + let new_payload_request: NewPayloadRequest = block.try_into()?; + let execution_block_hash = new_payload_request.block_hash(); let new_payload_response = execution_layer - .notify_new_payload(&execution_payload.into()) + .notify_new_payload(new_payload_request) .await; match new_payload_response { @@ -164,7 +164,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( "Invalid execution payload"; "validation_error" => ?validation_error, "latest_valid_hash" => ?latest_valid_hash, - "execution_block_hash" => ?execution_payload.block_hash(), + "execution_block_hash" => ?execution_block_hash, "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -210,7 +210,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( chain.log, "Invalid execution payload block hash"; "validation_error" => ?validation_error, - "execution_block_hash" => ?execution_payload.block_hash(), + "execution_block_hash" => ?execution_block_hash, "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -399,15 +399,15 @@ pub fn validate_execution_payload_for_gossip( /// Equivalent to the `get_execution_payload` function in the Validator Guide: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal -pub fn get_execution_payload< - T: BeaconChainTypes, - Payload: AbstractExecPayload + 'static, ->( +pub fn get_execution_payload( chain: Arc>, state: &BeaconState, + parent_block_root: Hash256, proposer_index: u64, builder_params: BuilderParams, -) -> Result, BlockProductionError> { + builder_boost_factor: Option, + block_production_version: BlockProductionVersion, +) -> Result, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned // task. 
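The comment above is the key constraint on `get_execution_payload`: the spawned future must be `'static`, so every value is copied out of the borrowed `BeaconState` before spawning. A hedged sketch of that borrow-then-spawn shape, with simplified stand-in types and the tokio runtime assumed:

```rust
use tokio::task::JoinHandle;

struct State {
    timestamp: u64,
    prev_randao: [u8; 32],
}

fn spawn_payload_task(state: &State) -> JoinHandle<u64> {
    // Copy the required values while the borrow is still live...
    let timestamp = state.timestamp;
    let prev_randao = state.prev_randao;

    // ...then move only owned data into the future, satisfying the 'static bound.
    tokio::spawn(async move { timestamp ^ u64::from(prev_randao[0]) })
}

#[tokio::main]
async fn main() {
    let state = State { timestamp: 1_700_000_000, prev_randao: [7; 32] };
    let result = spawn_payload_task(&state).await.expect("task panicked");
    println!("payload sketch value: {result}");
}
```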
let spec = &chain.spec; @@ -419,11 +419,19 @@ pub fn get_execution_payload< let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash(); let withdrawals = match state { - &BeaconState::Capella(_) => Some(get_expected_withdrawals(state, spec)?.into()), + &BeaconState::Capella(_) | &BeaconState::Deneb(_) => { + Some(get_expected_withdrawals(state, spec)?.into()) + } &BeaconState::Merge(_) => None, // These shouldn't happen but they're here to make the pattern irrefutable &BeaconState::Base(_) | &BeaconState::Altair(_) => None, }; + let parent_beacon_block_root = match state { + BeaconState::Deneb(_) => Some(parent_block_root), + BeaconState::Merge(_) | BeaconState::Capella(_) => None, + // These shouldn't happen but they're here to make the pattern irrefutable + BeaconState::Base(_) | BeaconState::Altair(_) => None, + }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. @@ -432,7 +440,7 @@ pub fn get_execution_payload< .clone() .spawn_handle( async move { - prepare_execution_payload::( + prepare_execution_payload::( &chain, is_merge_transition_complete, timestamp, @@ -441,6 +449,9 @@ pub fn get_execution_payload< latest_execution_payload_header_block_hash, builder_params, withdrawals, + parent_beacon_block_root, + builder_boost_factor, + block_production_version, ) .await }, @@ -466,7 +477,7 @@ pub fn get_execution_payload< /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal #[allow(clippy::too_many_arguments)] -pub async fn prepare_execution_payload( +pub async fn prepare_execution_payload( chain: &Arc>, is_merge_transition_complete: bool, timestamp: u64, @@ -475,10 +486,12 @@ pub async fn prepare_execution_payload( latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, withdrawals: Option>, -) -> Result, BlockProductionError> + parent_beacon_block_root: Option, + builder_boost_factor: Option, + block_production_version: BlockProductionVersion, +) -> Result, BlockProductionError> where T: BeaconChainTypes, - Payload: AbstractExecPayload, { let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; @@ -496,7 +509,12 @@ where if is_terminal_block_hash_set && !is_activation_epoch_reached { // Use the "empty" payload if there's a terminal block hash, but we haven't reached the // terminal block epoch yet. - return BlockProposalContents::default_at_fork(fork).map_err(Into::into); + return Ok(BlockProposalContentsType::Full( + BlockProposalContents::Payload { + payload: FullPayload::default_at_fork(fork)?, + block_value: Uint256::zero(), + }, + )); } let terminal_pow_block_hash = execution_layer @@ -509,7 +527,12 @@ where } else { // If the merge transition hasn't occurred yet and the EL hasn't found the terminal // block, return an "empty" payload. 
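Both `withdrawals` and `parent_beacon_block_root` above are derived from an exhaustive match on the state's fork, so adding a fork variant forces these sites to be revisited at compile time. A simplified sketch of the gating (the `Fork` enum is a stand-in for matching on `BeaconState`):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Fork {
    Base,
    Altair,
    Merge,
    Capella,
    Deneb,
}

// Only Deneb payload attributes carry the parent beacon block root.
fn parent_beacon_block_root(fork: Fork, parent_root: [u8; 32]) -> Option<[u8; 32]> {
    match fork {
        Fork::Deneb => Some(parent_root),
        Fork::Merge | Fork::Capella => None,
        // Pre-merge forks never reach payload production, but the match stays irrefutable.
        Fork::Base | Fork::Altair => None,
    }
}

fn main() {
    assert_eq!(parent_beacon_block_root(Fork::Capella, [1; 32]), None);
    assert_eq!(parent_beacon_block_root(Fork::Deneb, [1; 32]), Some([1; 32]));
}
```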
- return BlockProposalContents::default_at_fork(fork).map_err(Into::into); + return Ok(BlockProposalContentsType::Full( + BlockProposalContents::Payload { + payload: FullPayload::default_at_fork(fork)?, + block_value: Uint256::zero(), + }, + )); } } else { latest_execution_payload_header_block_hash @@ -536,20 +559,27 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = - PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals); + let payload_attributes = PayloadAttributes::new( + timestamp, + random, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + ); // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // // This future is not executed here, it's up to the caller to await it. let block_contents = execution_layer - .get_payload::( + .get_payload( parent_hash, &payload_attributes, forkchoice_update_params, builder_params, fork, &chain.spec, + builder_boost_factor, + block_production_version, ) .await .map_err(BlockProductionError::GetPayloadFailed)?; diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 3fa577ff9..71e2473cd 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -1,4 +1,4 @@ -use parking_lot::RwLock; +use parking_lot::{RwLock, RwLockReadGuard}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use types::{Hash256, Slot}; @@ -16,6 +16,8 @@ pub enum Error { #[derive(Default, Debug)] pub struct HeadTracker(pub RwLock>); +pub type HeadTrackerReader<'a> = RwLockReadGuard<'a, HashMap>; + impl HeadTracker { /// Register a block with `Self`, so it may or may not be included in a `Self::heads` call. /// @@ -44,6 +46,11 @@ impl HeadTracker { /// Returns a `SszHeadTracker`, which contains all necessary information to restore the state /// of `Self` at some later point. + /// + /// Should ONLY be used for tests, due to the potential for database races. + /// + /// See + #[cfg(test)] pub fn to_ssz_container(&self) -> SszHeadTracker { SszHeadTracker::from_map(&self.0.read()) } diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 5f5907350..b5b42fcfc 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,3 +1,4 @@ +use crate::data_availability_checker::AvailableBlock; use crate::{errors::BeaconChainError as Error, metrics, BeaconChain, BeaconChainTypes}; use itertools::Itertools; use slog::debug; @@ -7,10 +8,9 @@ use state_processing::{ }; use std::borrow::Cow; use std::iter; -use std::sync::Arc; use std::time::Duration; -use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore}; -use types::{Hash256, SignedBlindedBeaconBlock, Slot}; +use store::{chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, KeyValueStore}; +use types::{Hash256, Slot}; /// Use a longer timeout on the pubkey cache. /// @@ -59,27 +59,30 @@ impl BeaconChain { /// Return the number of blocks successfully imported. pub fn import_historical_block_batch( &self, - blocks: Vec>>, + mut blocks: Vec>, ) -> Result { let anchor_info = self .store .get_anchor_info() .ok_or(HistoricalBlockError::NoAnchorInfo)?; + let blob_info = self.store.get_blob_info(); // Take all blocks with slots less than the oldest block slot. 
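The hunk below splits the batch with `slice::partition_point`, which assumes the blocks arrive sorted by slot and returns the length of the prefix satisfying the predicate. A small demonstration with plain slot numbers standing in for blocks:

```rust
fn main() {
    let oldest_block_slot: u64 = 100;
    // Backfill batches arrive sorted by slot.
    let mut block_slots = vec![90, 95, 99, 100, 101];

    // Index of the first element that is NOT strictly older than the anchor,
    // i.e. the number of relevant leading blocks.
    let num_relevant = block_slots.partition_point(|slot| *slot < oldest_block_slot);
    assert_eq!(num_relevant, 3);

    // Truncating in place keeps only the relevant prefix, without reallocating.
    block_slots.truncate(num_relevant);
    assert_eq!(block_slots, vec![90, 95, 99]);
}
```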
- let num_relevant = - blocks.partition_point(|block| block.slot() < anchor_info.oldest_block_slot); - let blocks_to_import = &blocks - .get(..num_relevant) - .ok_or(HistoricalBlockError::IndexOutOfBounds)?; + let num_relevant = blocks.partition_point(|available_block| { + available_block.block().slot() < anchor_info.oldest_block_slot + }); - if blocks_to_import.len() != blocks.len() { + let total_blocks = blocks.len(); + blocks.truncate(num_relevant); + let blocks_to_import = blocks; + + if blocks_to_import.len() != total_blocks { debug!( self.log, "Ignoring some historic blocks"; "oldest_block_slot" => anchor_info.oldest_block_slot, - "total_blocks" => blocks.len(), - "ignored" => blocks.len().saturating_sub(blocks_to_import.len()), + "total_blocks" => total_blocks, + "ignored" => total_blocks.saturating_sub(blocks_to_import.len()), ); } @@ -87,17 +90,24 @@ impl BeaconChain { return Ok(0); } + let n_blobs_lists_to_import = blocks_to_import + .iter() + .filter(|available_block| available_block.blobs().is_some()) + .count(); + let mut expected_block_root = anchor_info.oldest_block_parent; let mut prev_block_slot = anchor_info.oldest_block_slot; let mut chunk_writer = ChunkWriter::::new(&self.store.cold_db, prev_block_slot.as_usize())?; + let mut new_oldest_blob_slot = blob_info.oldest_blob_slot; - let mut cold_batch = Vec::with_capacity(blocks.len()); - let mut hot_batch = Vec::with_capacity(blocks.len()); + let mut blob_batch = Vec::with_capacity(n_blobs_lists_to_import); + let mut cold_batch = Vec::with_capacity(blocks_to_import.len()); + let mut hot_batch = Vec::with_capacity(blocks_to_import.len()); + let mut signed_blocks = Vec::with_capacity(blocks_to_import.len()); - for block in blocks_to_import.iter().rev() { - // Check chain integrity. - let block_root = block.canonical_root(); + for available_block in blocks_to_import.into_iter().rev() { + let (block_root, block, maybe_blobs) = available_block.deconstruct(); if block_root != expected_block_root { return Err(HistoricalBlockError::MismatchedBlockRoot { @@ -107,9 +117,16 @@ impl BeaconChain { .into()); } + let blinded_block = block.clone_as_blinded(); // Store block in the hot database without payload. self.store - .blinded_block_as_kv_store_ops(&block_root, block, &mut hot_batch); + .blinded_block_as_kv_store_ops(&block_root, &blinded_block, &mut hot_batch); + // Store the blobs too + if let Some(blobs) = maybe_blobs { + new_oldest_blob_slot = Some(block.slot()); + self.store + .blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch); + } // Store block roots, including at all skip slots in the freezer DB. for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() { @@ -119,21 +136,23 @@ impl BeaconChain { prev_block_slot = block.slot(); expected_block_root = block.message().parent_root(); - // If we've reached genesis, add the genesis block root to the batch and set the - // anchor slot to 0 to indicate completion. + // If we've reached genesis, add the genesis block root to the batch for all slots + // between 0 and the first block slot, and set the anchor slot to 0 to indicate + // completion. 
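The comment above describes the freezer's skip-slot convention: the block-roots column stores one root per slot, and slots without a block repeat the previous block's root, which is why the genesis root has to be written for every slot below the first backfilled block. A toy version of that fill:

```rust
// Build the frozen block-roots column for slots 0..=first_block_slot, where
// every skip slot repeats the most recent block root (here, genesis).
fn fill_block_roots(genesis_root: u64, first_block_slot: usize, first_block_root: u64) -> Vec<u64> {
    let mut roots = vec![genesis_root; first_block_slot];
    roots.push(first_block_root);
    roots
}

fn main() {
    // Genesis at slot 0, first backfilled block at slot 3: slots 0-2 carry genesis.
    let roots = fill_block_roots(0xAA, 3, 0xBB);
    assert_eq!(roots, vec![0xAA, 0xAA, 0xAA, 0xBB]);
}
```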
if expected_block_root == self.genesis_block_root { let genesis_slot = self.spec.genesis_slot; - chunk_writer.set( - genesis_slot.as_usize(), - self.genesis_block_root, - &mut cold_batch, - )?; + for slot in genesis_slot.as_usize()..block.slot().as_usize() { + chunk_writer.set(slot, self.genesis_block_root, &mut cold_batch)?; + } prev_block_slot = genesis_slot; expected_block_root = Hash256::zero(); break; } + signed_blocks.push(block); } chunk_writer.write(&mut cold_batch)?; + // these were pushed in reverse order so we reverse again + signed_blocks.reverse(); // Verify signatures in one batch, holding the pubkey cache lock for the shortest duration // possible. For each block fetch the parent root from its successor. Slicing from index 1 @@ -144,15 +163,16 @@ impl BeaconChain { .validator_pubkey_cache .try_read_for(PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(HistoricalBlockError::ValidatorPubkeyCacheTimeout)?; - let block_roots = blocks_to_import + let block_roots = signed_blocks .get(1..) .ok_or(HistoricalBlockError::IndexOutOfBounds)? .iter() .map(|block| block.parent_root()) .chain(iter::once(anchor_info.oldest_block_parent)); - let signature_set = blocks_to_import + let signature_set = signed_blocks .iter() .zip_eq(block_roots) + .filter(|&(_block, block_root)| (block_root != self.genesis_block_root)) .map(|(block, block_root)| { block_proposal_signature_set_from_parts( block, @@ -180,9 +200,26 @@ impl BeaconChain { // Write the I/O batches to disk, writing the blocks themselves first, as it's better // for the hot DB to contain extra blocks than for the cold DB to point to blocks that // do not exist. + self.store.blobs_db.do_atomically(blob_batch)?; self.store.hot_db.do_atomically(hot_batch)?; self.store.cold_db.do_atomically(cold_batch)?; + let mut anchor_and_blob_batch = Vec::with_capacity(2); + + // Update the blob info. + if new_oldest_blob_slot != blob_info.oldest_blob_slot { + if let Some(oldest_blob_slot) = new_oldest_blob_slot { + let new_blob_info = BlobInfo { + oldest_blob_slot: Some(oldest_blob_slot), + ..blob_info.clone() + }; + anchor_and_blob_batch.push( + self.store + .compare_and_set_blob_info(blob_info, new_blob_info)?, + ); + } + } + // Update the anchor. let new_anchor = AnchorInfo { oldest_block_slot: prev_block_slot, @@ -190,8 +227,11 @@ impl BeaconChain { ..anchor_info }; let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot); - self.store - .compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?; + anchor_and_blob_batch.push( + self.store + .compare_and_set_anchor_info(Some(anchor_info), Some(new_anchor))?, + ); + self.store.hot_db.do_atomically(anchor_and_blob_batch)?; // If backfill has completed and the chain is configured to reconstruct historic states, // send a message to the background migrator instructing it to begin reconstruction. @@ -203,6 +243,6 @@ impl BeaconChain { self.store_migrator.process_reconstruction(); } - Ok(blocks_to_import.len()) + Ok(num_relevant) } } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs new file mode 100644 index 000000000..924cc2652 --- /dev/null +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -0,0 +1,78 @@ +use kzg::{Blob as KzgBlob, Error as KzgError, Kzg}; +use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof}; + +/// Converts a blob ssz List object to an array to be used with the kzg +/// crypto library. 
+fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result { + KzgBlob::from_bytes(blob.as_ref()).map_err(Into::into) +} + +/// Validate a single blob-commitment-proof triplet from a `BlobSidecar`. +pub fn validate_blob( + kzg: &Kzg, + blob: &Blob, + kzg_commitment: KzgCommitment, + kzg_proof: KzgProof, +) -> Result<(), KzgError> { + let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); + let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) +} + +/// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`. +pub fn validate_blobs( + kzg: &Kzg, + expected_kzg_commitments: &[KzgCommitment], + blobs: Vec<&Blob>, + kzg_proofs: &[KzgProof], +) -> Result<(), KzgError> { + let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); + let blobs = blobs + .into_iter() + .map(|blob| ssz_blob_to_crypto_blob::(blob)) + .collect::, KzgError>>()?; + + kzg.verify_blob_kzg_proof_batch(&blobs, expected_kzg_commitments, kzg_proofs) +} + +/// Compute the kzg proof given an ssz blob and its kzg commitment. +pub fn compute_blob_kzg_proof( + kzg: &Kzg, + blob: &Blob, + kzg_commitment: KzgCommitment, +) -> Result { + let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + kzg.compute_blob_kzg_proof(&kzg_blob, kzg_commitment) +} + +/// Compute the kzg commitment for a given blob. +pub fn blob_to_kzg_commitment( + kzg: &Kzg, + blob: &Blob, +) -> Result { + let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + kzg.blob_to_kzg_commitment(&kzg_blob) +} + +/// Compute the kzg proof for a given blob and an evaluation point z. +pub fn compute_kzg_proof( + kzg: &Kzg, + blob: &Blob, + z: Hash256, +) -> Result<(KzgProof, Hash256), KzgError> { + let z = z.0.into(); + let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + kzg.compute_kzg_proof(&kzg_blob, &z) + .map(|(proof, z)| (proof, Hash256::from_slice(&z.to_vec()))) +} + +/// Verify a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` +pub fn verify_kzg_proof( + kzg: &Kzg, + kzg_commitment: KzgCommitment, + kzg_proof: KzgProof, + z: Hash256, + y: Hash256, +) -> Result { + kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof) +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4ea1eeee0..ce841b106 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,4 +1,5 @@ pub mod attestation_rewards; +pub mod attestation_simulator; pub mod attestation_verification; mod attester_cache; pub mod beacon_block_reward; @@ -7,13 +8,17 @@ mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; mod beacon_snapshot; +pub mod blob_verification; pub mod block_reward; mod block_times_cache; mod block_verification; +pub mod block_verification_types; pub mod builder; pub mod canonical_head; pub mod capella_readiness; pub mod chain_config; +pub mod data_availability_checker; +pub mod deneb_readiness; mod early_attester_cache; mod errors; pub mod eth1_chain; @@ -24,6 +29,7 @@ pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; +pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; pub mod merge_readiness; @@ -32,8 +38,10 @@ pub mod migrate; mod naive_aggregation_pool; mod observed_aggregates; mod observed_attesters; +mod observed_blob_sidecars; pub mod observed_block_producers; pub mod 
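`validate_blobs` above converts every blob before a single batched verification, collecting into `Result<Vec<_>, _>` so the first conversion failure aborts the whole batch. The same shape, with `str::parse` standing in for `ssz_blob_to_crypto_blob`:

```rust
fn main() {
    // All conversions succeed: the full vector is available for batch verification.
    let ok: Result<Vec<u32>, _> = ["1", "2", "3"].iter().map(|s| s.parse::<u32>()).collect();
    assert_eq!(ok.unwrap(), vec![1, 2, 3]);

    // One bad element short-circuits the collection with its error.
    let err: Result<Vec<u32>, _> = ["1", "x", "3"].iter().map(|s| s.parse::<u32>()).collect();
    assert!(err.is_err());
}
```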
observed_operations; +mod observed_slashable; pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; @@ -51,7 +59,8 @@ pub mod validator_monitor; pub mod validator_pubkey_cache; pub use self::beacon_chain::{ - AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, + AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, @@ -63,15 +72,19 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ - get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock, - IntoExecutionPendingBlock, IntoGossipVerifiedBlock, + get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, + IntoExecutionPendingBlock, IntoGossipVerifiedBlockContents, PayloadVerificationOutcome, + PayloadVerificationStatus, }; +pub use block_verification_types::AvailabilityPendingExecutedBlock; +pub use block_verification_types::ExecutedBlock; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; pub use execution_payload::NotifyExecutionLayer; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; +pub use kzg::TrustedSetup; pub use metrics::scrape_for_metrics; pub use migrate::MigratorConfig; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 638d2b401..791d63ccf 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -34,7 +34,7 @@ pub enum Error { SigSlotStartIsNone, /// Failed to construct a LightClientFinalityUpdate from state. FailedConstructingUpdate, - /// Beacon chain error occured. + /// Beacon chain error occurred. BeaconChainError(BeaconChainError), LightClientUpdateError(LightClientUpdateError), } @@ -67,7 +67,7 @@ impl VerifiedLightClientFinalityUpdate { chain: &BeaconChain, seen_timestamp: Duration, ) -> Result { - let gossiped_finality_slot = light_client_finality_update.finalized_header.slot; + let gossiped_finality_slot = light_client_finality_update.finalized_header.beacon.slot; let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); let signature_slot = light_client_finality_update.signature_slot; let start_time = chain.slot_clock.start_of(signature_slot); @@ -88,7 +88,7 @@ impl VerifiedLightClientFinalityUpdate { .get_blinded_block(&finalized_block_root)? 
.ok_or(Error::FailedConstructingUpdate)?; let latest_seen_finality_update_slot = match latest_seen_finality_update.as_ref() { - Some(update) => update.finalized_header.slot, + Some(update) => update.finalized_header.beacon.slot, None => Slot::new(0), }; diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 2d1a5cf97..374cc9a77 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -37,7 +37,7 @@ pub enum Error { FailedConstructingUpdate, /// Unknown block with parent root. UnknownBlockParentRoot(Hash256), - /// Beacon chain error occured. + /// Beacon chain error occurred. BeaconChainError(BeaconChainError), LightClientUpdateError(LightClientUpdateError), } @@ -71,7 +71,7 @@ impl VerifiedLightClientOptimisticUpdate { chain: &BeaconChain, seen_timestamp: Duration, ) -> Result { - let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.slot; + let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.beacon.slot; let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); let signature_slot = light_client_optimistic_update.signature_slot; let start_time = chain.slot_clock.start_of(signature_slot); @@ -88,7 +88,7 @@ impl VerifiedLightClientOptimisticUpdate { .get_state(&attested_block.state_root(), Some(attested_block.slot()))? .ok_or(Error::FailedConstructingUpdate)?; let latest_seen_optimistic_update_slot = match latest_seen_optimistic_update.as_ref() { - Some(update) => update.attested_header.slot, + Some(update) => update.attested_header.beacon.slot, None => Slot::new(0), }; @@ -114,6 +114,7 @@ impl VerifiedLightClientOptimisticUpdate { // otherwise queue let canonical_root = light_client_optimistic_update .attested_header + .beacon .canonical_root(); if canonical_root != head_block.message().parent_root() { diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index dff663ded..ad095b37b 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -10,6 +10,20 @@ use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; /// The maximum time to wait for the snapshot cache lock during a metrics scrape. const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100); +// Attestation simulator metrics +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_head_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL: &str = + "validator_monitor_attestation_simulator_head_attester_miss_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_target_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL: &str = + "validator_monitor_attestation_simulator_target_attester_miss_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_source_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL: &str = + "validator_monitor_attestation_simulator_source_attester_miss_total"; + lazy_static! { /* * Block Processing @@ -40,6 +54,14 @@ lazy_static! 
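The repeated `.beacon.slot` fixes in the two light client verification files above follow from the light client header type now wrapping a beacon block header rather than being one (leaving room for execution data beside it). A minimal before/after sketch with simplified stand-in types:

```rust
struct BeaconBlockHeader {
    slot: u64,
}

// The light client header now nests the beacon header instead of being one.
struct LightClientHeader {
    beacon: BeaconBlockHeader,
}

fn main() {
    let header = LightClientHeader {
        beacon: BeaconBlockHeader { slot: 7 },
    };
    // Previously `header.slot`; callers now reach through the inner header.
    assert_eq!(header.beacon.slot, 7);
}
```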
{ "beacon_block_processing_block_root_seconds", "Time spent calculating the block root when processing a block." ); + pub static ref BLOCK_HEADER_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( + "beacon_block_header_processing_block_root_seconds", + "Time spent calculating the block root for a beacon block header." + ); + pub static ref BLOCK_PROCESSING_BLOB_ROOT: Result = try_create_histogram( + "beacon_block_processing_blob_root_seconds", + "Time spent calculating the blob root when processing a block." + ); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( "beacon_block_processing_db_read_seconds", "Time spent loading block and state from DB for block processing" @@ -282,6 +304,11 @@ lazy_static! { "Count of times the early attester cache returns an attestation" ); +} + +// Second lazy-static block is used to account for macro recursion limit. +lazy_static! { + /* * Attestation Production */ @@ -301,10 +328,7 @@ lazy_static! { "attestation_production_cache_prime_seconds", "Time spent loading a new state from the disk due to a cache miss" ); -} -// Second lazy-static block is used to account for macro recursion limit. -lazy_static! { /* * Fork Choice */ @@ -380,6 +404,8 @@ lazy_static! { try_create_histogram("beacon_persist_eth1_cache", "Time taken to persist the eth1 caches"); pub static ref PERSIST_FORK_CHOICE: Result = try_create_histogram("beacon_persist_fork_choice", "Time taken to persist the fork choice struct"); + pub static ref PERSIST_DATA_AVAILABILITY_CHECKER: Result = + try_create_histogram("beacon_persist_data_availability_checker", "Time taken to persist the data availability checker"); /* * Eth1 @@ -980,6 +1006,30 @@ lazy_static! { "beacon_pre_finalization_block_lookup_count", "Number of block roots subject to single block lookups" ); + + /* + * Blob sidecar Verification + */ + pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result = try_create_int_counter( + "beacon_blobs_sidecar_processing_requests_total", + "Count of all blob sidecars submitted for processing" + ); + pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "beacon_blobs_sidecar_processing_successes_total", + "Number of blob sidecars verified for gossip" + ); + pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_blobs_sidecar_gossip_verification_seconds", + "Full runtime of blob sidecars gossip verification" + ); + pub static ref BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION: Result = try_create_histogram( + "blob_sidecar_inclusion_proof_verification_seconds", + "Time taken to verify blob sidecar inclusion proof" + ); + pub static ref BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: Result = try_create_histogram( + "blob_sidecar_inclusion_proof_computation_seconds", + "Time taken to compute blob sidecar inclusion proof" + ); } // Fifth lazy-static block is used to account for macro recursion limit. @@ -1009,6 +1059,75 @@ lazy_static! 
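As the relocated comment says, the metrics are split across several `lazy_static!` invocations because one giant block can exceed the macro recursion limit; separate blocks each expand independently. A compilable miniature of that layout (metric names borrowed from the hunk, `lazy_static` crate assumed):

```rust
use lazy_static::lazy_static;
use std::collections::HashMap;

lazy_static! {
    // First block: one registry's worth of metric descriptions.
    static ref BLOCK_METRICS: HashMap<&'static str, &'static str> =
        HashMap::from([("beacon_block_processing_blob_root_seconds", "Blob root timing")]);
}

// A second `lazy_static!` invocation expands as an independent macro call,
// so no single expansion grows past the recursion limit.
lazy_static! {
    static ref KZG_METRICS: HashMap<&'static str, &'static str> =
        HashMap::from([("kzg_verification_batch_seconds", "Batched kzg verification timing")]);
}

fn main() {
    assert_eq!(BLOCK_METRICS.len(), 1);
    assert_eq!(KZG_METRICS.len(), 1);
}
```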
{ "beacon_aggregated_attestation_subsets_total", "Count of new aggregated attestations that are subsets of already known aggregates" ); + /* + * Attestation simulator metrics + */ + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot head attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot head attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot target attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot target attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot source attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot source attester \ + during per slot processing", + ); + /* + * Missed block metrics + */ + pub static ref VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL: Result = try_create_int_counter_vec( + "validator_monitor_missed_blocks_total", + "Number of non-finalized blocks missed", + &["validator"] + ); + + /* + * Kzg related metrics + */ + pub static ref KZG_VERIFICATION_SINGLE_TIMES: Result = + try_create_histogram("kzg_verification_single_seconds", "Runtime of single kzg verification"); + pub static ref KZG_VERIFICATION_BATCH_TIMES: Result = + try_create_histogram("kzg_verification_batch_seconds", "Runtime of batched kzg verification"); + + pub static ref BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_block_production_blobs_verification_seconds", + "Time taken to verify blobs against commitments and creating BlobSidecar objects in block production" + ); + /* + * Availability related metrics + */ + pub static ref BLOCK_AVAILABILITY_DELAY: Result = try_create_histogram_with_buckets( + "block_availability_delay", + "Duration between start of the slot and the time at which all components of the block are available.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + ); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 6353a64e0..ad597bf92 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ 
b/beacon_node/beacon_chain/src/migrate.rs @@ -117,6 +117,7 @@ pub enum PruningError { pub enum Notification { Finalization(FinalizationNotification), Reconstruction, + PruneBlobs(Epoch), } pub struct FinalizationNotification { @@ -191,6 +192,14 @@ impl, Cold: ItemStore> BackgroundMigrator>, log: &Logger) { if let Err(e) = db.reconstruct_historic_states() { error!( @@ -201,6 +210,20 @@ impl, Cold: ItemStore> BackgroundMigrator>, + data_availability_boundary: Epoch, + log: &Logger, + ) { + if let Err(e) = db.try_prune_blobs(false, data_availability_boundary) { + error!( + log, + "Blob pruning failed"; + "error" => ?e, + ); + } + } + /// If configured to run in the background, send `notif` to the background thread. /// /// Return `None` if the message was sent to the background thread, `Some(notif)` otherwise. @@ -367,29 +390,44 @@ impl, Cold: ItemStore> BackgroundMigrator Notification::Reconstruction, - ( - Notification::Finalization(fin1), - Notification::Finalization(fin2), - ) => { - if fin2.finalized_checkpoint.epoch > fin1.finalized_checkpoint.epoch - { - other - } else { - best - } - } - }); - + let mut reconstruction_notif = None; + let mut finalization_notif = None; + let mut prune_blobs_notif = None; match notif { - Notification::Reconstruction => Self::run_reconstruction(db.clone(), &log), - Notification::Finalization(fin) => Self::run_migration(db.clone(), fin, &log), + Notification::Reconstruction => reconstruction_notif = Some(notif), + Notification::Finalization(fin) => finalization_notif = Some(fin), + Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab), + } + // Read the rest of the messages in the channel, taking the best of each type. + for notif in rx.try_iter() { + match notif { + Notification::Reconstruction => reconstruction_notif = Some(notif), + Notification::Finalization(fin) => { + if let Some(current) = finalization_notif.as_mut() { + if fin.finalized_checkpoint.epoch + > current.finalized_checkpoint.epoch + { + *current = fin; + } + } else { + finalization_notif = Some(fin); + } + } + Notification::PruneBlobs(dab) => { + prune_blobs_notif = std::cmp::max(prune_blobs_notif, Some(dab)); + } + } + } + // If reconstruction is on-going, ignore finalization migration and blob pruning. + if reconstruction_notif.is_some() { + Self::run_reconstruction(db.clone(), &log); + } else { + if let Some(fin) = finalization_notif { + Self::run_migration(db.clone(), fin, &log); + } + if let Some(dab) = prune_blobs_notif { + Self::run_prune_blobs(db.clone(), dab, &log); + } } } }); @@ -630,13 +668,14 @@ impl, Cold: ItemStore> BackgroundMigrator> = abandoned_blocks + let mut batch: Vec> = abandoned_blocks .into_iter() .map(Into::into) .flat_map(|block_root: Hash256| { [ StoreOp::DeleteBlock(block_root), StoreOp::DeleteExecutionPayload(block_root), + StoreOp::DeleteBlobs(block_root), ] }) .chain( @@ -646,8 +685,6 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator { + finalized_slot: Slot, + /// Stores all received blob indices for a given `(ValidatorIndex, Slot)` tuple. + items: HashMap>, + _phantom: PhantomData, +} + +impl Default for ObservedBlobSidecars { + /// Instantiates `Self` with `finalized_slot == 0`. + fn default() -> Self { + Self { + finalized_slot: Slot::new(0), + items: HashMap::new(), + _phantom: PhantomData, + } + } +} + +impl ObservedBlobSidecars { + /// Observe the `blob_sidecar` at (`blob_sidecar.block_proposer_index, blob_sidecar.slot`). 
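The reworked migrator loop above first blocks on one notification, then drains the queue with `try_iter` and keeps only the most useful notification of each kind, instead of handling every message individually. A reduced sketch of that coalescing with a std `mpsc` channel and plain epoch numbers:

```rust
use std::sync::mpsc;

// Stand-ins: epochs as plain u64, two of the real notification kinds.
enum Notification {
    Finalization(u64),
    PruneBlobs(u64),
}

fn main() {
    let (tx, rx) = mpsc::channel();
    for notif in [
        Notification::Finalization(10),
        Notification::PruneBlobs(3),
        Notification::Finalization(12),
        Notification::PruneBlobs(5),
    ] {
        tx.send(notif).unwrap();
    }

    let mut finalization = None;
    let mut prune_blobs = None;
    // Drain everything currently queued without blocking; the highest epoch wins.
    for notif in rx.try_iter() {
        match notif {
            Notification::Finalization(epoch) => {
                finalization = std::cmp::max(finalization, Some(epoch))
            }
            Notification::PruneBlobs(epoch) => {
                prune_blobs = std::cmp::max(prune_blobs, Some(epoch))
            }
        }
    }
    assert_eq!(finalization, Some(12));
    assert_eq!(prune_blobs, Some(5));
}
```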
+ /// This will update `self` so future calls to it indicate that this `blob_sidecar` is known. + /// + /// The supplied `blob_sidecar` **MUST** have completed proposer signature verification. + pub fn observe_sidecar(&mut self, blob_sidecar: &BlobSidecar) -> Result { + self.sanitize_blob_sidecar(blob_sidecar)?; + + let blob_indices = self + .items + .entry(ProposalKey { + slot: blob_sidecar.slot(), + proposer: blob_sidecar.block_proposer_index(), + }) + .or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block())); + let did_not_exist = blob_indices.insert(blob_sidecar.index); + + Ok(!did_not_exist) + } + + /// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window. + pub fn proposer_is_known(&self, blob_sidecar: &BlobSidecar) -> Result { + self.sanitize_blob_sidecar(blob_sidecar)?; + let is_known = self + .items + .get(&ProposalKey { + slot: blob_sidecar.slot(), + proposer: blob_sidecar.block_proposer_index(), + }) + .map_or(false, |blob_indices| { + blob_indices.contains(&blob_sidecar.index) + }); + Ok(is_known) + } + + fn sanitize_blob_sidecar(&self, blob_sidecar: &BlobSidecar) -> Result<(), Error> { + if blob_sidecar.index >= T::max_blobs_per_block() as u64 { + return Err(Error::InvalidBlobIndex(blob_sidecar.index)); + } + let finalized_slot = self.finalized_slot; + if finalized_slot > 0 && blob_sidecar.slot() <= finalized_slot { + return Err(Error::FinalizedBlob { + slot: blob_sidecar.slot(), + finalized_slot, + }); + } + + Ok(()) + } + + /// Prune `blob_sidecar` observations for slots less than or equal to the given slot. + pub fn prune(&mut self, finalized_slot: Slot) { + if finalized_slot == 0 { + return; + } + + self.finalized_slot = finalized_slot; + self.items.retain(|k, _| k.slot > finalized_slot); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bls::Hash256; + use std::sync::Arc; + use types::{BlobSidecar, MainnetEthSpec}; + + type E = MainnetEthSpec; + + fn get_blob_sidecar(slot: u64, proposer_index: u64, index: u64) -> Arc> { + let mut blob_sidecar = BlobSidecar::empty(); + blob_sidecar.signed_block_header.message.slot = slot.into(); + blob_sidecar.signed_block_header.message.proposer_index = proposer_index; + blob_sidecar.index = index; + Arc::new(blob_sidecar) + } + + #[test] + fn pruning() { + let mut cache = ObservedBlobSidecars::default(); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 0, "no slots should be present"); + + // Slot 0, index 0 + let proposer_index_a = 420; + let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0); + + assert_eq!( + cache.observe_sidecar(&sidecar_a), + Ok(false), + "can observe proposer, indicates proposer unobserved" + ); + + /* + * Preconditions. + */ + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!( + cache.items.len(), + 1, + "only one (validator_index, slot) tuple should be present" + ); + + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), + 1, + "only one proposer should be present" + ); + + /* + * Check that a prune at the genesis slot does nothing. 
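For reference, the cache's observation contract boils down to a `HashMap<(slot, proposer), HashSet<index>>` where the returned boolean answers "was this index already present?". A stand-alone sketch of exactly that contract, which the tests below exercise:

```rust
use std::collections::{HashMap, HashSet};

type Cache = HashMap<(u64, u64), HashSet<u64>>;

/// Returns `true` if this (slot, proposer, index) triple was seen before.
fn observe(cache: &mut Cache, slot: u64, proposer: u64, index: u64) -> bool {
    let indices = cache.entry((slot, proposer)).or_default();
    // `insert` returns true when the value is new, so the answer is its negation.
    !indices.insert(index)
}

fn main() {
    let mut cache = Cache::new();
    assert!(!observe(&mut cache, 0, 420, 0)); // first sighting of index 0
    assert!(observe(&mut cache, 0, 420, 0)); // duplicate sighting
    assert!(!observe(&mut cache, 0, 420, 1)); // a new index is unobserved
}
```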
+ */ + + cache.prune(Slot::new(0)); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), + 1, + "only one proposer should be present" + ); + + /* + * Check that a prune empties the cache + */ + + cache.prune(E::slots_per_epoch().into()); + assert_eq!( + cache.finalized_slot, + Slot::from(E::slots_per_epoch()), + "finalized slot is updated" + ); + assert_eq!(cache.items.len(), 0, "no items left"); + + /* + * Check that we can't insert a finalized sidecar + */ + + // First slot of finalized epoch + let block_b = get_blob_sidecar(E::slots_per_epoch(), 419, 0); + + assert_eq!( + cache.observe_sidecar(&block_b), + Err(Error::FinalizedBlob { + slot: E::slots_per_epoch().into(), + finalized_slot: E::slots_per_epoch().into(), + }), + "can't insert finalized sidecar" + ); + + assert_eq!(cache.items.len(), 0, "sidecar was not added"); + + /* + * Check that we _can_ insert a non-finalized block + */ + + let three_epochs = E::slots_per_epoch() * 3; + + // Slot three epochs after genesis (not yet finalized) + let proposer_index_b = 421; + let block_b = get_blob_sidecar(three_epochs, proposer_index_b, 0); + + assert_eq!( + cache.observe_sidecar(&block_b), + Ok(false), + "can insert non-finalized block" + ); + + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs))) + .expect("the three epochs slot should be present"); + assert_eq!( + cached_blob_indices.len(), + 1, + "only one proposer should be present" + ); + + /* + * Check that a prune doesn't wipe later blocks + */ + + let two_epochs = E::slots_per_epoch() * 2; + cache.prune(two_epochs.into()); + + assert_eq!( + cache.finalized_slot, + Slot::from(two_epochs), + "finalized slot is updated" + ); + + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs))) + .expect("the three epochs slot should be present"); + assert_eq!( + cached_blob_indices.len(), + 1, + "only one proposer should be present" + ); + } + + #[test] + fn simple_observations() { + let mut cache = ObservedBlobSidecars::default(); + + // Slot 0, index 0 + let proposer_index_a = 420; + let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0); + + assert_eq!( + cache.proposer_is_known(&sidecar_a), + Ok(false), + "no observation in empty cache" + ); + + assert_eq!( + cache.observe_sidecar(&sidecar_a), + Ok(false), + "can observe proposer, indicates proposer unobserved" + ); + + assert_eq!( + cache.proposer_is_known(&sidecar_a), + Ok(true), + "observed block is indicated as true" + ); + + assert_eq!( + cache.observe_sidecar(&sidecar_a), + Ok(true), + "observing again indicates true" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), + 1, + "only one proposer should be present" + ); + + // Slot 1, index 0 + + let proposer_index_b = 421; + let sidecar_b = get_blob_sidecar(1, proposer_index_b, 0); + + assert_eq!( +
cache.proposer_is_known(&sidecar_b), + Ok(false), + "no observation for new slot" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_b), + Ok(false), + "can observe proposer for new slot, indicates proposer unobserved" + ); + assert_eq!( + cache.proposer_is_known(&sidecar_b), + Ok(true), + "observed block in slot 1 is indicated as true" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_b), + Ok(true), + "observing slot 1 again indicates true" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 2, "two slots should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), + 1, + "only one proposer should be present in slot 0" + ); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(1))) + .expect("slot one should be present"); + assert_eq!( + cached_blob_indices.len(), + 1, + "only one proposer should be present in slot 1" + ); + + // Slot 0, index 1 + let sidecar_c = get_blob_sidecar(0, proposer_index_a, 1); + + assert_eq!( + cache.proposer_is_known(&sidecar_c), + Ok(false), + "no observation for new index" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_c), + Ok(false), + "can observe new index, indicates sidecar unobserved for new index" + ); + assert_eq!( + cache.proposer_is_known(&sidecar_c), + Ok(true), + "observed new sidecar is indicated as true" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_c), + Ok(true), + "observing new sidecar again indicates true" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 2, "two slots should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), + 2, + "two blob indices should be present in slot 0" + ); + + // Create a sidecar sharing slot and proposer but with a different block root.
+ let mut sidecar_d: BlobSidecar = BlobSidecar { + index: sidecar_c.index, + blob: sidecar_c.blob.clone(), + kzg_commitment: sidecar_c.kzg_commitment, + kzg_proof: sidecar_c.kzg_proof, + signed_block_header: sidecar_c.signed_block_header.clone(), + kzg_commitment_inclusion_proof: sidecar_c.kzg_commitment_inclusion_proof.clone(), + }; + sidecar_d.signed_block_header.message.body_root = Hash256::repeat_byte(7); + assert_eq!( + cache.proposer_is_known(&sidecar_d), + Ok(true), + "there has been an observation for this proposer index" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_d), + Ok(true), + "indicates sidecar proposer was observed" + ); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), + 2, + "two blob indices should be present in slot 0" + ); + + // Try adding an out of bounds index + let invalid_index = E::max_blobs_per_block() as u64; + let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index); + assert_eq!( + cache.observe_sidecar(&sidecar_d), + Err(Error::InvalidBlobIndex(invalid_index)), + "cannot add an index > MaxBlobsPerBlock" + ); + } +} diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index f76fc5379..096c8bff7 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -16,9 +16,15 @@ pub enum Error { } #[derive(Eq, Hash, PartialEq, Debug, Default)] -struct ProposalKey { - slot: Slot, - proposer: u64, +pub struct ProposalKey { + pub slot: Slot, + pub proposer: u64, +} + +impl ProposalKey { + pub fn new(proposer: u64, slot: Slot) -> Self { + Self { slot, proposer } + } } /// Maintains a cache of observed `(block.slot, block.proposer)`. diff --git a/beacon_node/beacon_chain/src/observed_slashable.rs b/beacon_node/beacon_chain/src/observed_slashable.rs new file mode 100644 index 000000000..001a0d4a8 --- /dev/null +++ b/beacon_node/beacon_chain/src/observed_slashable.rs @@ -0,0 +1,486 @@ +//! Provides the `ObservedSlashable` struct which tracks slashable messages seen in +//! gossip or via RPC. Useful in supporting `broadcast_validation` in the Beacon API. + +use crate::observed_block_producers::Error; +use std::collections::hash_map::Entry; +use std::collections::{HashMap, HashSet}; +use std::marker::PhantomData; +use types::{EthSpec, Hash256, Slot, Unsigned}; + +#[derive(Eq, Hash, PartialEq, Debug, Default)] +pub struct ProposalKey { + pub slot: Slot, + pub proposer: u64, +} + +/// Maintains a cache of observed `(block.slot, block.proposer)`. +/// +/// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you +/// must call `Self::prune` manually. +/// +/// The maximum size of the cache is determined by `slots_since_finality * +/// VALIDATOR_REGISTRY_LIMIT`. This is quite a large size, so it's important that upstream +/// functions only use this cache for blocks with a valid signature. Only allowing valid signed +/// blocks reduces the theoretical maximum size of this cache to `slots_since_finality * +/// active_validator_count`, however in reality that is more like `slots_since_finality * +/// known_distinct_shufflings` which is much smaller. +pub struct ObservedSlashable { + finalized_slot: Slot, + items: HashMap>, + _phantom: PhantomData, +} + +impl Default for ObservedSlashable { + /// Instantiates `Self` with `finalized_slot == 0`. 
+ fn default() -> Self { + Self { + finalized_slot: Slot::new(0), + items: HashMap::new(), + _phantom: PhantomData, + } + } +} + +impl ObservedSlashable { + /// Observe that the `header` was produced by `header.proposer_index` at `header.slot`. This will + /// update `self` so future calls to it indicate that this block is known. + /// + /// The supplied `block` **MUST** be signature verified (see struct-level documentation). + /// + /// ## Errors + /// + /// - `header.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. + /// - `header.slot` is equal to or less than the latest pruned `finalized_slot`. + pub fn observe_slashable( + &mut self, + slot: Slot, + proposer_index: u64, + block_root: Hash256, + ) -> Result<(), Error> { + self.sanitize_header(slot, proposer_index)?; + + let key = ProposalKey { + slot, + proposer: proposer_index, + }; + + let entry = self.items.entry(key); + + match entry { + Entry::Occupied(mut occupied_entry) => { + let block_roots = occupied_entry.get_mut(); + block_roots.insert(block_root); + } + Entry::Vacant(vacant_entry) => { + let block_roots = HashSet::from([block_root]); + vacant_entry.insert(block_roots); + } + } + + Ok(()) + } + + /// Returns `Ok(true)` if the `block_root` is slashable, `Ok(false)` if not. Does not + /// update the cache, so calling this function multiple times will continue to return + /// `Ok(false)`, until `Self::observe_proposer` is called. + /// + /// ## Errors + /// + /// - `proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. + /// - `slot` is equal to or less than the latest pruned `finalized_slot`. + pub fn is_slashable( + &self, + slot: Slot, + proposer_index: u64, + block_root: Hash256, + ) -> Result { + self.sanitize_header(slot, proposer_index)?; + + let key = ProposalKey { + slot, + proposer: proposer_index, + }; + + if let Some(block_roots) = self.items.get(&key) { + let no_prev_known_blocks = + block_roots.difference(&HashSet::from([block_root])).count() == 0; + + Ok(!no_prev_known_blocks) + } else { + Ok(false) + } + } + + /// Returns `Ok(())` if the given `header` is sane. + fn sanitize_header(&self, slot: Slot, proposer_index: u64) -> Result<(), Error> { + if proposer_index >= E::ValidatorRegistryLimit::to_u64() { + return Err(Error::ValidatorIndexTooHigh(proposer_index)); + } + + let finalized_slot = self.finalized_slot; + if finalized_slot > 0 && slot <= finalized_slot { + return Err(Error::FinalizedBlock { + slot, + finalized_slot, + }); + } + + Ok(()) + } + + /// Removes all observations of blocks equal to or earlier than `finalized_slot`. + /// + /// Stores `finalized_slot` in `self`, so that `self` will reject any block that has a slot + /// equal to or less than `finalized_slot`. + /// + /// No-op if `finalized_slot == 0`. 
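`is_slashable` above asks whether any *other* root has been observed for the same `(slot, proposer)` key, computed via a set difference. The same decision sketched with plain types:

```rust
use std::collections::{HashMap, HashSet};

type Roots = HashMap<(u64, u64), HashSet<[u8; 32]>>;

// Slashable iff some previously observed root differs from the queried one.
fn is_slashable(roots: &Roots, slot: u64, proposer: u64, root: [u8; 32]) -> bool {
    roots
        .get(&(slot, proposer))
        .map_or(false, |seen| seen.iter().any(|r| *r != root))
}

fn main() {
    let mut roots = Roots::new();
    roots.entry((0, 1)).or_default().insert([0xAA; 32]);

    // Seeing the same proposal again is not slashable...
    assert!(!is_slashable(&roots, 0, 1, [0xAA; 32]));
    // ...but a second, distinct root for the same slot and proposer is.
    assert!(is_slashable(&roots, 0, 1, [0xBB; 32]));
}
```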
+ pub fn prune(&mut self, finalized_slot: Slot) { + if finalized_slot == 0 { + return; + } + + self.finalized_slot = finalized_slot; + self.items.retain(|key, _| key.slot > finalized_slot); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::{BeaconBlock, Graffiti, MainnetEthSpec}; + + type E = MainnetEthSpec; + + fn get_block(slot: u64, proposer: u64) -> BeaconBlock<E> { + let mut block = BeaconBlock::empty(&E::default_spec()); + *block.slot_mut() = slot.into(); + *block.proposer_index_mut() = proposer; + block + } + + #[test] + fn pruning() { + let mut cache = ObservedSlashable::<E>::default(); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 0, "no slots should be present"); + + // Slot 0, proposer 0 + let block_a = get_block(0, 0); + let block_root = block_a.canonical_root(); + + assert_eq!( + cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root), + Ok(()), + "can observe proposer" + ); + + /* + * Preconditions. + */ + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one proposer should be present" + ); + + /* + * Check that a prune at the genesis slot does nothing. + */ + cache.prune(Slot::new(0)); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one block root should be present" + ); + + /* + * Check that a prune empties the cache + */ + cache.prune(E::slots_per_epoch().into()); + assert_eq!( + cache.finalized_slot, + Slot::from(E::slots_per_epoch()), + "finalized slot is updated" + ); + assert_eq!(cache.items.len(), 0, "no items left"); + + /* + * Check that we can't insert a finalized block + */ + // First slot of finalized epoch, proposer 0 + let block_b = get_block(E::slots_per_epoch(), 0); + let block_root_b = block_b.canonical_root(); + + assert_eq!( + cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b), + Err(Error::FinalizedBlock { + slot: E::slots_per_epoch().into(), + finalized_slot: E::slots_per_epoch().into(), + }), + "can't insert finalized block" + ); + + assert_eq!(cache.items.len(), 0, "block was not added"); + + /* + * Check that we _can_ insert a non-finalized block + */ + let three_epochs = E::slots_per_epoch() * 3; + + // Slot three epochs after genesis, proposer 0 + let block_b = get_block(three_epochs, 0); + + assert_eq!( + cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b), + Ok(()), + "can insert non-finalized block" + ); + + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) + .expect("the three epochs slot should be present") + .len(), + 1, + "only one proposer should be present" + ); + + /* + * Check that a prune doesn't wipe later blocks + */ + let two_epochs = E::slots_per_epoch() * 2; + cache.prune(two_epochs.into()); + + assert_eq!( + cache.finalized_slot, + Slot::from(two_epochs), + "finalized slot is updated" + ); + + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items
.get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) + .expect("the three epochs slot should be present") + .len(), + 1, + "only one block root should be present" + ); + } + + #[test] + fn simple_observations() { + let mut cache = ObservedSlashable::<E>::default(); + + // Slot 0, proposer 0 + let block_a = get_block(0, 0); + let block_root_a = block_a.canonical_root(); + + assert_eq!( + cache.is_slashable( + block_a.slot(), + block_a.proposer_index(), + block_a.canonical_root() + ), + Ok(false), + "no observation in empty cache" + ); + assert_eq!( + cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root_a), + Ok(()), + "can observe proposer" + ); + assert_eq!( + cache.is_slashable( + block_a.slot(), + block_a.proposer_index(), + block_a.canonical_root() + ), + Ok(false), + "observed but unslashed block" + ); + assert_eq!( + cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root_a), + Ok(()), + "observing again" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one block root should be present" + ); + + // Slot 1, proposer 0 + let block_b = get_block(1, 0); + let block_root_b = block_b.canonical_root(); + + assert_eq!( + cache.is_slashable( + block_b.slot(), + block_b.proposer_index(), + block_b.canonical_root() + ), + Ok(false), + "not slashable for new slot" + ); + assert_eq!( + cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b), + Ok(()), + "can observe proposer for new slot" + ); + assert_eq!( + cache.is_slashable( + block_b.slot(), + block_b.proposer_index(), + block_b.canonical_root() + ), + Ok(false), + "observed but not slashable block in slot 1" + ); + assert_eq!( + cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b), + Ok(()), + "observing slot 1 again" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 2, "two slots should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one block root should be present in slot 0" + ); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(1), + proposer: 0 + }) + .expect("slot one should be present") + .len(), + 1, + "only one block root should be present in slot 1" + ); + + // Slot 0, proposer 1 + let block_c = get_block(0, 1); + let block_root_c = block_c.canonical_root(); + + assert_eq!( + cache.is_slashable( + block_c.slot(), + block_c.proposer_index(), + block_c.canonical_root() + ), + Ok(false), + "not slashable due to new proposer" + ); + assert_eq!( + cache.observe_slashable(block_c.slot(), block_c.proposer_index(), block_root_c), + Ok(()), + "can observe new proposer, indicates proposer unobserved" + ); + assert_eq!( + cache.is_slashable( + block_c.slot(), + block_c.proposer_index(), + block_c.canonical_root() + ), + Ok(false), + "not slashable due to new proposer" + ); + assert_eq!( + cache.observe_slashable(block_c.slot(), block_c.proposer_index(), block_root_c), + Ok(()), + "observing new proposer again" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 3, "three slots should be present"); + assert_eq!( + cache + .items + .iter()
+ .filter(|(k, _)| k.slot == cache.finalized_slot) + .count(), + 2, + "two proposers should be present in slot 0" + ); + assert_eq!( + cache + .items + .iter() + .filter(|(k, _)| k.slot == Slot::new(1)) + .count(), + 1, + "only one proposer should be present in slot 1" + ); + + // Slot 0, proposer 1 (again) + let mut block_d = get_block(0, 1); + *block_d.body_mut().graffiti_mut() = Graffiti::from(*b"this is slashable "); + let block_root_d = block_d.canonical_root(); + + assert_eq!( + cache.is_slashable( + block_d.slot(), + block_d.proposer_index(), + block_d.canonical_root() + ), + Ok(true), + "slashable due to new proposer" + ); + assert_eq!( + cache.observe_slashable(block_d.slot(), block_d.proposer_index(), block_root_d), + Ok(()), + "can observe new proposer, indicates proposer unobserved" + ); + } +} diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs index 805b61dd9..b934c553e 100644 --- a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ b/beacon_node/beacon_chain/src/otb_verification_service.rs @@ -119,10 +119,13 @@ pub fn start_otb_verification_service( pub fn load_optimistic_transition_blocks( chain: &BeaconChain, ) -> Result, StoreError> { - process_results(chain.store.hot_db.iter_column(OTBColumn), |iter| { - iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) - .collect() - })? + process_results( + chain.store.hot_db.iter_column::(OTBColumn), + |iter| { + iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) + .collect() + }, + )? } #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/pre_finalization_cache.rs b/beacon_node/beacon_chain/src/pre_finalization_cache.rs index 112394bb1..22b76e026 100644 --- a/beacon_node/beacon_chain/src/pre_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/pre_finalization_cache.rs @@ -3,11 +3,13 @@ use itertools::process_results; use lru::LruCache; use parking_lot::Mutex; use slog::debug; +use std::num::NonZeroUsize; use std::time::Duration; +use types::non_zero_usize::new_non_zero_usize; use types::Hash256; -const BLOCK_ROOT_CACHE_LIMIT: usize = 512; -const LOOKUP_LIMIT: usize = 8; +const BLOCK_ROOT_CACHE_LIMIT: NonZeroUsize = new_non_zero_usize(512); +const LOOKUP_LIMIT: NonZeroUsize = new_non_zero_usize(8); const METRICS_TIMEOUT: Duration = Duration::from_millis(100); /// Cache for rejecting attestations to blocks from before finalization. @@ -78,7 +80,7 @@ impl BeaconChain { // 3. Check the network with a single block lookup. cache.in_progress_lookups.put(block_root, ()); - if cache.in_progress_lookups.len() == LOOKUP_LIMIT { + if cache.in_progress_lookups.len() == LOOKUP_LIMIT.get() { // NOTE: we expect this to occur sometimes if a lot of blocks that we look up fail to be // imported for reasons other than being pre-finalization. The cache will eventually // self-repair in this case by replacing old entries with new ones until all the failed diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 7b398db2f..63eb72c43 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,19 +1,15 @@ //! Utilities for managing database schema changes. 
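// The `migrate_schema` function below dispatches on the (from, to) version
// pair, one match arm per adjacent upgrade or downgrade step, and commits each
// step's key-value ops atomically together with the new version number. A
// minimal, self-contained sketch of that pattern; `ToyDb`, `migrate_one_step`
// and the string ops are hypothetical stand-ins, not Lighthouse APIs:
struct ToyDb {
    schema_version: u64,
}

impl ToyDb {
    fn store_schema_version_atomically(&mut self, to: u64, _ops: Vec<String>) {
        // The real store writes the ops and the version in a single batch.
        self.schema_version = to;
    }
}

fn migrate_one_step(db: &mut ToyDb, from: u64, to: u64) -> Result<(), String> {
    match (from, to) {
        // Upgrade: compute the key-value ops for the new schema, then commit.
        (18, 19) => {
            let ops = vec![String::from("move blobs to the blobs database")];
            db.store_schema_version_atomically(to, ops);
            Ok(())
        }
        // Downgrade: apply the inverse transformation where one still exists.
        (19, 18) => {
            db.store_schema_version_atomically(to, vec![]);
            Ok(())
        }
        // Anything else is unsupported.
        _ => Err(format!("unsupported schema migration {from} -> {to}")),
    }
}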
-mod migration_schema_v12; -mod migration_schema_v13; -mod migration_schema_v14; -mod migration_schema_v15; -mod migration_schema_v16; mod migration_schema_v17; +mod migration_schema_v18; +mod migration_schema_v19; -use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; -use crate::eth1_chain::SszEth1; +use crate::beacon_chain::BeaconChainTypes; use crate::types::ChainSpec; -use slog::{warn, Logger}; +use slog::Logger; use std::sync::Arc; use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; -use store::{Error as StoreError, StoreItem}; +use store::Error as StoreError; /// Migrate the database from one schema version to another, applying all requisite mutations. #[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future @@ -56,92 +52,8 @@ pub fn migrate_schema( } // - // Migrations from before SchemaVersion(11) are deprecated. + // Migrations from before SchemaVersion(16) are deprecated. // - - // Upgrade from v11 to v12 to store richer metadata in the attestation op pool. - (SchemaVersion(11), SchemaVersion(12)) => { - let ops = migration_schema_v12::upgrade_to_v12::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - // Downgrade from v12 to v11 to drop richer metadata from the attestation op pool. - (SchemaVersion(12), SchemaVersion(11)) => { - let ops = migration_schema_v12::downgrade_from_v12::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(12), SchemaVersion(13)) => { - let mut ops = vec![]; - if let Some(persisted_eth1_v1) = db.get_item::(Ð1_CACHE_DB_KEY)? { - let upgraded_eth1_cache = - match migration_schema_v13::update_eth1_cache(persisted_eth1_v1) { - Ok(upgraded_eth1) => upgraded_eth1, - Err(e) => { - warn!(log, "Failed to deserialize SszEth1CacheV1"; "error" => ?e); - warn!(log, "Reinitializing eth1 cache"); - migration_schema_v13::reinitialized_eth1_cache_v13( - deposit_contract_deploy_block, - ) - } - }; - ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - (SchemaVersion(13), SchemaVersion(12)) => { - let mut ops = vec![]; - if let Some(persisted_eth1_v13) = db.get_item::(Ð1_CACHE_DB_KEY)? 
{ - let downgraded_eth1_cache = match migration_schema_v13::downgrade_eth1_cache( - persisted_eth1_v13, - ) { - Ok(Some(downgraded_eth1)) => downgraded_eth1, - Ok(None) => { - warn!(log, "Unable to downgrade eth1 cache from newer version: reinitializing eth1 cache"); - migration_schema_v13::reinitialized_eth1_cache_v1( - deposit_contract_deploy_block, - ) - } - Err(e) => { - warn!(log, "Unable to downgrade eth1 cache from newer version: failed to deserialize SszEth1CacheV13"; "error" => ?e); - warn!(log, "Reinitializing eth1 cache"); - migration_schema_v13::reinitialized_eth1_cache_v1( - deposit_contract_deploy_block, - ) - } - }; - ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - (SchemaVersion(13), SchemaVersion(14)) => { - let ops = migration_schema_v14::upgrade_to_v14::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(14), SchemaVersion(13)) => { - let ops = migration_schema_v14::downgrade_from_v14::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(14), SchemaVersion(15)) => { - let ops = migration_schema_v15::upgrade_to_v15::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(15), SchemaVersion(14)) => { - let ops = migration_schema_v15::downgrade_from_v15::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(15), SchemaVersion(16)) => { - let ops = migration_schema_v16::upgrade_to_v16::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(16), SchemaVersion(15)) => { - let ops = migration_schema_v16::downgrade_from_v16::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } (SchemaVersion(16), SchemaVersion(17)) => { let ops = migration_schema_v17::upgrade_to_v17::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) @@ -150,6 +62,22 @@ pub fn migrate_schema( let ops = migration_schema_v17::downgrade_from_v17::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(17), SchemaVersion(18)) => { + let ops = migration_schema_v18::upgrade_to_v18::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(18), SchemaVersion(17)) => { + let ops = migration_schema_v18::downgrade_from_v18::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(18), SchemaVersion(19)) => { + let ops = migration_schema_v19::upgrade_to_v19::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(19), SchemaVersion(18)) => { + let ops = migration_schema_v19::downgrade_from_v19::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs deleted file mode 100644 index c9aa2097f..000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs +++ /dev/null @@ -1,224 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}; -use crate::persisted_fork_choice::PersistedForkChoiceV11; -use operation_pool::{PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5}; -use slog::{debug, info, Logger}; -use state_processing::{ - common::get_indexed_attestation, per_block_processing::is_valid_indexed_attestation, - VerifyOperation, VerifySignatures, -}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; - -pub fn upgrade_to_v12( - db: Arc>, - log: Logger, -) -> Result, Error> { - let spec = db.get_chain_spec(); - - // Load a V5 op pool and transform it to V12. - let PersistedOperationPoolV5 { - attestations_v5, - sync_contributions, - attester_slashings_v5, - proposer_slashings_v5, - voluntary_exits_v5, - } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool - } else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - // Load the persisted fork choice so we can grab the state of the justified block and use - // it to verify the stored attestations, slashings and exits. - let fork_choice = db - .get_item::(&FORK_CHOICE_DB_KEY)? - .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; - let justified_block_root = fork_choice - .fork_choice_store - .unrealized_justified_checkpoint - .root; - let justified_block = db - .get_blinded_block(&justified_block_root)? - .ok_or_else(|| { - Error::SchemaMigrationError(format!( - "unrealized justified block missing for migration: {justified_block_root:?}", - )) - })?; - let justified_state_root = justified_block.state_root(); - let mut state = db - .get_state(&justified_state_root, Some(justified_block.slot()))? - .ok_or_else(|| { - Error::SchemaMigrationError(format!( - "justified state missing for migration: {justified_state_root:?}" - )) - })?; - state.build_all_committee_caches(spec).map_err(|e| { - Error::SchemaMigrationError(format!("unable to build committee caches: {e:?}")) - })?; - - // Re-verify attestations while adding attesting indices. 
- let attestations = attestations_v5 - .into_iter() - .flat_map(|(_, attestations)| attestations) - .filter_map(|attestation| { - let res = state - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map_err(Into::into) - .and_then(|committee| get_indexed_attestation(committee.committee, &attestation)) - .and_then(|indexed_attestation| { - is_valid_indexed_attestation( - &state, - &indexed_attestation, - VerifySignatures::True, - spec, - )?; - Ok(indexed_attestation) - }); - - match res { - Ok(indexed) => Some((attestation, indexed.attesting_indices.into())), - Err(e) => { - debug!( - log, - "Dropping attestation on migration"; - "err" => ?e, - "head_block" => ?attestation.data.beacon_block_root, - ); - None - } - } - }) - .collect::>(); - - let attester_slashings = attester_slashings_v5 - .iter() - .filter_map(|(slashing, _)| { - slashing - .clone() - .validate(&state, spec) - .map_err(|e| { - debug!( - log, - "Dropping attester slashing on migration"; - "err" => ?e, - "slashing" => ?slashing, - ); - }) - .ok() - }) - .collect::>(); - - let proposer_slashings = proposer_slashings_v5 - .iter() - .filter_map(|slashing| { - slashing - .clone() - .validate(&state, spec) - .map_err(|e| { - debug!( - log, - "Dropping proposer slashing on migration"; - "err" => ?e, - "slashing" => ?slashing, - ); - }) - .ok() - }) - .collect::>(); - - let voluntary_exits = voluntary_exits_v5 - .iter() - .filter_map(|exit| { - exit.clone() - .validate(&state, spec) - .map_err(|e| { - debug!( - log, - "Dropping voluntary exit on migration"; - "err" => ?e, - "exit" => ?exit, - ); - }) - .ok() - }) - .collect::>(); - - debug!( - log, - "Migrated op pool"; - "attestations" => attestations.len(), - "attester_slashings" => attester_slashings.len(), - "proposer_slashings" => proposer_slashings.len(), - "voluntary_exits" => voluntary_exits.len() - ); - - let v12 = PersistedOperationPool::V12(PersistedOperationPoolV12 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - }); - Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) -} - -pub fn downgrade_from_v12( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Load a V12 op pool and transform it to V5. - let PersistedOperationPoolV12:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? 
{ - op_pool_v12 - } else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - info!( - log, - "Dropping attestations from pool"; - "count" => attestations.len(), - ); - - let attester_slashings_v5 = attester_slashings - .into_iter() - .filter_map(|slashing| { - let fork_version = slashing.first_fork_verified_against()?; - Some((slashing.into_inner(), fork_version)) - }) - .collect::>(); - - let proposer_slashings_v5 = proposer_slashings - .into_iter() - .map(|slashing| slashing.into_inner()) - .collect::>(); - - let voluntary_exits_v5 = voluntary_exits - .into_iter() - .map(|exit| exit.into_inner()) - .collect::>(); - - info!( - log, - "Migrated slashings and exits"; - "attester_slashings" => attester_slashings_v5.len(), - "proposer_slashings" => proposer_slashings_v5.len(), - "voluntary_exits" => voluntary_exits_v5.len(), - ); - - let v5 = PersistedOperationPoolV5 { - attestations_v5: vec![], - sync_contributions, - attester_slashings_v5, - proposer_slashings_v5, - voluntary_exits_v5, - }; - Ok(vec![v5.as_kv_store_op(OP_POOL_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs deleted file mode 100644 index d4ac97460..000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs +++ /dev/null @@ -1,150 +0,0 @@ -use crate::eth1_chain::SszEth1; -use eth1::{BlockCache, SszDepositCacheV1, SszDepositCacheV13, SszEth1CacheV1, SszEth1CacheV13}; -use ssz::{Decode, Encode}; -use state_processing::common::DepositDataTree; -use store::Error; -use types::DEPOSIT_TREE_DEPTH; - -pub fn update_eth1_cache(persisted_eth1_v1: SszEth1) -> Result { - if persisted_eth1_v1.use_dummy_backend { - // backend_bytes is empty when using dummy backend - return Ok(persisted_eth1_v1); - } - - let SszEth1 { - use_dummy_backend, - backend_bytes, - } = persisted_eth1_v1; - - let ssz_eth1_cache_v1 = SszEth1CacheV1::from_ssz_bytes(&backend_bytes)?; - let SszEth1CacheV1 { - block_cache, - deposit_cache: deposit_cache_v1, - last_processed_block, - } = ssz_eth1_cache_v1; - - let SszDepositCacheV1 { - logs, - leaves, - deposit_contract_deploy_block, - deposit_roots, - } = deposit_cache_v1; - - let deposit_cache_v13 = SszDepositCacheV13 { - logs, - leaves, - deposit_contract_deploy_block, - finalized_deposit_count: 0, - finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), - deposit_tree_snapshot: None, - deposit_roots, - }; - - let ssz_eth1_cache_v13 = SszEth1CacheV13 { - block_cache, - deposit_cache: deposit_cache_v13, - last_processed_block, - }; - - let persisted_eth1_v13 = SszEth1 { - use_dummy_backend, - backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), - }; - - Ok(persisted_eth1_v13) -} - -pub fn downgrade_eth1_cache(persisted_eth1_v13: SszEth1) -> Result, Error> { - if persisted_eth1_v13.use_dummy_backend { - // backend_bytes is empty when using dummy backend - return Ok(Some(persisted_eth1_v13)); - } - - let SszEth1 { - use_dummy_backend, - backend_bytes, - } = persisted_eth1_v13; - - let ssz_eth1_cache_v13 = SszEth1CacheV13::from_ssz_bytes(&backend_bytes)?; - let SszEth1CacheV13 { - block_cache, - deposit_cache: deposit_cache_v13, - last_processed_block, - } = ssz_eth1_cache_v13; - - let SszDepositCacheV13 { - logs, - leaves, - deposit_contract_deploy_block, - finalized_deposit_count, - finalized_block_height: _, - deposit_tree_snapshot, - deposit_roots, - } = deposit_cache_v13; - - if finalized_deposit_count == 0 && 
deposit_tree_snapshot.is_none() { - // This tree was never finalized and can be directly downgraded to v1 without re-initializing - let deposit_cache_v1 = SszDepositCacheV1 { - logs, - leaves, - deposit_contract_deploy_block, - deposit_roots, - }; - let ssz_eth1_cache_v1 = SszEth1CacheV1 { - block_cache, - deposit_cache: deposit_cache_v1, - last_processed_block, - }; - return Ok(Some(SszEth1 { - use_dummy_backend, - backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), - })); - } - // deposit cache was finalized; can't downgrade - Ok(None) -} - -pub fn reinitialized_eth1_cache_v13(deposit_contract_deploy_block: u64) -> SszEth1 { - let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - let deposit_cache_v13 = SszDepositCacheV13 { - logs: vec![], - leaves: vec![], - deposit_contract_deploy_block, - finalized_deposit_count: 0, - finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), - deposit_tree_snapshot: empty_tree.get_snapshot(), - deposit_roots: vec![empty_tree.root()], - }; - - let ssz_eth1_cache_v13 = SszEth1CacheV13 { - block_cache: BlockCache::default(), - deposit_cache: deposit_cache_v13, - last_processed_block: None, - }; - - SszEth1 { - use_dummy_backend: false, - backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), - } -} - -pub fn reinitialized_eth1_cache_v1(deposit_contract_deploy_block: u64) -> SszEth1 { - let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - let deposit_cache_v1 = SszDepositCacheV1 { - logs: vec![], - leaves: vec![], - deposit_contract_deploy_block, - deposit_roots: vec![empty_tree.root()], - }; - - let ssz_eth1_cache_v1 = SszEth1CacheV1 { - block_cache: BlockCache::default(), - deposit_cache: deposit_cache_v1, - last_processed_block: None, - }; - - SszEth1 { - use_dummy_backend: false, - backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs deleted file mode 100644 index be913d8cc..000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs +++ /dev/null @@ -1,125 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; -use operation_pool::{ - PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, -}; -use slog::{debug, error, info, Logger}; -use slot_clock::SlotClock; -use std::sync::Arc; -use std::time::Duration; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use types::{EthSpec, Hash256, Slot}; - -/// The slot clock isn't usually available before the database is initialized, so we construct a -/// temporary slot clock by reading the genesis state. It should always exist if the database is -/// initialized at a prior schema version, however we still handle the lack of genesis state -/// gracefully. -fn get_slot_clock( - db: &HotColdDB, - log: &Logger, -) -> Result, Error> { - let spec = db.get_chain_spec(); - let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? { - block - } else { - error!(log, "Missing genesis block"); - return Ok(None); - }; - let genesis_state = - if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? 
{ - state - } else { - error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); - return Ok(None); - }; - Ok(Some(T::SlotClock::new( - spec.genesis_slot, - Duration::from_secs(genesis_state.genesis_time()), - Duration::from_secs(spec.seconds_per_slot), - ))) -} - -pub fn upgrade_to_v14( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Load a V12 op pool and transform it to V14. - let PersistedOperationPoolV12:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool_v12 - } else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - // initialize with empty vector - let bls_to_execution_changes = vec![]; - let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - }); - Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) -} - -pub fn downgrade_from_v14( - db: Arc>, - log: Logger, -) -> Result, Error> { - // We cannot downgrade from V14 once the Capella fork has been reached because there will - // be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions - // of Lighthouse can't handle that. - if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch { - let current_epoch = get_slot_clock::(&db, &log)? - .and_then(|clock| clock.now()) - .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) - .ok_or(Error::SlotClockUnavailableForMigration)?; - - if current_epoch >= capella_fork_epoch { - error!( - log, - "Capella already active: v14+ is mandatory"; - "current_epoch" => current_epoch, - "capella_fork_epoch" => capella_fork_epoch, - ); - return Err(Error::UnableToDowngrade); - } - } - - // Load a V14 op pool and transform it to V12. - let PersistedOperationPoolV14:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool - } else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - info!( - log, - "Dropping bls_to_execution_changes from pool"; - "count" => bls_to_execution_changes.len(), - ); - - let v12 = PersistedOperationPoolV12 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - }; - Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs deleted file mode 100644 index 07c86bd93..000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs +++ /dev/null @@ -1,76 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; -use operation_pool::{ - PersistedOperationPool, PersistedOperationPoolV14, PersistedOperationPoolV15, -}; -use slog::{debug, info, Logger}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; - -pub fn upgrade_to_v15( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Load a V14 op pool and transform it to V15. - let PersistedOperationPoolV14:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - } = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? 
{ - op_pool_v14 - } else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - // Initialize with empty set - capella_bls_change_broadcast_indices: <_>::default(), - }); - Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) -} - -pub fn downgrade_from_v15( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Load a V15 op pool and transform it to V14. - let PersistedOperationPoolV15:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool - } else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - info!( - log, - "Forgetting address changes for Capella broadcast"; - "count" => capella_bls_change_broadcast_indices.len(), - ); - - let v14 = PersistedOperationPoolV14 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - }; - Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs deleted file mode 100644 index 230573b02..000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs +++ /dev/null @@ -1,46 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; -use crate::persisted_fork_choice::PersistedForkChoiceV11; -use slog::{debug, Logger}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; - -pub fn upgrade_to_v16( - db: Arc>, - log: Logger, -) -> Result, Error> { - drop_balances_cache::(db, log) -} - -pub fn downgrade_from_v16( - db: Arc>, - log: Logger, -) -> Result, Error> { - drop_balances_cache::(db, log) -} - -/// Drop the balances cache from the fork choice store. -/// -/// There aren't any type-level changes in this schema migration, however the -/// way that we compute the `JustifiedBalances` has changed due to: -/// https://github.com/sigp/lighthouse/pull/3962 -pub fn drop_balances_cache( - db: Arc>, - log: Logger, -) -> Result, Error> { - let mut persisted_fork_choice = db - .get_item::(&FORK_CHOICE_DB_KEY)? - .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; - - debug!( - log, - "Dropping fork choice balances cache"; - "item_count" => persisted_fork_choice.fork_choice_store.balances_cache.items.len() - ); - - // Drop all items in the balances cache. 
- persisted_fork_choice.fork_choice_store.balances_cache = <_>::default(); - - let kv_op = persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY); - - Ok(vec![kv_op]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs new file mode 100644 index 000000000..04a9da841 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs @@ -0,0 +1,119 @@ +use crate::beacon_chain::BeaconChainTypes; +use slog::{error, info, warn, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::Duration; +use store::{ + get_key_for_col, metadata::BLOB_INFO_KEY, DBColumn, Error, HotColdDB, KeyValueStoreOp, +}; +use types::{Epoch, EthSpec, Hash256, Slot}; + +/// The slot clock isn't usually available before the database is initialized, so we construct a +/// temporary slot clock by reading the genesis state. It should always exist if the database is +/// initialized at a prior schema version, however we still handle the lack of genesis state +/// gracefully. +fn get_slot_clock( + db: &HotColdDB, + log: &Logger, +) -> Result, Error> { + let spec = db.get_chain_spec(); + let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else { + error!(log, "Missing genesis block"); + return Ok(None); + }; + let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else { + error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); + return Ok(None); + }; + Ok(Some(T::SlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_state.genesis_time()), + Duration::from_secs(spec.seconds_per_slot), + ))) +} + +fn get_current_epoch( + db: &Arc>, + log: &Logger, +) -> Result { + get_slot_clock::(db, log)? + .and_then(|clock| clock.now()) + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .ok_or(Error::SlotClockUnavailableForMigration) +} + +pub fn upgrade_to_v18( + db: Arc>, + log: Logger, +) -> Result, Error> { + db.heal_freezer_block_roots_at_split()?; + db.heal_freezer_block_roots_at_genesis()?; + info!(log, "Healed freezer block roots"); + + // No-op, even if Deneb has already occurred. The database is probably borked in this case, but + // *maybe* the fork recovery will revert the minority fork and succeed. + if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch { + let current_epoch = get_current_epoch::(&db, &log)?; + if current_epoch >= deneb_fork_epoch { + warn!( + log, + "Attempting upgrade to v18 schema"; + "info" => "this may not work as Deneb has already been activated" + ); + } else { + info!( + log, + "Upgrading to v18 schema"; + "info" => "ready for Deneb", + "epochs_until_deneb" => deneb_fork_epoch - current_epoch + ); + } + } else { + info!( + log, + "Upgrading to v18 schema"; + "info" => "ready for Deneb once it is scheduled" + ); + } + Ok(vec![]) +} + +pub fn downgrade_from_v18( + db: Arc>, + log: Logger, +) -> Result, Error> { + // We cannot downgrade from V18 once the Deneb fork has been activated, because there will + // be blobs and blob metadata in the database that aren't understood by the V17 schema. 
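// The temporary slot clock built above reduces to simple arithmetic: given
// only the genesis time and the spec's timing constants, the current epoch is
// a pure function of wall-clock time. A self-contained sketch, assuming
// mainnet-style constants (12-second slots, 32-slot epochs) for illustration:
use std::time::{SystemTime, UNIX_EPOCH};

const SECONDS_PER_SLOT: u64 = 12;
const SLOTS_PER_EPOCH: u64 = 32;

fn current_epoch_from_genesis(genesis_time: u64) -> Option<u64> {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs();
    // Before genesis there is no current slot, mirroring `clock.now()` -> None.
    let elapsed = now.checked_sub(genesis_time)?;
    Some(elapsed / SECONDS_PER_SLOT / SLOTS_PER_EPOCH)
}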
+ if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch { + let current_epoch = get_current_epoch::(&db, &log)?; + if current_epoch >= deneb_fork_epoch { + error!( + log, + "Deneb already active: v18+ is mandatory"; + "current_epoch" => current_epoch, + "deneb_fork_epoch" => deneb_fork_epoch, + ); + return Err(Error::UnableToDowngrade); + } else { + info!( + log, + "Downgrading to v17 schema"; + "info" => "you will need to upgrade before Deneb", + "epochs_until_deneb" => deneb_fork_epoch - current_epoch + ); + } + } else { + info!( + log, + "Downgrading to v17 schema"; + "info" => "you need to upgrade before Deneb", + ); + } + + let ops = vec![KeyValueStoreOp::DeleteKey(get_key_for_col( + DBColumn::BeaconMeta.into(), + BLOB_INFO_KEY.as_bytes(), + ))]; + + Ok(ops) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v19.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v19.rs new file mode 100644 index 000000000..578e9bad3 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v19.rs @@ -0,0 +1,65 @@ +use crate::beacon_chain::BeaconChainTypes; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp}; + +pub fn upgrade_to_v19( + db: Arc>, + log: Logger, +) -> Result, Error> { + let mut hot_delete_ops = vec![]; + let mut blob_keys = vec![]; + let column = DBColumn::BeaconBlob; + + debug!(log, "Migrating from v18 to v19"); + // Iterate through the blobs on disk. + for res in db.hot_db.iter_column_keys::>(column) { + let key = res?; + let key_col = get_key_for_col(column.as_str(), &key); + hot_delete_ops.push(KeyValueStoreOp::DeleteKey(key_col)); + blob_keys.push(key); + } + + let num_blobs = blob_keys.len(); + debug!(log, "Collected {} blob lists to migrate", num_blobs); + + let batch_size = 500; + let mut batch = Vec::with_capacity(batch_size); + + for key in blob_keys { + let next_blob = db.hot_db.get_bytes(column.as_str(), &key)?; + if let Some(next_blob) = next_blob { + let key_col = get_key_for_col(column.as_str(), &key); + batch.push(KeyValueStoreOp::PutKeyValue(key_col, next_blob)); + + if batch.len() >= batch_size { + db.blobs_db.do_atomically(batch.clone())?; + batch.clear(); + } + } + } + + // Process the remaining batch if it's not empty + if !batch.is_empty() { + db.blobs_db.do_atomically(batch)?; + } + + debug!(log, "Wrote {} blobs to the blobs db", num_blobs); + + // Delete all the blobs + info!(log, "Upgrading to v19 schema"); + Ok(hot_delete_ops) +} + +pub fn downgrade_from_v19( + _db: Arc>, + log: Logger, +) -> Result, Error> { + // No-op + info!( + log, + "Downgrading to v18 schema"; + ); + + Ok(vec![]) +} diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index f73223fa5..c04815ebc 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -45,6 +45,9 @@ const MAX_ADVANCE_DISTANCE: u64 = 4; /// impact whilst having 8 epochs without a block is a comfortable grace period. const MAX_FORK_CHOICE_DISTANCE: u64 = 256; +/// Drop any unused block production state cache after this many slots. 
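// The v19 migration above moves blob data out of the hot DB in bounded
// batches (500 entries at a time), so peak memory stays proportional to the
// batch size rather than to the total number of blobs. A generic sketch of
// that stream-and-flush shape (names here are illustrative only):
fn copy_in_batches<K, V>(
    items: impl Iterator<Item = (K, V)>,
    batch_size: usize,
    mut commit: impl FnMut(Vec<(K, V)>),
) {
    let mut batch = Vec::with_capacity(batch_size);
    for kv in items {
        batch.push(kv);
        if batch.len() >= batch_size {
            // Hand the full batch to the destination store and start afresh.
            commit(std::mem::take(&mut batch));
        }
    }
    // Flush the final partial batch, if any.
    if !batch.is_empty() {
        commit(batch);
    }
}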
+const MAX_BLOCK_PRODUCTION_CACHE_DISTANCE: u64 = 4; + #[derive(Debug)] enum Error { BeaconChain(BeaconChainError), @@ -113,14 +116,11 @@ async fn state_advance_timer( let slot_duration = slot_clock.slot_duration(); loop { - let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { - Some(duration) => duration, - None => { - error!(log, "Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. - sleep(slot_duration).await; - continue; - } + let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() else { + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + continue; }; // Run the state advance 3/4 of the way through the slot (9s on mainnet). @@ -230,19 +230,73 @@ async fn state_advance_timer( // Prepare proposers so that the node can send payload attributes in the case where // it decides to abandon a proposer boost re-org. - if let Err(e) = beacon_chain.prepare_beacon_proposer(current_slot).await { - warn!( - log, - "Unable to prepare proposer with lookahead"; - "error" => ?e, - "slot" => next_slot, - ); - } + let proposer_head = beacon_chain + .prepare_beacon_proposer(current_slot) + .await + .unwrap_or_else(|e| { + warn!( + log, + "Unable to prepare proposer with lookahead"; + "error" => ?e, + "slot" => next_slot, + ); + None + }); // Use a blocking task to avoid blocking the core executor whilst waiting for locks // in `ForkChoiceSignalTx`. beacon_chain.task_executor.clone().spawn_blocking( move || { + // If we're proposing, clone the head state preemptively so that it isn't on + // the hot path of proposing. We can delete this once we have tree-states. + if let Some(proposer_head) = proposer_head { + let mut cache = beacon_chain.block_production_state.lock(); + + // Avoid holding two states in memory. It's OK to hold the lock because + // we always lock the block production cache before the snapshot cache + // and we prefer for block production to wait for the block production + // cache if a clone is in-progress. + if cache + .as_ref() + .map_or(false, |(cached_head, _)| *cached_head != proposer_head) + { + drop(cache.take()); + } + if let Some(proposer_state) = beacon_chain + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|snapshot_cache| { + snapshot_cache.get_state_for_block_production(proposer_head) + }) + { + *cache = Some((proposer_head, proposer_state)); + debug!( + log, + "Cloned state ready for block production"; + "head_block_root" => ?proposer_head, + "slot" => next_slot + ); + } else { + warn!( + log, + "Block production state missing from snapshot cache"; + "head_block_root" => ?proposer_head, + "slot" => next_slot + ); + } + } else { + // If we aren't proposing, drop any old block production cache to save + // memory. + let mut cache = beacon_chain.block_production_state.lock(); + if let Some((_, state)) = &*cache { + if state.pre_state.slot() + MAX_BLOCK_PRODUCTION_CACHE_DISTANCE + <= current_slot + { + drop(cache.take()); + } + } + } + // Signal block proposal for the next slot (if it happens to be waiting). 
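// A reduced sketch of the expiry rule applied above: when the node is not
// about to propose, a cached block-production state older than
// MAX_BLOCK_PRODUCTION_CACHE_DISTANCE slots is dropped to reclaim memory.
fn should_drop_cached_state(cached_state_slot: u64, current_slot: u64) -> bool {
    const MAX_BLOCK_PRODUCTION_CACHE_DISTANCE: u64 = 4;
    cached_state_slot + MAX_BLOCK_PRODUCTION_CACHE_DISTANCE <= current_slot
}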
if let Some(tx) = &beacon_chain.fork_choice_signal_tx { if let Err(e) = tx.notify_fork_choice_complete(next_slot) { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 5e54b1194..6b85c8e49 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,10 +1,12 @@ +use crate::block_verification_types::{AsBlock, RpcBlock}; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; +use crate::BeaconBlockResponseWrapper; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, sync_committee_verification::Error as SyncCommitteeError, - validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, + validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; use crate::{ @@ -14,17 +16,21 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; +use eth2::types::SignedBlockContentsTuple; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockBuilder, MockBuilderServer, MockExecutionLayer, - DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, + ExecutionBlockGenerator, MockBuilder, MockExecutionLayer, DEFAULT_JWT_SECRET, + DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, }; use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; +use kzg::{Kzg, TrustedSetup}; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; use parking_lot::Mutex; @@ -34,7 +40,9 @@ use rand::Rng; use rand::SeedableRng; use rayon::prelude::*; use sensitive_url::SensitiveUrl; -use slog::Logger; +use slog::{o, Drain, Logger}; +use slog_async::Async; +use slog_term::{FullFormat, TermDecorator}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::{ @@ -45,19 +53,23 @@ use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; use std::str::FromStr; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; +use task_executor::TaskExecutor; use task_executor::{test_utils::TestRuntime, ShutdownReason}; use tree_hash::TreeHash; +use types::payload::BlockProductionVersion; use types::sync_selection_proof::SyncSelectionProof; pub use types::test_utils::generate_deterministic_keypairs; +use types::test_utils::TestRandom; use types::{typenum::U4294967296, *}; // 4th September 2019 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // Environment variable to read if `fork_from_env` feature is enabled. -const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; +pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // Default target aggregators to set during testing, this ensures an aggregator at each slot. 
// @@ -168,6 +180,7 @@ pub struct Builder { execution_layer: Option>, mock_execution_layer: Option>, testing_slot_clock: Option, + validator_monitor_config: Option, runtime: TestRuntime, log: Logger, } @@ -189,11 +202,12 @@ impl Builder> { .unwrap(), ); let mutator = move |builder: BeaconChainBuilder<_>| { + let header = generate_genesis_header::(builder.get_spec(), false); let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, + header, builder.get_spec(), ) .expect("should generate interop state"); @@ -250,11 +264,12 @@ impl Builder> { .expect("cannot build without validator keypairs"); let mutator = move |builder: BeaconChainBuilder<_>| { + let header = generate_genesis_header::(builder.get_spec(), false); let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, + header, builder.get_spec(), ) .expect("should generate interop state"); @@ -301,6 +316,7 @@ where execution_layer: None, mock_execution_layer: None, testing_slot_clock: None, + validator_monitor_config: None, runtime, log, } @@ -317,6 +333,11 @@ where self } + pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { + self.withdrawal_keypairs = withdrawal_keypairs; + self + } + /// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to /// the "determistic" values, regardless of wether or not the validator has /// a BLS or execution address in the genesis deposits. @@ -332,11 +353,6 @@ where ) } - pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { - self.withdrawal_keypairs = withdrawal_keypairs; - self - } - pub fn default_spec(self) -> Self { self.spec_or_default(None) } @@ -373,6 +389,14 @@ where self } + pub fn validator_monitor_config( + mut self, + validator_monitor_config: ValidatorMonitorConfig, + ) -> Self { + self.validator_monitor_config = Some(validator_monitor_config); + self + } + /// Purposefully replace the `store_mutator`. 
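// Hypothetical usage of the builder methods added above (method names as
// introduced in this diff; generics and error handling elided, so this is
// illustrative rather than compilable in isolation):
//
//     let harness = BeaconChainHarness::builder(MainnetEthSpec)
//         .default_spec()
//         .deterministic_keypairs(VALIDATOR_COUNT)
//         .validator_monitor_config(ValidatorMonitorConfig::default())
//         .fresh_ephemeral_store()
//         .mock_execution_layer()
//         .build();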
pub fn override_store_mutator(mut self, mutator: BoxedMutator) -> Self { assert!(self.store_mutator.is_some(), "store mutator not set"); @@ -385,7 +409,7 @@ where self } - pub fn execution_layer(mut self, urls: &[&str]) -> Self { + pub fn execution_layer_from_urls(mut self, urls: &[&str]) -> Self { assert!( self.execution_layer.is_none(), "execution layer already defined" @@ -414,6 +438,11 @@ where self } + pub fn execution_layer(mut self, el: Option>) -> Self { + self.execution_layer = el; + self + } + pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self { let mock = self .mock_execution_layer @@ -427,26 +456,21 @@ where spec.capella_fork_epoch.map(|epoch| { genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); + mock.server.execution_block_generator().cancun_time = spec.deneb_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); self } pub fn mock_execution_layer(self) -> Self { - self.mock_execution_layer_with_config(None) + self.mock_execution_layer_with_config() } - pub fn mock_execution_layer_with_config(mut self, builder_threshold: Option) -> Self { - let spec = self.spec.clone().expect("cannot build without spec"); - let shanghai_time = spec.capella_fork_epoch.map(|epoch| { - HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() - }); - let mock = MockExecutionLayer::new( + pub fn mock_execution_layer_with_config(mut self) -> Self { + let mock = mock_execution_layer_from_parts::( + self.spec.as_ref().expect("cannot build without spec"), self.runtime.task_executor.clone(), - DEFAULT_TERMINAL_BLOCK, - shanghai_time, - builder_threshold, - Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec, ); self.execution_layer = Some(mock.el.clone()); self.mock_execution_layer = Some(mock); @@ -478,8 +502,13 @@ where let validator_keypairs = self .validator_keypairs .expect("cannot build without validator keypairs"); - let chain_config = self.chain_config.unwrap_or_default(); + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .unwrap(); + let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); + + let chain_config = self.chain_config.unwrap_or_default(); let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) .logger(log.clone()) .custom_spec(spec) @@ -499,7 +528,8 @@ where log.clone(), 5, ))) - .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log); + .validator_monitor_config(validator_monitor_config) + .trusted_setup(trusted_setup); builder = if let Some(mutator) = self.initial_mutator { mutator(builder) @@ -540,6 +570,33 @@ where } } +pub fn mock_execution_layer_from_parts( + spec: &ChainSpec, + task_executor: TaskExecutor, +) -> MockExecutionLayer { + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64() + }); + let cancun_time = spec.deneb_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64() + }); + + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg"); + + MockExecutionLayer::new( + task_executor, + DEFAULT_TERMINAL_BLOCK, 
+ shanghai_time, + cancun_time, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec.clone(), + Some(kzg), + ) +} + /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and /// attestations. /// @@ -595,7 +652,10 @@ where .execution_block_generator() } - pub fn set_mock_builder(&mut self, beacon_url: SensitiveUrl) -> MockBuilderServer { + pub fn set_mock_builder( + &mut self, + beacon_url: SensitiveUrl, + ) -> impl futures::Future { let mock_el = self .mock_execution_layer .as_ref() @@ -604,7 +664,7 @@ where let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); // Create the builder, listening on a free port. - let (mock_builder, mock_builder_server) = MockBuilder::new_for_testing( + let (mock_builder, (addr, mock_builder_server)) = MockBuilder::new_for_testing( mock_el_url, beacon_url, self.spec.clone(), @@ -612,8 +672,7 @@ where ); // Set the builder URL in the execution layer now that its port is known. - let builder_listen_addr = mock_builder_server.local_addr(); - let port = builder_listen_addr.port(); + let port = addr.port(); mock_el .el .set_builder_url( @@ -638,6 +697,20 @@ where mock_builder_server } + pub fn get_head_block(&self) -> RpcBlock { + let block = self.chain.head_beacon_block(); + let block_root = block.canonical_root(); + let blobs = self.chain.get_blobs(&block_root).unwrap(); + RpcBlock::new(Some(block_root), block, Some(blobs)).unwrap() + } + + pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { + let block = self.chain.get_blinded_block(block_root).unwrap().unwrap(); + let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); + let blobs = self.chain.get_blobs(block_root).unwrap(); + RpcBlock::new(Some(*block_root), Arc::new(full_block), Some(blobs)).unwrap() + } + pub fn get_all_validators(&self) -> Vec { (0..self.validator_keypairs.len()).collect() } @@ -749,7 +822,7 @@ where slot: Slot, ) -> (SignedBlindedBeaconBlock, BeaconState) { let (unblinded, new_state) = self.make_block(state, slot).await; - (unblinded.into(), new_state) + ((*unblinded.0).clone().into(), new_state) } /// Returns a newly created block, signed by the proposer for the given slot. 
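// The Shanghai/Cancun wiring in `mock_execution_layer_from_parts` above turns
// a fork epoch into the unix timestamp the mock execution layer expects. The
// arithmetic, extracted as a sketch:
fn fork_activation_time(
    genesis_time: u64,
    seconds_per_slot: u64,
    slots_per_epoch: u64,
    fork_epoch: u64,
) -> u64 {
    genesis_time + seconds_per_slot * slots_per_epoch * fork_epoch
}
// e.g. a fork at epoch 2 with 12s slots and 32-slot epochs activates
// 2 * 32 * 12 = 768 seconds after genesis.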
@@ -757,7 +830,7 @@ where &self, mut state: BeaconState, slot: Slot, - ) -> (SignedBeaconBlock, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -775,7 +848,7 @@ where let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); - let (block, state) = self + let BeaconBlockResponseWrapper::Full(block_response) = self .chain .produce_block_on_state( state, @@ -784,18 +857,31 @@ where randao_reveal, Some(graffiti), ProduceBlockVerification::VerifyRandao, + None, + BlockProductionVersion::FullV2, ) .await - .unwrap(); + .unwrap() + else { + panic!("Should always be a full payload response"); + }; - let signed_block = block.sign( + let signed_block = Arc::new(block_response.block.sign( &self.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), + &block_response.state.fork(), + block_response.state.genesis_validators_root(), &self.spec, - ); + )); - (signed_block, state) + let block_contents: SignedBlockContentsTuple = match *signed_block { + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Capella(_) => (signed_block, None), + SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items), + }; + + (block_contents, block_response.state) } /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after @@ -804,7 +890,7 @@ where &self, mut state: BeaconState, slot: Slot, - ) -> (SignedBeaconBlock, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -824,7 +910,7 @@ where let pre_state = state.clone(); - let (block, state) = self + let BeaconBlockResponseWrapper::Full(block_response) = self .chain .produce_block_on_state( state, @@ -833,18 +919,30 @@ where randao_reveal, Some(graffiti), ProduceBlockVerification::VerifyRandao, + None, + BlockProductionVersion::FullV2, ) .await - .unwrap(); + .unwrap() + else { + panic!("Should always be a full payload response"); + }; - let signed_block = block.sign( + let signed_block = Arc::new(block_response.block.sign( &self.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), + &block_response.state.fork(), + block_response.state.genesis_validators_root(), &self.spec, - ); + )); - (signed_block, pre_state) + let block_contents: SignedBlockContentsTuple = match *signed_block { + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Capella(_) => (signed_block, None), + SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items), + }; + (block_contents, pre_state) } /// Create a randao reveal for a block at `slot`. @@ -980,9 +1078,9 @@ where ) -> (Vec>, Vec) { let MakeAttestationOptions { limit, fork } = opts; let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); - let attesters = Mutex::new(vec![]); + let num_attesters = AtomicUsize::new(0); - let attestations = state + let (attestations, split_attesters) = state .get_beacon_committees_at_slot(attestation_slot) .expect("should get committees") .iter() @@ -995,13 +1093,14 @@ where return None; } - let mut attesters = attesters.lock(); if let Some(limit) = limit { - if attesters.len() >= limit { + // This atomics stuff is necessary because we're under a par_iter, + // and Rayon will deadlock if we use a mutex. 
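// Per the comment above, the attester limit below is enforced with an atomic
// claim-then-undo rather than a mutex, avoiding lock contention (and the
// noted deadlock risk) inside the `par_iter`. A self-contained sketch of that
// pattern:
use std::sync::atomic::{AtomicUsize, Ordering};

fn try_claim(counter: &AtomicUsize, limit: usize) -> bool {
    if counter.fetch_add(1, Ordering::Relaxed) >= limit {
        // Over the limit: roll back the tentative increment and refuse.
        counter.fetch_sub(1, Ordering::Relaxed);
        false
    } else {
        true
    }
}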
+ if num_attesters.fetch_add(1, Ordering::Relaxed) >= limit { + num_attesters.fetch_sub(1, Ordering::Relaxed); return None; } } - attesters.push(*validator_index); let mut attestation = self .produce_unaggregated_attestation_for_block( @@ -1041,14 +1140,17 @@ where ) .unwrap(); - Some((attestation, subnet_id)) + Some(((attestation, subnet_id), validator_index)) }) - .collect::>() + .unzip::<_, _, Vec<_>, Vec<_>>() }) - .collect::>(); + .unzip::<_, _, Vec<_>, Vec<_>>(); + + // Flatten attesters. + let attesters = split_attesters.into_iter().flatten().collect::>(); - let attesters = attesters.into_inner(); if let Some(limit) = limit { + assert_eq!(limit, num_attesters.load(Ordering::Relaxed)); assert_eq!( limit, attesters.len(), @@ -1519,14 +1621,13 @@ where pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; VoluntaryExit { epoch, validator_index, } - .sign(sk, &fork, genesis_validators_root, &self.chain.spec) + .sign(sk, genesis_validators_root, &self.chain.spec) } pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> { @@ -1635,12 +1736,13 @@ where state: BeaconState, slot: Slot, block_modifier: impl FnOnce(&mut BeaconBlock), - ) -> (SignedBeaconBlock, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); - let (block, state) = self.make_block_return_pre_state(state, slot).await; - let (mut block, _) = block.deconstruct(); + let ((block, blobs), state) = self.make_block_return_pre_state(state, slot).await; + + let (mut block, _) = (*block).clone().deconstruct(); block_modifier(&mut block); @@ -1652,7 +1754,33 @@ where state.genesis_validators_root(), &self.spec, ); - (signed_block, state) + ((Arc::new(signed_block), blobs), state) + } + + pub async fn make_blob_with_modifier( + &self, + state: BeaconState, + slot: Slot, + blob_modifier: impl FnOnce(&mut BlobsList), + ) -> (SignedBlockContentsTuple, BeaconState) { + assert_ne!(slot, 0, "can't produce a block at slot 0"); + assert!(slot >= state.slot()); + + let ((block, mut blobs), state) = self.make_block_return_pre_state(state, slot).await; + + let (block, _) = (*block).clone().deconstruct(); + + blob_modifier(&mut blobs.as_mut().unwrap().1); + + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); + + let signed_block = block.sign( + &self.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + ((Arc::new(signed_block), blobs), state) } pub fn make_deposits<'a>( @@ -1732,37 +1860,52 @@ where &self, slot: Slot, block_root: Hash256, - block: SignedBeaconBlock, + block_contents: SignedBlockContentsTuple, ) -> Result> { self.set_current_slot(slot); + let (block, blob_items) = block_contents; + + let sidecars = blob_items + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .transpose() + .unwrap(); let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - Arc::new(block), + RpcBlock::new(Some(block_root), block, sidecars).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) .await? 
- .into(); + .try_into() + .unwrap(); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } pub async fn process_block_result( &self, - block: SignedBeaconBlock, + block_contents: SignedBlockContentsTuple, ) -> Result> { + let (block, blob_items) = block_contents; + + let sidecars = blob_items + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .transpose() + .unwrap(); + let block_root = block.canonical_root(); let block_hash: SignedBeaconBlockHash = self .chain .process_block( - block.canonical_root(), - Arc::new(block), + block_root, + RpcBlock::new(Some(block_root), block, sidecars).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) .await? - .into(); + .try_into() + .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -1822,13 +1965,25 @@ where &self, slot: Slot, state: BeaconState, - ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock, BeaconState), BlockError> { + ) -> Result< + ( + SignedBeaconBlockHash, + SignedBlockContentsTuple, + BeaconState, + ), + BlockError, + > { self.set_current_slot(slot); - let (block, new_state) = self.make_block(state, slot).await; + let (block_contents, new_state) = self.make_block(state, slot).await; + let block_hash = self - .process_block(slot, block.canonical_root(), block.clone()) + .process_block( + slot, + block_contents.0.canonical_root(), + block_contents.clone(), + ) .await?; - Ok((block_hash, block, new_state)) + Ok((block_hash, block_contents, new_state)) } pub fn attest_block( @@ -1882,7 +2037,7 @@ where sync_committee_strategy: SyncCommitteeStrategy, ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; - self.attest_block(&state, state_root, block_hash, &block, validators); + self.attest_block(&state, state_root, block_hash, &block.0, validators); if sync_committee_strategy == SyncCommitteeStrategy::AllValidators && state.current_sync_committee().is_ok() @@ -2080,8 +2235,9 @@ where chain_dump .iter() .cloned() - .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into()) - .filter(|block_hash| *block_hash != Hash256::zero().into()) + .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root) + .filter(|block_hash| *block_hash != Hash256::zero()) + .map(|hash| hash.into()) .collect() } @@ -2133,6 +2289,29 @@ where .await } + /// Uses `Self::extend_chain` to `num_slots` blocks. + /// + /// Utilizes: + /// + /// - BlockStrategy::OnCanonicalHead, + /// - AttestationStrategy::SomeValidators(validators), + pub async fn extend_slots_some_validators( + &self, + num_slots: usize, + validators: Vec, + ) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { + self.advance_slot(); + } + + self.extend_chain( + num_slots, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(validators), + ) + .await + } + /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the /// last-produced block (the head of the chain). /// @@ -2293,3 +2472,74 @@ pub struct MakeAttestationOptions { /// Fork to use for signing attestations. 
 pub fork: Fork,
}
+
+pub fn build_log(level: slog::Level, enabled: bool) -> Logger {
+    let decorator = TermDecorator::new().build();
+    let drain = FullFormat::new(decorator).build().fuse();
+    let drain = Async::new(drain).build().fuse();
+
+    if enabled {
+        Logger::root(drain.filter_level(level).fuse(), o!())
+    } else {
+        Logger::root(drain.filter(|_| false).fuse(), o!())
+    }
+}
+
+pub enum NumBlobs {
+    Random,
+    None,
+}
+
+pub fn generate_rand_block_and_blobs<E: EthSpec>(
+    fork_name: ForkName,
+    num_blobs: NumBlobs,
+    rng: &mut impl Rng,
+) -> (SignedBeaconBlock<E, FullPayload<E>>, Vec<BlobSidecar<E>>) {
+    let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng));
+    let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng));
+    let mut blob_sidecars = vec![];
+    if let Ok(message) = block.message_deneb_mut() {
+        // Get either zero blobs or a random number of blobs between 1 and Max Blobs.
+        let payload: &mut FullPayloadDeneb<E> = &mut message.body.execution_payload;
+        let num_blobs = match num_blobs {
+            NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()),
+            NumBlobs::None => 0,
+        };
+        let (bundle, transactions) =
+            execution_layer::test_utils::generate_blobs::<E>(num_blobs).unwrap();
+
+        payload.execution_payload.transactions = <_>::default();
+        for tx in Vec::from(transactions) {
+            payload.execution_payload.transactions.push(tx).unwrap();
+        }
+        message.body.blob_kzg_commitments = bundle.commitments.clone();
+
+        let eth2::types::BlobsBundle {
+            commitments,
+            proofs,
+            blobs,
+        } = bundle;
+
+        for (index, ((blob, kzg_commitment), kzg_proof)) in blobs
+            .into_iter()
+            .zip(commitments.into_iter())
+            .zip(proofs.into_iter())
+            .enumerate()
+        {
+            blob_sidecars.push(BlobSidecar {
+                index: index as u64,
+                blob: blob.clone(),
+                kzg_commitment,
+                kzg_proof,
+                signed_block_header: block.signed_block_header(),
+                kzg_commitment_inclusion_proof: block
+                    .message()
+                    .body()
+                    .kzg_commitment_merkle_proof(index)
+                    .unwrap(),
+            });
+        }
+    }
+
+    (block, blob_sidecars)
+}
diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs
index 396aac71b..49a555816 100644
--- a/beacon_node/beacon_chain/src/validator_monitor.rs
+++ b/beacon_node/beacon_chain/src/validator_monitor.rs
@@ -2,10 +2,15 @@
 //!
 //! This component should not affect consensus.
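+//! It also tracks (non-finalized) missed blocks and the performance of simulated unaggregated
+//! attestations for the monitored validators.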
+use crate::beacon_proposer_cache::{BeaconProposerCache, TYPICAL_SLOTS_PER_EPOCH};
 use crate::metrics;
-use parking_lot::RwLock;
-use slog::{crit, debug, info, Logger};
+use itertools::Itertools;
+use parking_lot::{Mutex, RwLock};
+use serde::{Deserialize, Serialize};
+use slog::{crit, debug, error, info, warn, Logger};
 use slot_clock::SlotClock;
+use smallvec::SmallVec;
+use state_processing::common::get_attestation_participation_flag_indices;
 use state_processing::per_epoch_processing::{
     errors::EpochProcessingError, EpochProcessingSummary,
 };
@@ -14,12 +19,16 @@ use std::convert::TryFrom;
 use std::io;
 use std::marker::PhantomData;
 use std::str::Utf8Error;
+use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use store::AbstractExecPayload;
+use types::consts::altair::{
+    TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX,
+};
 use types::{
-    AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec, Hash256,
-    IndexedAttestation, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof,
-    SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit,
+    Attestation, AttestationData, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError,
+    ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes,
+    SignedAggregateAndProof, SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit,
 };

 /// Used for Prometheus labels.
@@ -35,7 +44,43 @@ pub const HISTORIC_EPOCHS: usize = 10;
 /// Once the validator monitor reaches this number of validators it will stop
 /// tracking their metrics/logging individually in an effort to reduce
 /// Prometheus cardinality and log volume.
-pub const DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD: usize = 64;
+const DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD: usize = 64;
+
+/// Lag slots used in detecting missed blocks for the monitored validators.
+pub const MISSED_BLOCK_LAG_SLOTS: usize = 4;
+
+/// The number of epochs to look back when determining if a validator has missed a block.
+/// This value is used with the `beacon_proposer_cache`, which is only populated for the current
+/// and the previous epoch, so setting it to anything higher than 1 is likely to be problematic.
+pub const MISSED_BLOCK_LOOKBACK_EPOCHS: u64 = 1;
+
+/// Initial configuration values for the `ValidatorMonitor`.
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct ValidatorMonitorConfig {
+    pub auto_register: bool,
+    pub validators: Vec<PublicKeyBytes>,
+    pub individual_tracking_threshold: usize,
+}
+
+impl Default for ValidatorMonitorConfig {
+    fn default() -> Self {
+        Self {
+            auto_register: false,
+            validators: vec![],
+            individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
+        }
+    }
+}
+
+/// The number of slots to wait before evaluating a simulated unaggregated attestation.
+/// The goal is to check how the BN would have performed had it attested at every slot:
+/// the head/target/source are only checked once `state.slot` is some slots beyond
+/// `attestation.data.slot`, to defend against re-orgs. 16 slots is the minimum to defend
+/// against re-orgs of up to 16 slots.
+pub const UNAGGREGATED_ATTESTATION_LAG_SLOTS: usize = 16;
+
+/// Bound the storage size of simulated attestations. The head state can only verify attestations
+/// from the current and previous epoch.
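+/// (64 entries is two epochs' worth of slots on mainnet, comfortably covering that window.)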
+pub const MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH: usize = 64;

 #[derive(Debug)]
 pub enum Error {
@@ -323,6 +368,13 @@ impl MonitoredValidator {
     }
 }

+#[derive(PartialEq, Hash, Eq)]
+struct MissedBlock {
+    slot: Slot,
+    parent_root: Hash256,
+    validator_index: u64,
+}
+
 /// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P
 /// network, HTTP API and `BeaconChain`.
 ///
@@ -331,7 +383,7 @@ impl MonitoredValidator {
 ///
 /// The intention of this struct is to provide users with more logging and Prometheus metrics around
 /// validators that they are interested in.
-pub struct ValidatorMonitor<T> {
+pub struct ValidatorMonitor<T: EthSpec> {
     /// The validators that require additional monitoring.
     validators: HashMap<PublicKeyBytes, MonitoredValidator>,
     /// A map of validator index (state.validators) to a validator public key.
@@ -343,26 +395,40 @@ pub struct ValidatorMonitor<T> {
     /// large validator counts causing infeasibly high cardinality for
     /// Prometheus and high log volumes.
     individual_tracking_threshold: usize,
+    /// The set of (non-finalized) missed blocks, identified by slot, parent root and validator
+    /// index (`state.validators`).
+    missed_blocks: HashSet<MissedBlock>,
+    /// A beacon proposer cache.
+    beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>,
+    /// Unaggregated attestations generated at each slot by the attestation simulator.
+    unaggregated_attestations: HashMap<Slot, Attestation<T>>,
     log: Logger,
     _phantom: PhantomData<T>,
 }

 impl<T: EthSpec> ValidatorMonitor<T> {
     pub fn new(
-        pubkeys: Vec<PublicKeyBytes>,
-        auto_register: bool,
-        individual_tracking_threshold: usize,
+        config: ValidatorMonitorConfig,
+        beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>,
         log: Logger,
     ) -> Self {
+        let ValidatorMonitorConfig {
+            auto_register,
+            validators,
+            individual_tracking_threshold,
+        } = config;
+
         let mut s = Self {
             validators: <_>::default(),
             indices: <_>::default(),
             auto_register,
             individual_tracking_threshold,
+            missed_blocks: <_>::default(),
+            beacon_proposer_cache,
+            unaggregated_attestations: <_>::default(),
             log,
             _phantom: PhantomData,
         };
-        for pubkey in pubkeys {
+        for pubkey in validators {
             s.add_validator_pubkey(pubkey)
         }
         s
@@ -376,7 +442,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
     }

     /// Add some validators to `self` for additional monitoring.
-    fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) {
+    pub fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) {
         let index_opt = self
             .indices
             .iter()
@@ -394,9 +460,32 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         });
     }

+    /// Add an unaggregated attestation.
+    pub fn set_unaggregated_attestation(&mut self, attestation: Attestation<T>) {
+        let unaggregated_attestations = &mut self.unaggregated_attestations;
+
+        // Prune the map, evicting the entry with the oldest slot once the length reaches
+        // MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH.
+        if unaggregated_attestations.len() >= MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH {
+            if let Some(oldest_slot) = unaggregated_attestations.keys().min().copied() {
+                unaggregated_attestations.remove(&oldest_slot);
+            }
+        }
+        let slot = attestation.data.slot;
+        self.unaggregated_attestations.insert(slot, attestation);
+    }
+
+    pub fn get_unaggregated_attestation(&self, slot: Slot) -> Option<&Attestation<T>> {
+        self.unaggregated_attestations.get(&slot)
+    }
+
     /// Reads information from the given `state`. The `state` *must* be valid (i.e., able to be
     /// imported).
-    pub fn process_valid_state(&mut self, current_epoch: Epoch, state: &BeaconState<T>) {
+    pub fn process_valid_state(
+        &mut self,
+        current_epoch: Epoch,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) {
         // Add any new validator indices.
state .validators() @@ -411,6 +500,10 @@ impl ValidatorMonitor { self.indices.insert(i, validator.pubkey); }); + // Add missed non-finalized blocks for the monitored validators + self.add_validators_missed_blocks(state); + self.process_unaggregated_attestations(state, spec); + // Update metrics for individual validators. for monitored_validator in self.validators.values() { if let Some(i) = monitored_validator.index { @@ -489,6 +582,192 @@ impl ValidatorMonitor { } } } + + // Prune missed blocks that are prior to last finalized epochs - MISSED_BLOCK_LOOKBACK_EPOCHS + let finalized_epoch = state.finalized_checkpoint().epoch; + self.missed_blocks.retain(|missed_block| { + let epoch = missed_block.slot.epoch(T::slots_per_epoch()); + epoch + Epoch::new(MISSED_BLOCK_LOOKBACK_EPOCHS) >= finalized_epoch + }); + } + + /// Add missed non-finalized blocks for the monitored validators + fn add_validators_missed_blocks(&mut self, state: &BeaconState) { + // Define range variables + let current_slot = state.slot(); + let current_epoch = current_slot.epoch(T::slots_per_epoch()); + // start_slot needs to be coherent with what can be retrieved from the beacon_proposer_cache + let start_slot = current_epoch.start_slot(T::slots_per_epoch()) + - Slot::new(MISSED_BLOCK_LOOKBACK_EPOCHS * T::slots_per_epoch()); + + let end_slot = current_slot.saturating_sub(MISSED_BLOCK_LAG_SLOTS).as_u64(); + + // List of proposers per epoch from the beacon_proposer_cache, and the epoch at which the + // cache is valid. + let mut proposers_per_epoch: Option<(SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>, Epoch)> = + None; + + for (prev_slot, slot) in (start_slot.as_u64()..=end_slot) + .map(Slot::new) + .tuple_windows() + { + // Condition for missed_block is defined such as block_root(slot) == block_root(slot - 1) + // where the proposer who missed the block is the proposer of the block at block_root(slot) + if let (Ok(block_root), Ok(prev_block_root)) = + (state.get_block_root(slot), state.get_block_root(prev_slot)) + { + // Found missed block + if block_root == prev_block_root { + let slot_epoch = slot.epoch(T::slots_per_epoch()); + + if let Ok(shuffling_decision_block) = + state.proposer_shuffling_decision_root_at_epoch(slot_epoch, *block_root) + { + // Update the cache if it has not yet been initialised, or if it is + // initialised for a prior epoch. This is an optimisation to avoid bouncing + // the proposer shuffling cache lock when there are lots of missed blocks. 
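+                        // (Consecutive missed slots within the same epoch thus reuse the cached
+                        // proposer list without re-locking the beacon proposer cache.)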
+                        if proposers_per_epoch
+                            .as_ref()
+                            .map_or(true, |(_, cached_epoch)| *cached_epoch != slot_epoch)
+                        {
+                            proposers_per_epoch = self
+                                .get_proposers_by_epoch_from_cache(
+                                    slot_epoch,
+                                    shuffling_decision_block,
+                                )
+                                .map(|cache| (cache, slot_epoch));
+                        }
+
+                        // Only add missed blocks for the proposer if it's in the list of monitored validators
+                        let slot_in_epoch = slot % T::slots_per_epoch();
+                        if let Some(proposer_index) = proposers_per_epoch
+                            .as_ref()
+                            .and_then(|(proposers, _)| proposers.get(slot_in_epoch.as_usize()))
+                        {
+                            let i = *proposer_index as u64;
+                            if let Some(pub_key) = self.indices.get(&i) {
+                                if let Some(validator) = self.validators.get(pub_key) {
+                                    let missed_block = MissedBlock {
+                                        slot,
+                                        parent_root: *prev_block_root,
+                                        validator_index: i,
+                                    };
+                                    // Increment the missed-block counter for the validator, but
+                                    // only if this block doesn't already exist in the hashset
+                                    if self.missed_blocks.insert(missed_block) {
+                                        self.aggregatable_metric(&validator.id, |label| {
+                                            metrics::inc_counter_vec(
+                                                &metrics::VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL,
+                                                &[label],
+                                            );
+                                        });
+                                        error!(
+                                            self.log,
+                                            "Validator missed a block";
+                                            "index" => i,
+                                            "slot" => slot,
+                                            "parent block root" => ?prev_block_root,
+                                        );
+                                    }
+                                }
+                            } else {
+                                warn!(
+                                    self.log,
+                                    "Missing validator index";
+                                    "info" => "potential inconsistency in the validator manager",
+                                    "index" => i,
+                                )
+                            }
+                        } else {
+                            debug!(
+                                self.log,
+                                "Could not get proposers from cache";
+                                "epoch" => ?slot_epoch,
+                                "decision_root" => ?shuffling_decision_block,
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    fn get_proposers_by_epoch_from_cache(
+        &mut self,
+        epoch: Epoch,
+        shuffling_decision_block: Hash256,
+    ) -> Option<SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>> {
+        let mut cache = self.beacon_proposer_cache.lock();
+        cache
+            .get_epoch::<T>(shuffling_decision_block, epoch)
+            .cloned()
+    }
+
+    /// Process the unaggregated attestations generated by the `attestation_simulator` service,
+    /// checking whether each attestation qualifies for a reward on the source/target/head flags.
+    fn process_unaggregated_attestations(&mut self, state: &BeaconState<T>, spec: &ChainSpec) {
+        let current_slot = state.slot();
+
+        // Ensure that we process attestations even when there have been skipped slots between blocks
+        let attested_slots: Vec<_> = self
+            .unaggregated_attestations
+            .keys()
+            .filter(|&&attestation_slot| {
+                attestation_slot
+                    < current_slot - Slot::new(UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64)
+            })
+            .cloned()
+            .collect();
+
+        let unaggregated_attestations = &mut self.unaggregated_attestations;
+        for slot in attested_slots {
+            if let Some(unaggregated_attestation) = unaggregated_attestations.remove(&slot) {
+                // Don't process this attestation, it's too old to be processed by this state.
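+                // (Participation flags can only be computed for attestations from the state's
+                // current or previous epoch.)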
+ if slot.epoch(T::slots_per_epoch()) < state.previous_epoch() { + continue; + } + + // We are simulating that unaggregated attestation in a service that produces unaggregated attestations + // every slot, the inclusion_delay shouldn't matter here as long as the minimum value + // that qualifies the committee index for reward is included + let inclusion_delay = spec.min_attestation_inclusion_delay; + + let data = &unaggregated_attestation.data; + + // Get the reward indices for the unaggregated attestation or log an error + match get_attestation_participation_flag_indices( + state, + &unaggregated_attestation.data, + inclusion_delay, + spec, + ) { + Ok(flag_indices) => { + let head_hit = flag_indices.contains(&TIMELY_HEAD_FLAG_INDEX); + let target_hit = flag_indices.contains(&TIMELY_TARGET_FLAG_INDEX); + let source_hit = flag_indices.contains(&TIMELY_SOURCE_FLAG_INDEX); + register_simulated_attestation( + data, head_hit, target_hit, source_hit, &self.log, + ) + } + Err(BeaconStateError::IncorrectAttestationSource) => { + register_simulated_attestation(data, false, false, false, &self.log) + } + Err(err) => { + error!( + self.log, + "Failed to get attestation participation flag indices"; + "error" => ?err, + "unaggregated_attestation" => ?unaggregated_attestation, + ); + } + } + } else { + error!( + self.log, + "Failed to remove unaggregated attestation from the hashmap"; + "slot" => ?slot, + ); + } + } } /// Run `func` with the `TOTAL_LABEL` and optionally the @@ -822,6 +1101,17 @@ impl ValidatorMonitor { } } + pub fn get_monitored_validator_missed_block_count(&self, validator_index: u64) -> u64 { + self.missed_blocks + .iter() + .filter(|missed_block| missed_block.validator_index == validator_index) + .count() as u64 + } + + pub fn get_beacon_proposer_cache(&self) -> Arc> { + self.beacon_proposer_cache.clone() + } + /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing. pub fn auto_register_local_validator(&mut self, validator_index: u64) { @@ -1731,6 +2021,46 @@ impl ValidatorMonitor { } } +fn register_simulated_attestation( + data: &AttestationData, + head_hit: bool, + target_hit: bool, + source_hit: bool, + log: &Logger, +) { + if head_hit { + metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT); + } else { + metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS); + } + if target_hit { + metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT); + } else { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS, + ); + } + if source_hit { + metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT); + } else { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS, + ); + } + + debug!( + log, + "Simulated attestation evaluated"; + "attestation_source" => ?data.source.root, + "attestation_target" => ?data.target.root, + "attestation_head" => ?data.beacon_block_root, + "attestation_slot" => ?data.slot, + "source_hit" => source_hit, + "target_hit" => target_hit, + "head_hit" => head_hit, + ); +} + /// Returns the duration since the unix epoch. 
 pub fn timestamp_now() -> Duration {
     SystemTime::now()
diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
index 79910df29..00140dd6e 100644
--- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
+++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
@@ -38,7 +38,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         };

         let store_ops = cache.import_new_pubkeys(state)?;
-        store.do_atomically(store_ops)?;
+        store.do_atomically_with_block_and_blobs_cache(store_ops)?;

         Ok(cache)
     }
@@ -299,7 +299,7 @@ mod test {
         let ops = cache
             .import_new_pubkeys(&state)
             .expect("should import pubkeys");
-        store.do_atomically(ops).unwrap();
+        store.do_atomically_with_block_and_blobs_cache(ops).unwrap();
         check_cache_get(&cache, &keypairs[..]);
         drop(cache);
diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs
index 85e4f1f09..ff83b2532 100644
--- a/beacon_node/beacon_chain/tests/attestation_production.rs
+++ b/beacon_node/beacon_chain/tests/attestation_production.rs
@@ -1,7 +1,10 @@
 #![cfg(not(debug_assertions))]

+use beacon_chain::attestation_simulator::produce_unaggregated_attestation;
+use beacon_chain::block_verification_types::RpcBlock;
 use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
-use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
+use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS;
+use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped};
 use lazy_static::lazy_static;
 use std::sync::Arc;
 use tree_hash::TreeHash;
@@ -14,6 +17,91 @@ lazy_static! {
     static ref KEYPAIRS: Vec<Keypair> =
         types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
 }
+
+/// This test builds a chain to check the performance of the unaggregated attestations
+/// produced by the attestation simulator service.
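+///
+/// It extends the chain one block at a time, calls `produce_unaggregated_attestation` at every
+/// slot, and finally asserts the simulator's hit/miss Prometheus counters.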
+#[tokio::test]
+async fn produces_attestations_from_attestation_simulator_service() {
+    // Produce 2 epochs, or 64 blocks
+    let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 2;
+
+    let harness = BeaconChainHarness::builder(MainnetEthSpec)
+        .default_spec()
+        .keypairs(KEYPAIRS[..].to_vec())
+        .fresh_ephemeral_store()
+        .mock_execution_layer()
+        .build();
+
+    let chain = &harness.chain;
+
+    // Test all valid committee indices and their rewards for all slots in the chain
+    // using the validator monitor
+    for slot in 0..=num_blocks_produced {
+        // We do not produce at slot=0, and there's no committee cache available anyway
+        if slot > 0 && slot <= num_blocks_produced {
+            harness.advance_slot();
+
+            harness
+                .extend_chain(
+                    1,
+                    BlockStrategy::OnCanonicalHead,
+                    AttestationStrategy::AllValidators,
+                )
+                .await;
+        }
+        // Set the state to the current slot
+        let slot = Slot::from(slot);
+        let mut state = chain
+            .state_at_slot(slot, StateSkipConfig::WithStateRoots)
+            .expect("should get state");
+
+        // Prebuild the committee cache for the current epoch
+        state
+            .build_committee_cache(RelativeEpoch::Current, &harness.chain.spec)
+            .unwrap();
+
+        // Produce an unaggregated attestation
+        produce_unaggregated_attestation(chain.clone(), chain.slot().unwrap());
+
+        // Verify that the unaggregated attestation is stored in the validator monitor
+        let validator_monitor = chain.validator_monitor.read();
+        validator_monitor
+            .get_unaggregated_attestation(slot)
+            .expect("should get unaggregated attestation");
+    }
+
+    // Check the Prometheus metrics that evaluate the performance of the unaggregated attestations
+    let hit_prometheus_metrics = vec![
+        metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL,
+        metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL,
+        metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL,
+    ];
+    let miss_prometheus_metrics = vec![
+        metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL,
+        metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL,
+        metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL,
+    ];
+
+    // The expected count only applies to the hit metrics: the miss metrics are never set, so they
+    // should not be found when gathering Prometheus metrics. If one is found, its value will
+    // differ from 0 and the test will fail.
+    let expected_miss_metrics_count = 0;
+    let expected_hit_metrics_count =
+        num_blocks_produced - UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64;
+    lighthouse_metrics::gather().iter().for_each(|mf| {
+        if hit_prometheus_metrics.contains(&mf.get_name()) {
+            assert_eq!(
+                mf.get_metric()[0].get_counter().get_value() as u64,
+                expected_hit_metrics_count
+            );
+        }
+        if miss_prometheus_metrics.contains(&mf.get_name()) {
+            assert_eq!(
+                mf.get_metric()[0].get_counter().get_value() as u64,
+                expected_miss_metrics_count
+            );
+        }
+    });
+}
+
 /// This test builds a chain that is just long enough to finalize an epoch then it produces an
 /// attestation at each slot from genesis through to three epochs past the head.
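+/// The attestations produced from the head state are also compared against attestations served
+/// from the early attester cache.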
/// @@ -67,6 +155,7 @@ async fn produces_attestations() { .store .make_full_block(&block_root, blinded_block) .unwrap(); + let blobs = chain.get_blobs(&block_root).unwrap(); let epoch_boundary_slot = state .current_epoch() @@ -131,6 +220,19 @@ async fn produces_attestations() { assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); assert_eq!(data.target.root, target_root, "bad target root"); + let rpc_block = + RpcBlock::::new(None, Arc::new(block.clone()), Some(blobs.clone())) + .unwrap(); + let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available( + available_block, + ) = chain + .data_availability_checker + .verify_kzg_for_rpc_block(rpc_block) + .unwrap() + else { + panic!("block should be available") + }; + let early_attestation = { let proto_block = chain .canonical_head @@ -141,7 +243,7 @@ async fn produces_attestations() { .early_attester_cache .add_head_block( block_root, - Arc::new(block.clone()), + available_block, proto_block, &state, &chain.spec, @@ -192,12 +294,29 @@ async fn early_attester_cache_old_request() { .get_block(&head.beacon_block_root) .unwrap(); + let head_blobs = harness + .chain + .get_blobs(&head.beacon_block_root) + .expect("should get blobs"); + + let rpc_block = + RpcBlock::::new(None, head.beacon_block.clone(), Some(head_blobs)).unwrap(); + let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = + harness + .chain + .data_availability_checker + .verify_kzg_for_rpc_block(rpc_block) + .unwrap() + else { + panic!("block should be available") + }; + harness .chain .early_attester_cache .add_head_block( head.beacon_block_root, - head.beacon_block.clone(), + available_block, head_proto_block, &head.beacon_state, &harness.chain.spec, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 7878fd14a..2501768c7 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -334,10 +334,28 @@ impl GossipTester { self.harness.chain.epoch().unwrap() } - pub fn two_epochs_ago(&self) -> Slot { + pub fn earliest_valid_attestation_slot(&self) -> Slot { + let offset = match self.harness.spec.fork_name_at_epoch(self.epoch()) { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + // Subtract an additional slot since the harness will be exactly on the start of the + // slot and the propagation tolerance will allow an extra slot. 
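+                // e.g. with 32 slots per epoch and the harness at slot 70, the earliest
+                // valid attestation slot is 70 - (32 + 1) = 37.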
+                E::slots_per_epoch() + 1
+            }
+            // EIP-7045
+            ForkName::Deneb => {
+                let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64();
+                if epoch_slot_offset != 0 {
+                    E::slots_per_epoch() + epoch_slot_offset
+                } else {
+                    // Here the propagation tolerance will cause the cutoff to be an entire epoch earlier.
+                    2 * E::slots_per_epoch()
+                }
+            }
+        };
+
         self.slot()
             .as_u64()
-            .checked_sub(E::slots_per_epoch() + 2)
+            .checked_sub(offset)
             .expect("chain is not sufficiently deep for test")
             .into()
     }
@@ -484,18 +502,21 @@ async fn aggregated_gossip_verification() {
     )
     .inspect_aggregate_err(
         "aggregate from past slot",
-        |tester, a| a.message.aggregate.data.slot = tester.two_epochs_ago(),
+        |tester, a| {
+            let too_early_slot = tester.earliest_valid_attestation_slot() - 1;
+            a.message.aggregate.data.slot = too_early_slot;
+            a.message.aggregate.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch());
+        },
        |tester, err| {
+            let valid_early_slot = tester.earliest_valid_attestation_slot();
             assert!(matches!(
                 err,
                 AttnError::PastSlot {
                     attestation_slot,
-                    // Subtract an additional slot since the harness will be exactly on the start of the
-                    // slot and the propagation tolerance will allow an extra slot.
                     earliest_permissible_slot
                 }
-                if attestation_slot == tester.two_epochs_ago()
-                    && earliest_permissible_slot == tester.slot() - E::slots_per_epoch() - 1
+                if attestation_slot == valid_early_slot - 1
+                    && earliest_permissible_slot == valid_early_slot
             ))
         },
     )
@@ -800,22 +821,20 @@ async fn unaggregated_gossip_verification() {
     .inspect_unaggregate_err(
         "attestation from past slot",
         |tester, a, _| {
-            let early_slot = tester.two_epochs_ago();
-            a.data.slot = early_slot;
-            a.data.target.epoch = early_slot.epoch(E::slots_per_epoch());
+            let too_early_slot = tester.earliest_valid_attestation_slot() - 1;
+            a.data.slot = too_early_slot;
+            a.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch());
         },
         |tester, err| {
-            dbg!(&err);
+            let valid_early_slot = tester.earliest_valid_attestation_slot();
             assert!(matches!(
                 err,
                 AttnError::PastSlot {
                     attestation_slot,
-                    // Subtract an additional slot since the harness will be exactly on the start of the
-                    // slot and the propagation tolerance will allow an extra slot.
                     earliest_permissible_slot,
                 }
-                if attestation_slot == tester.two_epochs_ago()
-                    && earliest_permissible_slot == tester.slot() - E::slots_per_epoch() - 1
+                if attestation_slot == valid_early_slot - 1
+                    && earliest_permissible_slot == valid_early_slot
             ))
         },
     )
diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs
index 0b87ad148..541e97436 100644
--- a/beacon_node/beacon_chain/tests/block_verification.rs
+++ b/beacon_node/beacon_chain/tests/block_verification.rs
@@ -1,7 +1,9 @@
 #![cfg(not(debug_assertions))]

-use beacon_chain::test_utils::{
-    AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
+use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock};
+use beacon_chain::{
+    test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
+    AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock,
 };
 use beacon_chain::{
     BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock,
@@ -33,7 +35,7 @@ lazy_static!
{ static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); } -async fn get_chain_segment() -> Vec> { +async fn get_chain_segment() -> (Vec>, Vec>>) { let harness = get_harness(VALIDATOR_COUNT); harness @@ -45,6 +47,7 @@ async fn get_chain_segment() -> Vec> { .await; let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); + let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); for snapshot in harness .chain .chain_dump() @@ -63,8 +66,55 @@ async fn get_chain_segment() -> Vec> { beacon_block: Arc::new(full_block), beacon_state: snapshot.beacon_state, }); + segment_blobs.push(Some( + harness + .chain + .get_blobs(&snapshot.beacon_block_root) + .unwrap(), + )) } - segment + (segment, segment_blobs) +} + +async fn get_chain_segment_with_blob_sidecars( +) -> (Vec>, Vec>>) { + let harness = get_harness(VALIDATOR_COUNT); + + harness + .extend_chain( + CHAIN_SEGMENT_LENGTH, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); + let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); + for snapshot in harness + .chain + .chain_dump() + .expect("should dump chain") + .into_iter() + .skip(1) + { + let full_block = harness + .chain + .get_block(&snapshot.beacon_block_root) + .await + .unwrap() + .unwrap(); + segment.push(BeaconSnapshot { + beacon_block_root: snapshot.beacon_block_root, + beacon_block: Arc::new(full_block), + beacon_state: snapshot.beacon_state, + }); + let blob_sidecars = harness + .chain + .get_blobs(&snapshot.beacon_block_root) + .unwrap(); + segment_blobs.push(Some(blob_sidecars)) + } + (segment, segment_blobs) } fn get_harness(validator_count: usize) -> BeaconChainHarness> { @@ -84,10 +134,16 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness]) -> Vec>> { +fn chain_segment_blocks( + chain_segment: &[BeaconSnapshot], + blobs: &[Option>], +) -> Vec> { chain_segment .iter() - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect() } @@ -129,22 +185,52 @@ fn update_proposal_signatures( } } -fn update_parent_roots(snapshots: &mut [BeaconSnapshot]) { +fn update_parent_roots( + snapshots: &mut [BeaconSnapshot], + blobs: &mut [Option>], +) { for i in 0..snapshots.len() { let root = snapshots[i].beacon_block.canonical_root(); - if let Some(child) = snapshots.get_mut(i + 1) { + if let (Some(child), Some(child_blobs)) = (snapshots.get_mut(i + 1), blobs.get_mut(i + 1)) { let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; - child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)) + let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature)); + if let Some(blobs) = child_blobs { + update_blob_signed_header(&new_child, blobs); + } + child.beacon_block = new_child; } } } +fn update_blob_signed_header( + signed_block: &SignedBeaconBlock, + blobs: &mut BlobSidecarList, +) { + for old_blob_sidecar in blobs.iter_mut() { + let new_blob = Arc::new(BlobSidecar:: { + index: old_blob_sidecar.index, + blob: old_blob_sidecar.blob.clone(), + kzg_commitment: old_blob_sidecar.kzg_commitment, + kzg_proof: old_blob_sidecar.kzg_proof, + signed_block_header: signed_block.signed_block_header(), + kzg_commitment_inclusion_proof: signed_block + .message() + .body() + .kzg_commitment_merkle_proof(old_blob_sidecar.index 
as usize) + .unwrap(), + }); + *old_blob_sidecar = new_blob; + } +} + #[tokio::test] async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; - let blocks = chain_segment_blocks(&chain_segment); + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .collect(); harness .chain @@ -179,8 +265,10 @@ async fn chain_segment_full_segment() { async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; - let blocks = chain_segment_blocks(&chain_segment); + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .collect(); harness .chain @@ -209,7 +297,7 @@ async fn chain_segment_varying_chunk_size() { #[tokio::test] async fn chain_segment_non_linear_parent_roots() { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; harness .chain @@ -219,7 +307,9 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a block removed. */ - let mut blocks = chain_segment_blocks(&chain_segment); + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .collect(); blocks.remove(2); assert!( @@ -237,10 +327,16 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. */ - let mut blocks = chain_segment_blocks(&chain_segment); - let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .collect(); + + let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); + blocks[3] = RpcBlock::new_without_blobs( + None, + Arc::new(SignedBeaconBlock::from_block(block, signature)), + ); assert!( matches!( @@ -258,7 +354,7 @@ async fn chain_segment_non_linear_parent_roots() { #[tokio::test] async fn chain_segment_non_linear_slots() { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; harness .chain .slot_clock @@ -268,10 +364,15 @@ async fn chain_segment_non_linear_slots() { * Test where a child is lower than the parent. */ - let mut blocks = chain_segment_blocks(&chain_segment); - let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .collect(); + let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); + blocks[3] = RpcBlock::new_without_blobs( + None, + Arc::new(SignedBeaconBlock::from_block(block, signature)), + ); assert!( matches!( @@ -289,10 +390,15 @@ async fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. 
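+     * (blocks[3] is given blocks[2]'s slot below, so the child's slot equals its parent's.)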
*/ - let mut blocks = chain_segment_blocks(&chain_segment); - let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .collect(); + let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); + blocks[3] = RpcBlock::new_without_blobs( + None, + Arc::new(SignedBeaconBlock::from_block(block, signature)), + ); assert!( matches!( @@ -309,14 +415,18 @@ async fn chain_segment_non_linear_slots() { async fn assert_invalid_signature( chain_segment: &[BeaconSnapshot], + chain_segment_blobs: &[Option>], harness: &BeaconChainHarness>, block_index: usize, snapshots: &[BeaconSnapshot], item: &str, ) { - let blocks = snapshots + let blocks: Vec> = snapshots .iter() - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect(); // Ensure the block will be rejected if imported in a chain segment. @@ -340,7 +450,10 @@ async fn assert_invalid_signature( let ancestor_blocks = chain_segment .iter() .take(block_index) - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been // imported prior to this test. @@ -354,7 +467,12 @@ async fn assert_invalid_signature( .chain .process_block( snapshots[block_index].beacon_block.canonical_root(), - snapshots[block_index].beacon_block.clone(), + RpcBlock::new( + None, + snapshots[block_index].beacon_block.clone(), + chain_segment_blobs[block_index].clone(), + ) + .unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -386,7 +504,7 @@ async fn get_invalid_sigs_harness( } #[tokio::test] async fn invalid_signature_gossip_block() { - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Ensure the block will be rejected if imported on its own (without gossip checking). 
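+        // The ancestors up to `block_index` are imported first, so that only this block's
+        // invalid signature is at fault when it is processed on its own.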
let harness = get_invalid_sigs_harness(&chain_segment).await; @@ -404,7 +522,10 @@ async fn invalid_signature_gossip_block() { let ancestor_blocks = chain_segment .iter() .take(block_index) - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect(); harness .chain @@ -433,7 +554,7 @@ async fn invalid_signature_gossip_block() { #[tokio::test] async fn invalid_signature_block_proposal() { - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -446,9 +567,12 @@ async fn invalid_signature_block_proposal() { block.clone(), junk_signature(), )); - let blocks = snapshots + let blocks: Vec> = snapshots .iter() - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect::>(); // Ensure the block will be rejected if imported in a chain segment. assert!( @@ -467,7 +591,7 @@ async fn invalid_signature_block_proposal() { #[tokio::test] async fn invalid_signature_randao_reveal() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -479,15 +603,23 @@ async fn invalid_signature_randao_reveal() { *block.body_mut().randao_reveal_mut() = junk_signature(); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await; + assert_invalid_signature( + &chain_segment, + &chain_segment_blobs, + &harness, + block_index, + &snapshots, + "randao", + ) + .await; } } #[tokio::test] async fn invalid_signature_proposer_slashing() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -513,10 +645,11 @@ async fn invalid_signature_proposer_slashing() { .expect("should update proposer slashing"); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature( &chain_segment, + &chain_segment_blobs, &harness, block_index, &snapshots, @@ -528,7 +661,7 @@ async fn invalid_signature_proposer_slashing() { #[tokio::test] async fn invalid_signature_attester_slashing() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -565,10 +698,11 @@ async fn invalid_signature_attester_slashing() { .expect("should 
update attester slashing"); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature( &chain_segment, + &chain_segment_blobs, &harness, block_index, &snapshots, @@ -580,7 +714,7 @@ async fn invalid_signature_attester_slashing() { #[tokio::test] async fn invalid_signature_attestation() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; let mut checked_attestation = false; for &block_index in BLOCK_INDICES { @@ -595,10 +729,11 @@ async fn invalid_signature_attestation() { attestation.signature = junk_aggregate_signature(); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature( &chain_segment, + &chain_segment_blobs, &harness, block_index, &snapshots, @@ -617,7 +752,7 @@ async fn invalid_signature_attestation() { #[tokio::test] async fn invalid_signature_deposit() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Note: an invalid deposit signature is permitted! let harness = get_invalid_sigs_harness(&chain_segment).await; @@ -643,11 +778,14 @@ async fn invalid_signature_deposit() { .expect("should update deposit"); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); - let blocks = snapshots + let blocks: Vec> = snapshots .iter() - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect(); assert!( !matches!( @@ -665,7 +803,7 @@ async fn invalid_signature_deposit() { #[tokio::test] async fn invalid_signature_exit() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -688,10 +826,11 @@ async fn invalid_signature_exit() { .expect("should update deposit"); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature( &chain_segment, + &chain_segment_blobs, &harness, block_index, &snapshots, @@ -711,7 +850,7 @@ fn unwrap_err(result: Result) -> E { #[tokio::test] async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment_with_blob_sidecars().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; @@ -721,7 +860,10 @@ async fn block_gossip_verification() { .set_slot(chain_segment[block_index].beacon_block.slot().as_u64()); // Import the ancestors prior to the block we're 
testing. - for snapshot in &chain_segment[0..block_index] { + for (snapshot, blobs_opt) in chain_segment[0..block_index] + .iter() + .zip(chain_segment_blobs.iter()) + { let gossip_verified = harness .chain .verify_block_for_gossip(snapshot.beacon_block.clone()) @@ -738,6 +880,21 @@ async fn block_gossip_verification() { ) .await .expect("should import valid gossip verified block"); + if let Some(blob_sidecars) = blobs_opt { + for blob_sidecar in blob_sidecars { + let blob_index = blob_sidecar.index; + let gossip_verified = harness + .chain + .verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index) + .expect("should obtain gossip verified blob"); + + harness + .chain + .process_gossip_blob(gossip_verified) + .await + .expect("should import valid gossip verified blob"); + } + } } // Recompute the head to ensure we cache the latest view of fork choice. @@ -906,7 +1063,6 @@ async fn block_gossip_verification() { .0; let expected_proposer = block.proposer_index(); let other_proposer = (0..VALIDATOR_COUNT as u64) - .into_iter() .find(|i| *i != block.proposer_index()) .expect("there must be more than one validator in this test"); *block.proposer_index_mut() = other_proposer; @@ -958,8 +1114,7 @@ async fn block_gossip_verification() { .chain .verify_block_for_gossip(block.clone()) .await - .err() - .expect("should error when processing known block"), + .expect_err("should error when processing known block"), BlockError::BlockIsAlreadyKnown ), "the second proposal by this validator should be rejected" @@ -984,14 +1139,27 @@ async fn verify_block_for_gossip_slashing_detection() { harness.advance_slot(); let state = harness.get_current_state(); - let (block1, _) = harness.make_block(state.clone(), Slot::new(1)).await; - let (block2, _) = harness.make_block(state, Slot::new(1)).await; + let ((block1, blobs1), _) = harness.make_block(state.clone(), Slot::new(1)).await; + let ((block2, _blobs2), _) = harness.make_block(state, Slot::new(1)).await; - let verified_block = harness - .chain - .verify_block_for_gossip(Arc::new(block1)) - .await - .unwrap(); + let verified_block = harness.chain.verify_block_for_gossip(block1).await.unwrap(); + + if let Some((kzg_proofs, blobs)) = blobs1 { + let sidecars = + BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs).unwrap(); + for sidecar in sidecars { + let blob_index = sidecar.index; + let verified_blob = harness + .chain + .verify_blob_sidecar_for_gossip(sidecar, blob_index) + .unwrap(); + harness + .chain + .process_gossip_blob(verified_blob) + .await + .unwrap(); + } + } harness .chain .process_block( @@ -1002,12 +1170,7 @@ async fn verify_block_for_gossip_slashing_detection() { ) .await .unwrap(); - unwrap_err( - harness - .chain - .verify_block_for_gossip(Arc::new(block2)) - .await, - ); + unwrap_err(harness.chain.verify_block_for_gossip(block2).await); // Slasher should have been handed the two conflicting blocks and crafted a slashing. 
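+    // `process_queued` runs the slasher's batch processing for the given epoch, which is what
+    // actually produces the slashing.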
slasher.process_queued(Epoch::new(0)).unwrap(); @@ -1024,13 +1187,9 @@ async fn verify_block_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - let (block, _) = harness.make_block(state.clone(), Slot::new(1)).await; + let ((block, _), _) = harness.make_block(state.clone(), Slot::new(1)).await; - let verified_block = harness - .chain - .verify_block_for_gossip(Arc::new(block)) - .await - .unwrap(); + let verified_block = harness.chain.verify_block_for_gossip(block).await.unwrap(); let attestations = verified_block.block.message().body().attestations().clone(); harness .chain @@ -1111,7 +1270,7 @@ async fn add_base_block_to_altair_chain() { // Produce an Altair block. let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (altair_signed_block, _) = harness.make_block(state.clone(), slot).await; + let ((altair_signed_block, _), _) = harness.make_block(state.clone(), slot).await; let altair_block = &altair_signed_block .as_altair() .expect("test expects an altair block") @@ -1170,8 +1329,7 @@ async fn add_base_block_to_altair_chain() { .chain .verify_block_for_gossip(Arc::new(base_block.clone())) .await - .err() - .expect("should error when processing base block"), + .expect_err("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Altair, object_fork: ForkName::Base, @@ -1189,8 +1347,7 @@ async fn add_base_block_to_altair_chain() { || Ok(()), ) .await - .err() - .expect("should error when processing base block"), + .expect_err("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Altair, object_fork: ForkName::Base, @@ -1201,7 +1358,10 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(base_block)], NotifyExecutionLayer::Yes,) + .process_chain_segment( + vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))], + NotifyExecutionLayer::Yes, + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1245,7 +1405,7 @@ async fn add_altair_block_to_base_chain() { // Produce an altair block. 
let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (base_signed_block, _) = harness.make_block(state.clone(), slot).await; + let ((base_signed_block, _), _) = harness.make_block(state.clone(), slot).await; let base_block = &base_signed_block .as_base() .expect("test expects a base block") @@ -1305,8 +1465,7 @@ async fn add_altair_block_to_base_chain() { .chain .verify_block_for_gossip(Arc::new(altair_block.clone())) .await - .err() - .expect("should error when processing altair block"), + .expect_err("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Base, object_fork: ForkName::Altair, @@ -1324,8 +1483,7 @@ async fn add_altair_block_to_base_chain() { || Ok(()), ) .await - .err() - .expect("should error when processing altair block"), + .expect_err("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Base, object_fork: ForkName::Altair, @@ -1336,7 +1494,10 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(altair_block)], NotifyExecutionLayer::Yes) + .process_chain_segment( + vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))], + NotifyExecutionLayer::Yes + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1378,16 +1539,18 @@ async fn import_duplicate_block_unrealized_justification() { // The store's justified checkpoint must still be at epoch 0, while unrealized justification // must be at epoch 1. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); - assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); - drop(fc); + { + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); + drop(fc); + } // Produce a block to justify epoch 2. let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (block, _) = harness.make_block(state.clone(), slot).await; - let block = Arc::new(block); + let (block_contents, _) = harness.make_block(state.clone(), slot).await; + let (block, _) = block_contents; let block_root = block.canonical_root(); // Create two verified variants of the block, representing the same block being processed in @@ -1395,52 +1558,71 @@ async fn import_duplicate_block_unrealized_justification() { let notify_execution_layer = NotifyExecutionLayer::Yes; let verified_block1 = block .clone() - .into_execution_pending_block(block_root, &chain, notify_execution_layer) + .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); let verified_block2 = block - .into_execution_pending_block(block_root, &chain, notify_execution_layer) + .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); // Import the first block, simulating a block processed via a finalized chain segment. - chain - .clone() - .import_execution_pending_block(verified_block1) + import_execution_pending_block(chain.clone(), verified_block1) .await .unwrap(); // Unrealized justification should NOT have updated. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); - let unrealized_justification = fc.unrealized_justified_checkpoint(); - assert_eq!(unrealized_justification.epoch, 2); - - // The fork choice node for the block should have unrealized justification. 
- let fc_block = fc.get_block(&block_root).unwrap(); - assert_eq!( - fc_block.unrealized_justified_checkpoint, - Some(unrealized_justification) - ); - drop(fc); + let unrealized_justification = { + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + let unrealized_justification = fc.unrealized_justified_checkpoint(); + assert_eq!(unrealized_justification.epoch, 2); + // The fork choice node for the block should have unrealized justification. + let fc_block = fc.get_block(&block_root).unwrap(); + assert_eq!( + fc_block.unrealized_justified_checkpoint, + Some(unrealized_justification) + ); + drop(fc); + unrealized_justification + }; // Import the second verified block, simulating a block processed via RPC. - chain - .clone() - .import_execution_pending_block(verified_block2) + import_execution_pending_block(chain.clone(), verified_block2) .await .unwrap(); // Unrealized justification should still be updated. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); + let fc3 = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc3.justified_checkpoint().epoch, 0); assert_eq!( - fc.unrealized_justified_checkpoint(), + fc3.unrealized_justified_checkpoint(), unrealized_justification ); // The fork choice node for the block should still have the unrealized justified checkpoint. - let fc_block = fc.get_block(&block_root).unwrap(); + let fc_block = fc3.get_block(&block_root).unwrap(); + drop(fc3); assert_eq!( fc_block.unrealized_justified_checkpoint, Some(unrealized_justification) ); } + +async fn import_execution_pending_block( + chain: Arc>, + execution_pending_block: ExecutionPendingBlock, +) -> Result { + match chain + .clone() + .into_executed_block(execution_pending_block) + .await + .unwrap() + { + ExecutedBlock::Available(block) => chain + .import_available_block(Box::from(block)) + .await + .map_err(|e| format!("{e:?}")), + ExecutedBlock::AvailabilityPending(_) => { + Err("AvailabilityPending not expected in this test. Block not imported.".to_string()) + } + } +} diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs new file mode 100644 index 000000000..d54543e4f --- /dev/null +++ b/beacon_node/beacon_chain/tests/events.rs @@ -0,0 +1,93 @@ +use beacon_chain::blob_verification::GossipVerifiedBlob; +use beacon_chain::test_utils::BeaconChainHarness; +use eth2::types::{EventKind, SseBlobSidecar}; +use rand::rngs::StdRng; +use rand::SeedableRng; +use std::sync::Arc; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec}; + +type E = MinimalEthSpec; + +/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. 
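+/// The emitted event payload is an `SseBlobSidecar` built from the gossip-verified blob.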
+#[tokio::test] +async fn blob_sidecar_event_on_process_gossip_blob() { + let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // subscribe to blob sidecar events + let event_handler = harness.chain.event_handler.as_ref().unwrap(); + let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); + + // build and process a gossip verified blob + let kzg = harness.chain.kzg.as_ref().unwrap(); + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let sidecar = BlobSidecar::random_valid(&mut rng, kzg) + .map(Arc::new) + .unwrap(); + let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(sidecar); + let expected_sse_blobs = SseBlobSidecar::from_blob_sidecar(gossip_verified_blob.as_blob()); + + let _ = harness + .chain + .process_gossip_blob(gossip_verified_blob) + .await + .unwrap(); + + let sidecar_event = blob_event_receiver.try_recv().unwrap(); + assert_eq!(sidecar_event, EventKind::BlobSidecar(expected_sse_blobs)); +} + +/// Verifies that a blob event is emitted when blobs are received via RPC. +#[tokio::test] +async fn blob_sidecar_event_on_process_rpc_blobs() { + let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // subscribe to blob sidecar events + let event_handler = harness.chain.event_handler.as_ref().unwrap(); + let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); + + // build and process multiple rpc blobs + let kzg = harness.chain.kzg.as_ref().unwrap(); + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + + let blob_1 = BlobSidecar::random_valid(&mut rng, kzg) + .map(Arc::new) + .unwrap(); + let blob_2 = Arc::new(BlobSidecar { + index: 1, + ..BlobSidecar::random_valid(&mut rng, kzg).unwrap() + }); + let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]); + let expected_sse_blobs = vec![ + SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()), + SseBlobSidecar::from_blob_sidecar(blob_2.as_ref()), + ]; + + let _ = harness + .chain + .process_rpc_blobs(blob_1.slot(), blob_1.block_root(), blobs) + .await + .unwrap(); + + let mut sse_blobs: Vec = vec![]; + while let Ok(sidecar_event) = blob_event_receiver.try_recv() { + if let EventKind::BlobSidecar(sse_blob_sidecar) = sidecar_event { + sse_blobs.push(sse_blob_sidecar); + } else { + panic!("`BlobSidecar` event kind expected."); + } + } + assert_eq!(sse_blobs, expected_sse_blobs); +} diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index c81a54740..e0564e151 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -2,6 +2,7 @@ mod attestation_production; mod attestation_verification; mod block_verification; mod capella; +mod events; mod merge; mod op_verification; mod payload_invalidation; @@ -9,3 +10,4 @@ mod rewards; mod store_tests; mod sync_committee_verification; mod tests; +mod validator_monitor; diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 535fe080a..f6cf40a39 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -29,10 +29,19 @@ fn get_store(db_path: &TempDir) 
-> Arc { let spec = test_spec::(); let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); + let blobs_path = db_path.path().join("blobs_db"); let config = StoreConfig::default(); let log = NullLoggerBuilder.build().expect("logger should build"); - HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log) - .expect("disk store should initialize") + HotColdDB::open( + &hot_path, + &cold_path, + &blobs_path, + |_, _, _| Ok(()), + config, + spec, + log, + ) + .expect("disk store should initialize") } fn get_harness(store: Arc, validator_count: usize) -> TestHarness { diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index cd4351297..a0b7fbd36 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -171,7 +171,7 @@ impl InvalidPayloadRig { async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { let mut roots = Vec::with_capacity(num_blocks as usize); for _ in 0..num_blocks { - roots.push(self.import_block(is_valid.clone()).await); + roots.push(self.import_block(is_valid).await); } roots } @@ -225,7 +225,7 @@ impl InvalidPayloadRig { let head = self.harness.chain.head_snapshot(); let state = head.beacon_state.clone_with_only_committee_caches(); let slot = slot_override.unwrap_or(state.slot() + 1); - let (block, post_state) = self.harness.make_block(state, slot).await; + let ((block, blobs), post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); let set_new_payload = |payload: Payload| match payload { @@ -289,7 +289,7 @@ impl InvalidPayloadRig { } let root = self .harness - .process_block(slot, block.canonical_root(), block.clone()) + .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) .await .unwrap(); @@ -319,7 +319,7 @@ impl InvalidPayloadRig { .get_full_block(&block_root) .unwrap() .unwrap(), - block, + *block, "block from db must match block imported" ); } @@ -330,7 +330,7 @@ impl InvalidPayloadRig { match self .harness - .process_block(slot, block.canonical_root(), block) + .process_block(slot, block.canonical_root(), (block, blobs)) .await { Err(error) if evaluate_error(&error) => (), @@ -693,17 +693,20 @@ async fn invalidates_all_descendants() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let ((fork_block, _), _fork_post_state) = + rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_block_root = rig .harness .chain .process_block( fork_block.canonical_root(), - Arc::new(fork_block), + fork_block, NotifyExecutionLayer::Yes, || Ok(()), ) .await + .unwrap() + .try_into() .unwrap(); rig.recompute_head().await; @@ -789,18 +792,21 @@ async fn switches_heads() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let ((fork_block, _), _fork_post_state) = + rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); let fork_block_root = rig .harness .chain .process_block( fork_block.canonical_root(), - Arc::new(fork_block), + fork_block, NotifyExecutionLayer::Yes, || 
Ok(()), ) .await + .unwrap() + .try_into() .unwrap(); rig.recompute_head().await; @@ -815,13 +821,16 @@ async fn switches_heads() { }) .await; - // The fork block should become the head. - assert_eq!(rig.harness.head_block_root(), fork_block_root); + // NOTE: The `import_block` method above will cause the `ExecutionStatus` of the + // `fork_block_root`'s payload to switch from `Optimistic` to `Invalid`. This means it *won't* + // be set as head, its parent block will instead. This is an issue with the mock EL and/or + // the payload invalidation rig. + assert_eq!(rig.harness.head_block_root(), fork_parent_root); // The fork block has not yet been validated. assert!(rig .execution_status(fork_block_root) - .is_strictly_optimistic()); + .is_optimistic_or_invalid()); for root in blocks { let slot = rig @@ -1012,6 +1021,7 @@ async fn payload_preparation() { .unwrap(), fee_recipient, None, + None, ); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } @@ -1034,8 +1044,7 @@ async fn invalid_parent() { // Produce another block atop the parent, but don't import yet. let slot = parent_block.slot() + 1; rig.harness.set_current_slot(slot); - let (block, state) = rig.harness.make_block(parent_state, slot).await; - let block = Arc::new(block); + let ((block, _), state) = rig.harness.make_block(parent_state, slot).await; let block_root = block.canonical_root(); assert_eq!(block.parent_root(), parent_root); @@ -1045,7 +1054,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for gossip. assert!(matches!( - rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, + rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1428,13 +1437,13 @@ async fn build_optimistic_chain( .server .all_get_block_by_hash_requests_return_natural_value(); - return rig; + rig } #[tokio::test] async fn optimistic_transition_block_valid_unfinalized() { let ttd = 42; - let num_blocks = 16 as usize; + let num_blocks = 16_usize; let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; let post_transition_block_root = rig @@ -1488,7 +1497,7 @@ async fn optimistic_transition_block_valid_unfinalized() { #[tokio::test] async fn optimistic_transition_block_valid_finalized() { let ttd = 42; - let num_blocks = 130 as usize; + let num_blocks = 130_usize; let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; let post_transition_block_root = rig @@ -1543,7 +1552,7 @@ async fn optimistic_transition_block_invalid_unfinalized() { let block_ttd = 42; let rig_ttd = 1337; - let num_blocks = 22 as usize; + let num_blocks = 22_usize; let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; let post_transition_block_root = rig @@ -1619,7 +1628,7 @@ async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { let block_ttd = 42; let rig_ttd = 1337; - let num_blocks = 22 as usize; + let num_blocks = 22_usize; let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; let post_transition_block_root = rig @@ -1732,7 +1741,7 @@ async fn optimistic_transition_block_invalid_finalized() { let block_ttd = 42; let rig_ttd = 1337; - let num_blocks = 130 as usize; + let num_blocks = 130_usize; let rig =
build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; let post_transition_block_root = rig @@ -1854,8 +1863,8 @@ impl InvalidHeadSetup { .chain .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) .unwrap(); - let (fork_block, _) = rig.harness.make_block(parent_state, slot).await; - opt_fork_block = Some(Arc::new(fork_block)); + let (fork_block_tuple, _) = rig.harness.make_block(parent_state, slot).await; + opt_fork_block = Some(fork_block_tuple.0); } else { // Skipped slot. }; diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index be271804b..a78463ef5 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -14,7 +14,7 @@ use eth2::lighthouse::StandardAttestationRewards; use eth2::types::ValidatorId; use lazy_static::lazy_static; use types::beacon_state::Error as BeaconStateError; -use types::{BeaconState, ChainSpec}; +use types::{BeaconState, ChainSpec, ForkName, Slot}; pub const VALIDATOR_COUNT: usize = 64; @@ -219,6 +219,359 @@ async fn test_verify_attestation_rewards_base_inactivity_leak() { assert_eq!(expected_balances, balances); } +#[tokio::test] +async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoch() { + let spec = E::default_spec(); + let harness = get_harness(spec.clone()); + + let half = VALIDATOR_COUNT / 2; + let half_validators: Vec = (0..half).collect(); + // target epoch is the epoch where the chain enters inactivity leak + let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 2; + + // advance until beginning of epoch N + 2 + harness + .extend_chain( + (E::slots_per_epoch() * (target_epoch + 1)) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(half_validators.clone()), + ) + .await; + + // advance to create first justification epoch and get initial balances + harness.extend_slots(E::slots_per_epoch() as usize).await; + target_epoch += 1; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + //assert previous_justified_checkpoint matches 0 as we were in inactivity leak from beginning + assert_eq!( + 0, + harness + .get_current_state() + .previous_justified_checkpoint() + .epoch + .as_u64() + ); + + // extend slots to beginning of epoch N + 1 + harness.extend_slots(E::slots_per_epoch() as usize).await; + + //assert target epoch and previous_justified_checkpoint match + assert_eq!( + target_epoch, + harness + .get_current_state() + .previous_justified_checkpoint() + .epoch + .as_u64() + ); + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert we successfully get ideal rewards for justified epoch out of inactivity leak + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation rewards to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + assert_eq!(expected_balances, balances); +} + +#[tokio::test] +async fn test_verify_attestation_rewards_altair() { + let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec.clone()); + let target_epoch = 0; + + // advance until epoch N + 1 
and get initial balances + harness + .extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize) + .await; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + // advance until epoch N + 2 and build proposal rewards map + let mut proposal_rewards_map: HashMap = HashMap::new(); + let mut sync_committee_rewards_map: HashMap = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward( + signed_block.message(), + signed_block.canonical_root(), + &mut state, + ) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .get(&beacon_block_reward.proposer_index) + .unwrap_or(&0u64) + + beacon_block_reward.total; + + proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + reward_payload.iter().for_each(|reward| { + let mut amount = *sync_committee_rewards_map + .get(&reward.validator_index) + .unwrap_or(&0); + amount += reward.reward; + sync_committee_rewards_map.insert(reward.validator_index, amount); + }); + + harness.extend_slots(1).await; + } + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert ideal rewards are greater than 0 + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation, proposal, and sync committee rewards and penalties to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); + let expected_balances = + apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + + assert_eq!(expected_balances, balances); +} + +#[tokio::test] +async fn test_verify_attestation_rewards_altair_inactivity_leak() { + let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec.clone()); + + let half = VALIDATOR_COUNT / 2; + let half_validators: Vec = (0..half).collect(); + // target epoch is the epoch where the chain enters inactivity leak + let target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; + + // advance until beginning of epoch N + 1 and get balances + harness + .extend_slots_some_validators( + (E::slots_per_epoch() * (target_epoch + 1)) as usize, + half_validators.clone(), + ) + .await; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + // advance until epoch N + 2 and build proposal rewards map + let mut proposal_rewards_map: HashMap = HashMap::new(); + let mut sync_committee_rewards_map: HashMap = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // calculate beacon block rewards / 
penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward( + signed_block.message(), + signed_block.canonical_root(), + &mut state, + ) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .get(&beacon_block_reward.proposer_index) + .unwrap_or(&0u64) + + beacon_block_reward.total; + + proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + reward_payload.iter().for_each(|reward| { + let mut amount = *sync_committee_rewards_map + .get(&reward.validator_index) + .unwrap_or(&0); + amount += reward.reward; + sync_committee_rewards_map.insert(reward.validator_index, amount); + }); + + harness + .extend_slots_some_validators(1, half_validators.clone()) + .await; + } + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert inactivity penalty for both ideal rewards and individual validators + assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); + assert!(total_rewards[..half] + .iter() + .all(|reward| reward.inactivity == 0)); + assert!(total_rewards[half..] + .iter() + .all(|reward| reward.inactivity < 0)); + + // apply attestation, proposal, and sync committee rewards and penalties to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); + let expected_balances = + apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + + assert_eq!(expected_balances, balances); +} + +#[tokio::test] +async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_epoch() { + let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec.clone()); + + let half = VALIDATOR_COUNT / 2; + let half_validators: Vec = (0..half).collect(); + // target epoch is the epoch where the chain enters inactivity leak + 1 + let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 2; + + // advance until beginning of epoch N + 1 + harness + .extend_slots_some_validators( + (E::slots_per_epoch() * (target_epoch + 1)) as usize, + half_validators.clone(), + ) + .await; + + let validator_inactivity_score = harness + .get_current_state() + .get_inactivity_score(VALIDATOR_COUNT - 1) + .unwrap(); + + //assert to ensure we are in inactivity leak + assert_eq!(4, validator_inactivity_score); + + // advance for first justification epoch and get balances + harness.extend_slots(E::slots_per_epoch() as usize).await; + target_epoch += 1; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + // advance until epoch N + 2 and build proposal rewards map + let mut proposal_rewards_map: HashMap = HashMap::new(); + let mut sync_committee_rewards_map: HashMap = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // 
calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward( + signed_block.message(), + signed_block.canonical_root(), + &mut state, + ) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .get(&beacon_block_reward.proposer_index) + .unwrap_or(&0u64) + + beacon_block_reward.total; + + proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + reward_payload.iter().for_each(|reward| { + let mut amount = *sync_committee_rewards_map + .get(&reward.validator_index) + .unwrap_or(&0); + amount += reward.reward; + sync_committee_rewards_map.insert(reward.validator_index, amount); + }); + + harness.extend_slots(1).await; + } + + //assert target epoch and previous_justified_checkpoint match + assert_eq!( + target_epoch, + harness + .get_current_state() + .previous_justified_checkpoint() + .epoch + .as_u64() + ); + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert ideal rewards are greater than 0 + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation, proposal, and sync committee rewards and penalties to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); + let expected_balances = + apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + assert_eq!(expected_balances, balances); +} + #[tokio::test] async fn test_verify_attestation_rewards_base_subset_only() { let harness = get_harness(E::default_spec()); @@ -297,3 +650,32 @@ fn get_validator_balances(state: BeaconState, validators: &[usize]) -> Vec, + expected_balances: Vec, +) -> Vec { + let calculated_balances = expected_balances + .iter() + .enumerate() + .map(|(i, balance)| balance + proposal_rewards_map.get(&(i as u64)).unwrap_or(&0u64)) + .collect(); + + calculated_balances +} + +fn apply_sync_committee_rewards( + sync_committee_rewards_map: &HashMap, + expected_balances: Vec, +) -> Vec { + let calculated_balances = expected_balances + .iter() + .enumerate() + .map(|(i, balance)| { + (*balance as i64 + sync_committee_rewards_map.get(&(i as u64)).unwrap_or(&0i64)) + .unsigned_abs() + }) + .collect(); + + calculated_balances +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ab54af42c..9b832bd76 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,17 +1,20 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_verification::Error as AttnError; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::{ - test_spec, 
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, + mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, + BlockStrategy, DiskHarnessType, }; -use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use beacon_chain::{ - historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, - BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, - NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, + data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError, + migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, + BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, }; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::TrustedSetup; use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; @@ -23,10 +26,13 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::Arc; use std::time::Duration; -use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; +use store::chunked_vector::Chunk; +use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN}; use store::{ + chunked_vector::{chunk_key, Field}, + get_key_for_col, iter::{BlockRootsIterator, StateRootsIterator}, - HotColdDB, LevelDB, StoreConfig, + BlobInfo, DBColumn, HotColdDB, KeyValueStore, KeyValueStoreOp, LevelDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; use tokio::time::sleep; @@ -47,20 +53,29 @@ type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; fn get_store(db_path: &TempDir) -> Arc, LevelDB>> { - get_store_with_spec(db_path, test_spec::()) + get_store_generic(db_path, StoreConfig::default(), test_spec::()) } -fn get_store_with_spec( +fn get_store_generic( db_path: &TempDir, + config: StoreConfig, spec: ChainSpec, ) -> Arc, LevelDB>> { let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); - let config = StoreConfig::default(); + let blobs_path = db_path.path().join("blobs_db"); let log = test_logger(); - HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log) - .expect("disk store should initialize") + HotColdDB::open( + &hot_path, + &cold_path, + &blobs_path, + |_, _, _| Ok(()), + config, + spec, + log, + ) + .expect("disk store should initialize") } fn get_harness( @@ -80,8 +95,8 @@ fn get_harness_generic( validator_count: usize, chain_config: ChainConfig, ) -> TestHarness { - let harness = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() + let harness = TestHarness::builder(MinimalEthSpec) + .spec(store.get_chain_spec().clone()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .logger(store.logger().clone()) .fresh_disk_store(store) @@ -92,6 +107,253 @@ fn get_harness_generic( harness } +/// Tests that `store.heal_freezer_block_roots_at_split` inserts block roots between last restore point +/// slot and the split slot. 
+#[tokio::test] +async fn heal_freezer_block_roots_at_split() { + // chunk_size is hard-coded to 128 + let num_blocks_produced = E::slots_per_epoch() * 20; + let db_path = tempdir().unwrap(); + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let split_slot = store.get_split_slot(); + assert_eq!(split_slot, 18 * E::slots_per_epoch()); + + // Do a heal before deleting to make sure that it doesn't break. + let last_restore_point_slot = Slot::new(16 * E::slots_per_epoch()); + store.heal_freezer_block_roots_at_split().unwrap(); + check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); + + // Delete block roots between `last_restore_point_slot` and `split_slot`. + let chunk_index = >::chunk_index( + last_restore_point_slot.as_usize(), + ); + let key_chunk = get_key_for_col(DBColumn::BeaconBlockRoots.as_str(), &chunk_key(chunk_index)); + store + .cold_db + .do_atomically(vec![KeyValueStoreOp::DeleteKey(key_chunk)]) + .unwrap(); + + let block_root_err = store + .forwards_block_roots_iterator_until( + last_restore_point_slot, + last_restore_point_slot + 1, + || unreachable!(), + &harness.chain.spec, + ) + .unwrap() + .next() + .unwrap() + .unwrap_err(); + + assert!(matches!(block_root_err, store::Error::NoContinuationData)); + + // Re-insert block roots. + store.heal_freezer_block_roots_at_split().unwrap(); + check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); + + // Run for another two epochs to check that the invariant is maintained. + let additional_blocks_produced = 2 * E::slots_per_epoch(); + harness + .extend_slots(additional_blocks_produced as usize) + .await; + + check_finalization(&harness, num_blocks_produced + additional_blocks_produced); + check_split_slot(&harness, store); + check_chain_dump( + &harness, + num_blocks_produced + additional_blocks_produced + 1, + ); + check_iterators(&harness); +}
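Both healing tests rely on the freezer's chunked storage of block roots, so the index arithmetic is worth making explicit. Below is a minimal, std-only sketch of that mapping (the constant and helper are illustrative, not the store's actual API); as the test above notes, the chunk size is hard-coded to 128, so deleting a single chunk key wipes the roots for 128 consecutive slots:

```rust
/// Chunk size of the freezer's chunked vectors (hard-coded to 128 in these tests).
const CHUNK_SIZE: usize = 128;

/// Illustrative helper: the chunk that holds the block root for `slot`.
fn chunk_index(slot: usize) -> usize {
    slot / CHUNK_SIZE
}

fn main() {
    // In the test above (minimal spec, 8 slots per epoch) the last restore
    // point sits at slot 16 * 8 = 128, exactly the start of chunk 1, so
    // deleting chunk 1 erases the roots between it and the split slot (144).
    assert_eq!(chunk_index(127), 0);
    assert_eq!(chunk_index(128), 1);
    assert_eq!(chunk_index(144), 1);
}
```

+ +/// Tests that `store.heal_freezer_block_roots_at_split` re-inserts block roots between the last +/// restore point slot and the split slot when the chain contains skip slots.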
+#[tokio::test] +async fn heal_freezer_block_roots_with_skip_slots() { + // chunk_size is hard-coded to 128 + let num_blocks_produced = E::slots_per_epoch() * 20; + let db_path = tempdir().unwrap(); + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let current_state = harness.get_current_state(); + let state_root = harness.get_current_state().tree_hash_root(); + let all_validators = &harness.get_all_validators(); + harness + .add_attested_blocks_at_slots( + current_state, + state_root, + &(1..=num_blocks_produced) + .filter(|i| i % 12 != 0) + .map(Slot::new) + .collect::>(), + all_validators, + ) + .await; + + // split slot should be 18 here + let split_slot = store.get_split_slot(); + assert_eq!(split_slot, 18 * E::slots_per_epoch()); + + let last_restore_point_slot = Slot::new(16 * E::slots_per_epoch()); + let chunk_index = >::chunk_index( + last_restore_point_slot.as_usize(), + ); + let key_chunk = get_key_for_col(DBColumn::BeaconBlockRoots.as_str(), &chunk_key(chunk_index)); + store + .cold_db + .do_atomically(vec![KeyValueStoreOp::DeleteKey(key_chunk)]) + .unwrap(); + + let block_root_err = store + .forwards_block_roots_iterator_until( + last_restore_point_slot, + last_restore_point_slot + 1, + || unreachable!(), + &harness.chain.spec, + ) + .unwrap() + .next() + .unwrap() + .unwrap_err(); + + assert!(matches!(block_root_err, store::Error::NoContinuationData)); + + // heal function + store.heal_freezer_block_roots_at_split().unwrap(); + check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); + + // Run for another two epochs to check that the invariant is maintained. + let additional_blocks_produced = 2 * E::slots_per_epoch(); + harness + .extend_slots(additional_blocks_produced as usize) + .await; + + check_finalization(&harness, num_blocks_produced + additional_blocks_produced); + check_split_slot(&harness, store); + check_iterators(&harness); +} + +/// Tests that `store.heal_freezer_block_roots_at_genesis` replaces 0x0 block roots between slot +/// 0 and the first non-skip slot with genesis block root. +#[tokio::test] +async fn heal_freezer_block_roots_at_genesis() { + // Run for a few epochs to ensure we're past finalization. + let num_blocks_produced = E::slots_per_epoch() * 4; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Start with 2 skip slots. + harness.advance_slot(); + harness.advance_slot(); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Do a heal before deleting to make sure that it doesn't break. + store.heal_freezer_block_roots_at_genesis().unwrap(); + check_freezer_block_roots( + &harness, + Slot::new(0), + Epoch::new(1).end_slot(E::slots_per_epoch()), + ); + + // Write 0x0 block roots at slot 1 and slot 2. 
+ let chunk_index = 0; + let chunk_db_key = chunk_key(chunk_index); + let mut chunk = + Chunk::::load(&store.cold_db, DBColumn::BeaconBlockRoots, &chunk_db_key) + .unwrap() + .unwrap(); + + chunk.values[1] = Hash256::zero(); + chunk.values[2] = Hash256::zero(); + + let mut ops = vec![]; + chunk + .store(DBColumn::BeaconBlockRoots, &chunk_db_key, &mut ops) + .unwrap(); + store.cold_db.do_atomically(ops).unwrap(); + + // Ensure the DB is corrupted + let block_roots = store + .forwards_block_roots_iterator_until( + Slot::new(1), + Slot::new(2), + || unreachable!(), + &harness.chain.spec, + ) + .unwrap() + .map(Result::unwrap) + .take(2) + .collect::>(); + assert_eq!( + block_roots, + vec![ + (Hash256::zero(), Slot::new(1)), + (Hash256::zero(), Slot::new(2)) + ] + ); + + // Insert genesis block roots at skip slots before first block slot + store.heal_freezer_block_roots_at_genesis().unwrap(); + check_freezer_block_roots( + &harness, + Slot::new(0), + Epoch::new(1).end_slot(E::slots_per_epoch()), + ); +} + +fn check_freezer_block_roots(harness: &TestHarness, start_slot: Slot, end_slot: Slot) { + for slot in (start_slot.as_u64()..end_slot.as_u64()).map(Slot::new) { + let (block_root, result_slot) = harness + .chain + .store + .forwards_block_roots_iterator_until(slot, slot, || unreachable!(), &harness.chain.spec) + .unwrap() + .next() + .unwrap() + .unwrap(); + assert_eq!(slot, result_slot); + let expected_block_root = harness + .chain + .block_root_at_slot(slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + assert_eq!(expected_block_root, block_root); + } +} + #[tokio::test] async fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; @@ -707,7 +969,7 @@ async fn multi_epoch_fork_valid_blocks_test( let store = get_store(&db_path); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); - let harness = BeaconChainHarness::builder(MinimalEthSpec) + let harness = TestHarness::builder(MinimalEthSpec) .default_spec() .keypairs(validators_keypairs) .fresh_disk_store(store) @@ -1079,7 +1341,7 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { ); } - assert_eq!(rig.get_finalized_checkpoints(), hashset! {},); + assert_eq!(rig.get_finalized_checkpoints(), hashset! 
{}); assert!(rig.chain.knows_head(&stray_head)); @@ -1106,8 +1368,11 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { for &block_hash in stray_blocks.values() { assert!( !rig.block_exists(block_hash), - "abandoned block {} should have been pruned", - block_hash + "abandoned block {block_hash:?} should have been pruned", + ); + assert!( + !rig.chain.store.blobs_exist(&block_hash.into()).unwrap(), + "blobs for abandoned block {block_hash:?} should have been pruned" ); } @@ -1796,6 +2061,10 @@ fn check_no_blocks_exist<'a>( "did not expect block {:?} to be in the DB", block_hash ); + assert!( + !harness.chain.store.blobs_exist(&block_hash.into()).unwrap(), + "blobs for abandoned block {block_hash:?} should have been pruned" + ); } } @@ -1997,23 +2266,26 @@ async fn garbage_collect_temp_states_from_failed_block() { let genesis_state = harness.get_current_state(); let block_slot = Slot::new(2 * slots_per_epoch); - let (signed_block, state) = harness.make_block(genesis_state, block_slot).await; + let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; - let (mut block, _) = signed_block.deconstruct(); + let (mut block, _) = (*signed_block).clone().deconstruct(); // Mutate the block to make it invalid, and re-sign it. *block.state_root_mut() = Hash256::repeat_byte(0xff); let proposer_index = block.proposer_index() as usize; - let block = block.sign( + let block = Arc::new(block.sign( &harness.validator_keypairs[proposer_index].sk, &state.fork(), state.genesis_validators_root(), &harness.spec, - ); + )); // The block should be rejected, but should store a bunch of temporary states. harness.set_current_slot(block_slot); - harness.process_block_result(block).await.unwrap_err(); + harness + .process_block_result((block, None)) + .await + .unwrap_err(); assert_eq!( store.iter_temporary_state_roots().count(), @@ -2074,6 +2346,18 @@ async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { weak_subjectivity_sync_test(slots, checkpoint_slot).await } +// Regression test for https://github.com/sigp/lighthouse/issues/4817 +// Skip 3 slots immediately after genesis, creating a gap between the genesis block and the first +// real block. +#[tokio::test] +async fn weak_subjectivity_sync_skips_at_genesis() { + let start_slot = 4; + let end_slot = E::slots_per_epoch() * 4; + let slots = (start_slot..end_slot).map(Slot::new).collect(); + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 2); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Build an initial chain on one harness, representing a synced node with full history. let num_final_blocks = E::slots_per_epoch() * 2; @@ -2132,6 +2416,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let store = get_store(&temp2); let spec = test_spec::(); let seconds_per_slot = spec.seconds_per_slot; + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| println!("Unable to read trusted setup file: {}", e)) + .unwrap(); + + let mock = + mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); // Initialise a new beacon chain from the finalized checkpoint. // The slot clock must be set to a time ahead of the checkpoint state. 
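One detail of the rebuilt-from-checkpoint chain above deserves a note: from Deneb onwards it must be able to verify blobs, which is why the builder is now handed a mock execution layer and a KZG trusted setup. A minimal sketch of the parsing step, assuming (as the imports above suggest) that `TRUSTED_SETUP_BYTES` is the JSON-encoded ceremony output embedded in `eth2_network_config`:

```rust
use eth2_network_config::TRUSTED_SETUP_BYTES;
use kzg::TrustedSetup;

/// Parse the embedded KZG trusted setup once at startup; without it the
/// checkpoint-synced chain cannot KZG-verify incoming blob sidecars.
fn load_trusted_setup() -> Result<TrustedSetup, serde_json::Error> {
    serde_json::from_reader(TRUSTED_SETUP_BYTES)
}
```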
@@ -2141,28 +2431,30 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { Duration::from_secs(seconds_per_slot), ); slot_clock.set_slot(harness.get_current_slot().as_u64()); - let beacon_chain = Arc::new( - BeaconChainBuilder::new(MinimalEthSpec) - .store(store.clone()) - .custom_spec(test_spec::()) - .task_executor(harness.chain.task_executor.clone()) - .logger(log.clone()) - .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) - .unwrap() - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(slot_clock) - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) - .build() - .expect("should build"), - ); + + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) + .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + .execution_layer(Some(mock.el)) + .trusted_setup(trusted_setup) + .build() + .expect("should build"); + + let beacon_chain = Arc::new(beacon_chain); // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); @@ -2171,12 +2463,14 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot); for snapshot in new_blocks { + let block_root = snapshot.beacon_block_root; let full_block = harness .chain .get_block(&snapshot.beacon_block_root) .await .unwrap() .unwrap(); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); let slot = full_block.slot(); let state_root = full_block.state_root(); @@ -2184,7 +2478,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { beacon_chain .process_block( full_block.canonical_root(), - Arc::new(full_block), + RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -2229,14 +2523,38 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .filter(|s| s.beacon_block.slot() != 0) .map(|s| s.beacon_block.clone()) .collect::>(); + + let mut available_blocks = vec![]; + for blinded in historical_blocks { + let block_root = blinded.canonical_root(); + let full_block = harness + .chain + .get_block(&block_root) + .await + .expect("should get block") + .expect("should get block"); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); + + if let MaybeAvailableBlock::Available(block) = harness + .chain + .data_availability_checker + .verify_kzg_for_rpc_block( + RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), + ) + .expect("should verify kzg") + { + available_blocks.push(block); + } + } + beacon_chain - .import_historical_block_batch(historical_blocks.clone()) + .import_historical_block_batch(available_blocks.clone()) .unwrap(); assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); // Resupplying 
the blocks should not fail, they can be safely ignored. beacon_chain - .import_historical_block_batch(historical_blocks) + .import_historical_block_batch(available_blocks) .unwrap(); // The forwards iterator should now match the original chain @@ -2328,10 +2646,10 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let (unadvanced_split_state, unadvanced_split_state_root) = harness.get_current_state_and_root(); - let (invalid_fork_block, _) = harness + let ((invalid_fork_block, _), _) = harness .make_block(unadvanced_split_state.clone(), split_slot) .await; - let (valid_fork_block, _) = harness + let ((valid_fork_block, _), _) = harness .make_block(unadvanced_split_state.clone(), split_slot + 1) .await; @@ -2359,7 +2677,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { .chain .process_block( invalid_fork_block.canonical_root(), - Arc::new(invalid_fork_block.clone()), + invalid_fork_block.clone(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -2372,7 +2690,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { .chain .process_block( valid_fork_block.canonical_root(), - Arc::new(valid_fork_block.clone()), + valid_fork_block.clone(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -2468,12 +2786,12 @@ async fn finalizes_after_resuming_from_db() { let original_chain = harness.chain; - let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec) + let resumed_harness = BeaconChainHarness::>::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(store) .testing_slot_clock(original_chain.slot_clock.clone()) - .mock_execution_layer() + .execution_layer(original_chain.execution_layer.clone()) .build(); assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain); @@ -2538,7 +2856,7 @@ async fn revert_minority_fork_on_resume() { // Chain with no fork epoch configured. let db_path1 = tempdir().unwrap(); - let store1 = get_store_with_spec(&db_path1, spec1.clone()); + let store1 = get_store_generic(&db_path1, StoreConfig::default(), spec1.clone()); let harness1 = BeaconChainHarness::builder(MinimalEthSpec) .spec(spec1) .keypairs(KEYPAIRS[0..validator_count].to_vec()) @@ -2548,7 +2866,7 @@ async fn revert_minority_fork_on_resume() { // Chain with fork epoch configured. let db_path2 = tempdir().unwrap(); - let store2 = get_store_with_spec(&db_path2, spec2.clone()); + let store2 = get_store_generic(&db_path2, StoreConfig::default(), spec2.clone()); let harness2 = BeaconChainHarness::builder(MinimalEthSpec) .spec(spec2.clone()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) @@ -2574,14 +2892,14 @@ async fn revert_minority_fork_on_resume() { harness1.process_attestations(attestations.clone()); harness2.process_attestations(attestations); - let (block, new_state) = harness1.make_block(state, slot).await; + let ((block, blobs), new_state) = harness1.make_block(state, slot).await; harness1 - .process_block(slot, block.canonical_root(), block.clone()) + .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) .await .unwrap(); harness2 - .process_block(slot, block.canonical_root(), block.clone()) + .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) .await .unwrap(); @@ -2615,17 +2933,17 @@ async fn revert_minority_fork_on_resume() { harness2.process_attestations(attestations); // Minority chain block (no attesters). 
- let (block1, new_state1) = harness1.make_block(state1, slot).await; + let ((block1, blobs1), new_state1) = harness1.make_block(state1, slot).await; harness1 - .process_block(slot, block1.canonical_root(), block1) + .process_block(slot, block1.canonical_root(), (block1, blobs1)) .await .unwrap(); state1 = new_state1; // Majority chain block (all attesters). - let (block2, new_state2) = harness2.make_block(state2, slot).await; + let ((block2, blobs2), new_state2) = harness2.make_block(state2, slot).await; harness2 - .process_block(slot, block2.canonical_root(), block2.clone()) + .process_block(slot, block2.canonical_root(), (block2.clone(), blobs2)) .await .unwrap(); @@ -2643,9 +2961,9 @@ async fn revert_minority_fork_on_resume() { // We have to do some hackery with the `slot_clock` so that the correct slot is set when // the beacon chain builder loads the head block. drop(harness1); - let resume_store = get_store_with_spec(&db_path1, spec2.clone()); + let resume_store = get_store_generic(&db_path1, StoreConfig::default(), spec2.clone()); - let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec) + let resumed_harness = TestHarness::builder(MinimalEthSpec) .spec(spec2) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(resume_store) @@ -2678,7 +2996,7 @@ async fn revert_minority_fork_on_resume() { let initial_split_slot = resumed_harness.chain.store.get_split_slot(); for block in &majority_blocks { resumed_harness - .process_block_result(block.clone()) + .process_block_result((block.clone(), None)) .await .unwrap(); @@ -2718,12 +3036,12 @@ async fn schema_downgrade_to_min_version() { ) .await; - let min_version = if harness.spec.capella_fork_epoch.is_some() { - // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that - // at all if Capella is enabled. - SchemaVersion(14) + let min_version = if harness.spec.deneb_fork_epoch.is_some() { + // Can't downgrade beyond V18 once Deneb is reached, for simplicity don't test that + // at all if Deneb is enabled. + SchemaVersion(18) } else { - SchemaVersion(11) + SchemaVersion(16) }; // Save the slot clock so that the new harness doesn't revert in time. @@ -2760,15 +3078,6 @@ async fn schema_downgrade_to_min_version() { .expect("schema upgrade from minimum version should work"); // Recreate the harness. - /* - let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(spec.seconds_per_slot), - ); - slot_clock.set_slot(harness.get_current_slot().as_u64()); - */ - let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) @@ -2796,6 +3105,383 @@ async fn schema_downgrade_to_min_version() { .expect_err("should not downgrade below minimum version"); } +/// Check that blob pruning prunes blobs older than the data availability boundary. +#[tokio::test] +async fn deneb_prune_blobs_happy_case() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + + let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { + // No-op prior to Deneb. 
+ return; + }; + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let num_blocks_produced = E::slots_per_epoch() * 8; + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Prior to manual pruning with an artificially low data availability boundary all blobs should + // be stored. + assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + check_blob_existence(&harness, Slot::new(1), harness.head_slot(), true); + + // Trigger blob pruning of blobs older than epoch 2. + let data_availability_boundary = Epoch::new(2); + store + .try_prune_blobs(true, data_availability_boundary) + .unwrap(); + + // Check oldest blob slot is updated accordingly and prior blobs have been deleted. + let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap(); + assert_eq!( + oldest_blob_slot, + data_availability_boundary.start_slot(E::slots_per_epoch()) + ); + check_blob_existence(&harness, Slot::new(0), oldest_blob_slot - 1, false); + check_blob_existence(&harness, oldest_blob_slot, harness.head_slot(), true); +} + +/// Check that blob pruning does not prune without finalization. +#[tokio::test] +async fn deneb_prune_blobs_no_finalization() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + + let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { + // No-op prior to Deneb. + return; + }; + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let initial_num_blocks = E::slots_per_epoch() * 5; + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Finalize to epoch 3. + harness + .extend_chain( + initial_num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Extend the chain for another few epochs without attestations. + let unfinalized_num_blocks = E::slots_per_epoch() * 3; + harness.advance_slot(); + harness + .extend_chain( + unfinalized_num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + // Finalization should be at epoch 3. + let finalized_slot = Slot::new(E::slots_per_epoch() * 3); + assert_eq!(harness.get_current_state().finalized_checkpoint().epoch, 3); + assert_eq!(store.get_split_slot(), finalized_slot); + + // All blobs should still be available. + assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true); + + // Attempt blob pruning of blobs older than epoch 4, which is newer than finalization. + let data_availability_boundary = Epoch::new(4); + store + .try_prune_blobs(true, data_availability_boundary) + .unwrap(); + + // Check oldest blob slot is only updated to finalization, and NOT to the DAB. + let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap(); + assert_eq!(oldest_blob_slot, finalized_slot); + check_blob_existence(&harness, Slot::new(0), finalized_slot - 1, false); + check_blob_existence(&harness, finalized_slot, harness.head_slot(), true); +}
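Taken together, the pruning tests pin down an effective boundary: blobs are deleted up to the data availability boundary minus the configured margin, but never past the finalized (split) epoch and never before the Deneb fork. A std-only sketch of that clamping (a hypothetical helper for illustration, not the store's API):

```rust
/// Illustrative only: the epoch below which blobs may be pruned, per the
/// constraints asserted by the surrounding tests.
fn blob_prune_boundary_epoch(
    data_availability_boundary: u64,
    blob_prune_margin_epochs: u64,
    finalized_epoch: u64,
    deneb_fork_epoch: u64,
) -> u64 {
    data_availability_boundary
        .saturating_sub(blob_prune_margin_epochs)
        .min(finalized_epoch) // never prune past finalization
        .max(deneb_fork_epoch) // no blobs exist before the fork
}

fn main() {
    // `deneb_prune_blobs_no_finalization`: a boundary of 4 is clamped to
    // finalization at epoch 3.
    assert_eq!(blob_prune_boundary_epoch(4, 0, 3, 0), 3);
    // `deneb_prune_blobs_fork_boundary`: boundaries at or before the Deneb
    // fork epoch (4) prune nothing.
    assert_eq!(blob_prune_boundary_epoch(3, 0, 5, 4), 4);
}
```

+ +/// Check that blob pruning does not fail trying to prune across the fork boundary.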
+#[tokio::test] +async fn deneb_prune_blobs_fork_boundary() { + let deneb_fork_epoch = Epoch::new(4); + let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec()); + spec.deneb_fork_epoch = Some(deneb_fork_epoch); + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let db_path = tempdir().unwrap(); + let store = get_store_generic(&db_path, StoreConfig::default(), spec); + + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = E::slots_per_epoch() * 7; + + // Finalize to epoch 5. + harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Finalization should be at epoch 5. + let finalized_epoch = Epoch::new(5); + let finalized_slot = finalized_epoch.start_slot(E::slots_per_epoch()); + assert_eq!( + harness.get_current_state().finalized_checkpoint().epoch, + finalized_epoch + ); + assert_eq!(store.get_split_slot(), finalized_slot); + + // All blobs should still be available. + assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true); + + // Attempt pruning with data availability epochs that precede the fork epoch. + // No pruning should occur. + assert!(deneb_fork_epoch < finalized_epoch); + for data_availability_boundary in [Epoch::new(0), Epoch::new(3), deneb_fork_epoch] { + store + .try_prune_blobs(true, data_availability_boundary) + .unwrap(); + + // Check oldest blob slot is not updated. + assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + } + // All blobs should still be available. + check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true); + + // Prune one epoch past the fork. + let pruned_slot = (deneb_fork_epoch + 1).start_slot(E::slots_per_epoch()); + store.try_prune_blobs(true, deneb_fork_epoch + 1).unwrap(); + assert_eq!(store.get_blob_info().oldest_blob_slot, Some(pruned_slot)); + check_blob_existence(&harness, Slot::new(0), pruned_slot - 1, false); + check_blob_existence(&harness, pruned_slot, harness.head_slot(), true); +} + +/// Check that blob pruning prunes blobs older than the data availability boundary with margin +/// applied. +#[tokio::test] +async fn deneb_prune_blobs_margin1() { + deneb_prune_blobs_margin_test(1).await; +} + +#[tokio::test] +async fn deneb_prune_blobs_margin3() { + deneb_prune_blobs_margin_test(3).await; +} + +#[tokio::test] +async fn deneb_prune_blobs_margin4() { + deneb_prune_blobs_margin_test(4).await; +} + +async fn deneb_prune_blobs_margin_test(margin: u64) { + let config = StoreConfig { + blob_prune_margin_epochs: margin, + ..StoreConfig::default() + }; + let db_path = tempdir().unwrap(); + let store = get_store_generic(&db_path, config, test_spec::()); + + let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { + // No-op prior to Deneb. + return; + }; + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let num_blocks_produced = E::slots_per_epoch() * 8; + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Prior to manual pruning with an artificially low data availability boundary all blobs should + // be stored.
+ assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + check_blob_existence(&harness, Slot::new(1), harness.head_slot(), true); + + // Trigger blob pruning of blobs older than epoch 6 - margin (6 is the minimum, due to + // finalization). + let data_availability_boundary = Epoch::new(6); + let effective_data_availability_boundary = + data_availability_boundary - store.get_config().blob_prune_margin_epochs; + assert!( + effective_data_availability_boundary > 0, + "must be > 0 because epoch 0 won't get pruned alone" + ); + store + .try_prune_blobs(true, data_availability_boundary) + .unwrap(); + + // Check oldest blob slot is updated accordingly and prior blobs have been deleted. + let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap(); + assert_eq!( + oldest_blob_slot, + effective_data_availability_boundary.start_slot(E::slots_per_epoch()) + ); + check_blob_existence(&harness, Slot::new(0), oldest_blob_slot - 1, false); + check_blob_existence(&harness, oldest_blob_slot, harness.head_slot(), true); +} + +/// Check that a database with `blobs_db=false` can be upgraded to `blobs_db=true` before Deneb. +#[tokio::test] +async fn change_to_separate_blobs_db_before_deneb() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + + // Only run this test on forks prior to Deneb. If the blobs database already has blobs, we can't + // move it. + if store.get_chain_spec().deneb_fork_epoch.is_some() { + return; + } + + let init_blob_info = store.get_blob_info(); + assert!( + init_blob_info.blobs_db, + "separate blobs DB should be the default" + ); + + // Change to `blobs_db=false` to emulate legacy Deneb DB. + let legacy_blob_info = BlobInfo { + blobs_db: false, + ..init_blob_info + }; + store + .compare_and_set_blob_info_with_write(init_blob_info.clone(), legacy_blob_info.clone()) + .unwrap(); + assert_eq!(store.get_blob_info(), legacy_blob_info); + + // Re-open the DB and check that `blobs_db` gets changed back to true. + drop(store); + let store = get_store(&db_path); + assert_eq!(store.get_blob_info(), init_blob_info); +} + +/// Check that there are blob sidecars (or not) at every slot in the range. +fn check_blob_existence( + harness: &TestHarness, + start_slot: Slot, + end_slot: Slot, + should_exist: bool, +) { + let mut blobs_seen = 0; + for (block_root, slot) in harness + .chain + .forwards_iter_block_roots_until(start_slot, end_slot) + .unwrap() + .map(Result::unwrap) + { + if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap() { + assert!(should_exist, "blobs at slot {slot} exist but should not"); + blobs_seen += blobs.len(); + } else { + // We don't actually store empty blobs, so unfortunately we can't assert anything + // meaningful here (like asserting that the blob should not exist). + } + } + if should_exist { + assert_ne!(blobs_seen, 0, "expected non-zero number of blobs"); + } +} + +#[tokio::test] +async fn prune_historic_states() { + let num_blocks_produced = E::slots_per_epoch() * 5; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let genesis_state_root = harness.chain.genesis_state_root; + let genesis_state = harness + .chain + .get_state(&genesis_state_root, None) + .unwrap() + .unwrap(); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Check historical state is present. 
+ let state_roots_iter = harness + .chain + .forwards_iter_state_roots(Slot::new(0)) + .unwrap(); + for (state_root, slot) in state_roots_iter + .take(E::slots_per_epoch() as usize) + .map(Result::unwrap) + { + assert!(store.get_state(&state_root, Some(slot)).unwrap().is_some()); + } + + store + .prune_historic_states(genesis_state_root, &genesis_state) + .unwrap(); + + // Check that anchor info is updated. + let anchor_info = store.get_anchor_info().unwrap(); + assert_eq!(anchor_info.state_lower_limit, 0); + assert_eq!(anchor_info.state_upper_limit, STATE_UPPER_LIMIT_NO_RETAIN); + + // Historical states should be pruned. + let state_roots_iter = harness + .chain + .forwards_iter_state_roots(Slot::new(1)) + .unwrap(); + for (state_root, slot) in state_roots_iter + .take(E::slots_per_epoch() as usize) + .map(Result::unwrap) + { + assert!(store.get_state(&state_root, Some(slot)).unwrap().is_none()); + } + + // Ensure that genesis state is still accessible + let genesis_state_root = harness.chain.genesis_state_root; + assert!(store + .get_state(&genesis_state_root, Some(Slot::new(0))) + .unwrap() + .is_some()); + + // Run for another two epochs. + let additional_blocks_produced = 2 * E::slots_per_epoch(); + harness + .extend_slots(additional_blocks_produced as usize) + .await; + + check_finalization(&harness, num_blocks_produced + additional_blocks_produced); + check_split_slot(&harness, store); +} + /// Checks that two chains are the same, for the purpose of these tests. /// /// Several fields that are hard/impossible to check are ignored (e.g., the store). diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 8935c6992..4334f9083 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -684,19 +684,20 @@ async fn run_skip_slot_test(skip_slots: u64) { Slot::new(0) ); - assert_eq!( - harness_b - .chain - .process_block( - harness_a.chain.head_snapshot().beacon_block_root, - harness_a.chain.head_snapshot().beacon_block.clone(), - NotifyExecutionLayer::Yes, - || Ok(()) - ) - .await - .unwrap(), - harness_a.chain.head_snapshot().beacon_block_root - ); + let status = harness_b + .chain + .process_block( + harness_a.chain.head_snapshot().beacon_block_root, + harness_a.get_head_block(), + NotifyExecutionLayer::Yes, + || Ok(()), + ) + .await + .unwrap(); + + let root: Hash256 = status.try_into().unwrap(); + + assert_eq!(root, harness_a.chain.head_snapshot().beacon_block_root); harness_b.chain.recompute_head_at_current_slot().await; diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs new file mode 100644 index 000000000..d9ff57b1b --- /dev/null +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -0,0 +1,377 @@ +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +}; +use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; +use lazy_static::lazy_static; +use logging::test_logger; +use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; + +// Should ideally be divisible by 3. +pub const VALIDATOR_COUNT: usize = 48; + +lazy_static! { + /// A cached set of keys. 
+    static ref KEYPAIRS: Vec<Keypair> =
+        types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
+}
+
+type E = MainnetEthSpec;
+
+fn get_harness(
+    validator_count: usize,
+    validator_indexes_to_monitor: Vec<usize>,
+) -> BeaconChainHarness<EphemeralHarnessType<E>> {
+    let harness = BeaconChainHarness::builder(MainnetEthSpec)
+        .default_spec()
+        .keypairs(KEYPAIRS[0..validator_count].to_vec())
+        .logger(test_logger())
+        .fresh_ephemeral_store()
+        .mock_execution_layer()
+        .validator_monitor_config(ValidatorMonitorConfig {
+            validators: validator_indexes_to_monitor
+                .iter()
+                .map(|i| PublicKeyBytes::from(KEYPAIRS[*i].pk.clone()))
+                .collect(),
+            ..<_>::default()
+        })
+        .build();
+
+    harness.advance_slot();
+
+    harness
+}
+
+// Regression test for an off-by-one caching issue in missed block detection.
+#[tokio::test]
+async fn missed_blocks_across_epochs() {
+    let slots_per_epoch = E::slots_per_epoch();
+    let all_validators = (0..VALIDATOR_COUNT).collect::<Vec<usize>>();
+
+    let harness = get_harness(VALIDATOR_COUNT, vec![]);
+    let validator_monitor = &harness.chain.validator_monitor;
+    let mut genesis_state = harness.get_current_state();
+    let genesis_state_root = genesis_state.update_tree_hash_cache().unwrap();
+    let genesis_block_root = harness.head_block_root();
+
+    // Skip a slot in the first epoch (to prime the cache inside the missed block function) and
+    // then at a different offset in the 2nd epoch. The missed block in the 2nd epoch MUST NOT
+    // reuse the cache from the first epoch.
+    let first_skip_offset = 3;
+    let second_skip_offset = slots_per_epoch / 2;
+    assert_ne!(first_skip_offset, second_skip_offset);
+    let first_skip_slot = Slot::new(first_skip_offset);
+    let second_skip_slot = Slot::new(slots_per_epoch + second_skip_offset);
+    let slots = (1..2 * slots_per_epoch)
+        .map(Slot::new)
+        .filter(|slot| *slot != first_skip_slot && *slot != second_skip_slot)
+        .collect::<Vec<Slot>>();
+
+    let (block_roots_by_slot, state_roots_by_slot, _, head_state) = harness
+        .add_attested_blocks_at_slots(genesis_state, genesis_state_root, &slots, &all_validators)
+        .await;
+
+    // Prime the proposer shuffling cache.
+    let mut proposer_shuffling_cache = harness.chain.beacon_proposer_cache.lock();
+    for epoch in [0, 1].into_iter().map(Epoch::new) {
+        let start_slot = epoch.start_slot(slots_per_epoch) + 1;
+        let state = harness
+            .get_hot_state(state_roots_by_slot[&start_slot])
+            .unwrap();
+        let decision_root = state
+            .proposer_shuffling_decision_root(genesis_block_root)
+            .unwrap();
+        proposer_shuffling_cache
+            .insert(
+                epoch,
+                decision_root,
+                state
+                    .get_beacon_proposer_indices(&harness.chain.spec)
+                    .unwrap(),
+                state.fork(),
+            )
+            .unwrap();
+    }
+    drop(proposer_shuffling_cache);
+
+    // Monitor the validator that proposed the block at the same offset in the 0th epoch as the
+    // skip in the 1st epoch.
+    let innocent_proposer_slot = Slot::new(second_skip_offset);
+    let innocent_proposer = harness
+        .get_block(block_roots_by_slot[&innocent_proposer_slot])
+        .unwrap()
+        .message()
+        .proposer_index();
+
+    let mut vm_write = validator_monitor.write();
+
+    // Call `process_valid_state` once to update validator indices.
+    vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec);
+    // Start monitoring the innocent validator.
+    vm_write.add_validator_pubkey(KEYPAIRS[innocent_proposer as usize].pk.compress());
+    // Check for missed blocks.
+    vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec);
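+
+    // Conceptually, the missed-block scan inside `process_valid_state` reduces to a
+    // sketch like the following (simplified; the real implementation also caches
+    // proposer shufflings and stays `MISSED_BLOCK_LAG_SLOTS` behind the head):
+    //
+    //     for slot in lag_window {
+    //         if state.get_block_root(slot) == state.get_block_root(slot - 1) {
+    //             // No new block at `slot`: charge the expected proposer, if monitored.
+    //         }
+    //     }
+    //
+    // The `set_block_root` calls in the tests below fabricate exactly this
+    // duplicate-root condition.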
+
+    // My client is innocent, your honour!
+    assert_eq!(
+        vm_write.get_monitored_validator_missed_block_count(innocent_proposer),
+        0
+    );
+}
+
+#[tokio::test]
+async fn produces_missed_blocks() {
+    let validator_count = 16;
+
+    let slots_per_epoch = E::slots_per_epoch();
+
+    let nb_epoch_to_simulate = Epoch::new(2);
+
+    // Generate 63 slots (2 epochs * 32 slots per epoch - 1).
+    let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1;
+
+    // The validator index of the validator that is 'supposed' to miss a block.
+    let mut validator_index_to_monitor = 1;
+
+    // 1st scenario //
+    //
+    // The missed block happens when slot and prev_slot are in the same epoch.
+    let harness1 = get_harness(validator_count, vec![validator_index_to_monitor]);
+    harness1
+        .extend_chain(
+            initial_blocks as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    let mut _state = &mut harness1.get_current_state();
+    let mut epoch = _state.current_epoch();
+
+    // We have a total of 63 slots and we want slot 57 (slot 25 of epoch 1) to be the
+    // missed block.
+    let mut idx = initial_blocks - 6;
+    let mut slot = Slot::new(idx);
+    let mut slot_in_epoch = slot % slots_per_epoch;
+    let mut prev_slot = Slot::new(idx - 1);
+    let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap();
+    let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap();
+    let mut validator_index = validator_indexes[slot_in_epoch.as_usize()];
+    let mut proposer_shuffling_decision_root = _state
+        .proposer_shuffling_decision_root(duplicate_block_root)
+        .unwrap();
+
+    let beacon_proposer_cache = harness1
+        .chain
+        .validator_monitor
+        .read()
+        .get_beacon_proposer_cache();
+
+    // Fill the cache with the proposers for the current epoch.
+    assert_eq!(
+        beacon_proposer_cache.lock().insert(
+            epoch,
+            proposer_shuffling_decision_root,
+            validator_indexes.into_iter().collect::<Vec<usize>>(),
+            _state.fork()
+        ),
+        Ok(())
+    );
+
+    // Modify the block root of the previous slot to be the same as the block root of the
+    // current slot in order to simulate a missed block.
+    assert_eq!(
+        _state.set_block_root(prev_slot, duplicate_block_root),
+        Ok(())
+    );
+
+    {
+        // Validate the state, which calls the function responsible for adding missed
+        // blocks to the validator monitor.
+        let mut validator_monitor = harness1.chain.validator_monitor.write();
+        validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec);
+
+        // We should have one entry in the missed blocks map.
+        assert_eq!(
+            validator_monitor.get_monitored_validator_missed_block_count(validator_index as u64),
+            1
+        );
+    }
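+
+    // Note on the cache insert above: proposers are keyed by
+    // (epoch, shuffling_decision_root), so an entry stays valid for every slot of that
+    // epoch but must be recomputed once the epoch (and with it the decision root)
+    // changes. That reload across the epoch boundary is exactly what the next scenario
+    // exercises.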
+
+    // 2nd scenario //
+    //
+    // The missed block happens when slot and prev_slot are NOT in the same epoch, making
+    // sure that the cache reloads when the epoch changes. In this scenario the slot that
+    // missed a block is the first slot of the epoch.
+    validator_index_to_monitor = 7;
+    // We also monitor a few extra validators, since which validator misses the block
+    // depends on the fork name the test runs with, as the proposer shuffling differs
+    // depending on the fork name (cf. seed).
+    let validator_index_to_monitor_altair = 2;
+    // Same as above but for the merge upgrade.
+    let validator_index_to_monitor_merge = 4;
+    // Same as above but for the capella upgrade.
+    let validator_index_to_monitor_capella = 11;
+    // Same as above but for the deneb upgrade.
+    let validator_index_to_monitor_deneb = 3;
+    let harness2 = get_harness(
+        validator_count,
+        vec![
+            validator_index_to_monitor,
+            validator_index_to_monitor_altair,
+            validator_index_to_monitor_merge,
+            validator_index_to_monitor_capella,
+            validator_index_to_monitor_deneb,
+        ],
+    );
+    let advance_slot_by = 9;
+    harness2
+        .extend_chain(
+            (initial_blocks + advance_slot_by) as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    let mut _state2 = &mut harness2.get_current_state();
+    epoch = _state2.current_epoch();
+
+    // We have a total of 72 slots and we want slot 64 (the first slot of epoch 2) to be
+    // the missed block.
+    idx = initial_blocks + advance_slot_by - 8;
+    slot = Slot::new(idx);
+    prev_slot = Slot::new(idx - 1);
+    slot_in_epoch = slot % slots_per_epoch;
+    duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
+    validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap();
+    validator_index = validator_indexes[slot_in_epoch.as_usize()];
+
+    let beacon_proposer_cache = harness2
+        .chain
+        .validator_monitor
+        .read()
+        .get_beacon_proposer_cache();
+
+    // Fill the cache with the proposers for the current epoch.
+    assert_eq!(
+        beacon_proposer_cache.lock().insert(
+            epoch,
+            duplicate_block_root,
+            validator_indexes.into_iter().collect::<Vec<usize>>(),
+            _state2.fork()
+        ),
+        Ok(())
+    );
+
+    assert_eq!(
+        _state2.set_block_root(prev_slot, duplicate_block_root),
+        Ok(())
+    );
+
+    {
+        // Validate the state, which calls the function responsible for adding missed
+        // blocks to the validator monitor.
+        let mut validator_monitor2 = harness2.chain.validator_monitor.write();
+        validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec);
+        // We should have one entry in the missed blocks map.
+        assert_eq!(
+            validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64),
+            1
+        );
+
+        // 3rd scenario //
+        //
+        // A missed block happens, but the validator is not monitored;
+        // it should not be flagged as a missed block.
+        idx = initial_blocks + advance_slot_by - 7;
+        slot = Slot::new(idx);
+        prev_slot = Slot::new(idx - 1);
+        slot_in_epoch = slot % slots_per_epoch;
+        duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
+        validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap();
+        let not_monitored_validator_index = validator_indexes[slot_in_epoch.as_usize()];
+
+        assert_eq!(
+            _state2.set_block_root(prev_slot, duplicate_block_root),
+            Ok(())
+        );
+
+        // Validate the state, which calls the function responsible for adding missed
+        // blocks to the validator monitor.
+        validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec);
+
+        // We shouldn't have any entry in the missed blocks map.
+        assert_ne!(validator_index, not_monitored_validator_index);
+        assert_eq!(
+            validator_monitor2
+                .get_monitored_validator_missed_block_count(not_monitored_validator_index as u64),
+            0
+        );
+    }
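+
+    // The 4th scenario below exercises the detection lag: slots within
+    // `MISSED_BLOCK_LAG_SLOTS` of the head are not judged yet (a block that close to the
+    // head may simply still be in flight). The test picks
+    //
+    //     idx = slots_per_epoch - MISSED_BLOCK_LAG_SLOTS + 2
+    //
+    // which is only `MISSED_BLOCK_LAG_SLOTS - 2` slots behind the head state, i.e.
+    // inside that window, so no missed block may be recorded for it.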
+
+    // 4th scenario //
+    //
+    // A missed block happens at state.slot - MISSED_BLOCK_LAG_SLOTS;
+    // it shouldn't be flagged as a missed block.
+    let harness3 = get_harness(validator_count, vec![validator_index_to_monitor]);
+    harness3
+        .extend_chain(
+            slots_per_epoch as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    let mut _state3 = &mut harness3.get_current_state();
+    epoch = _state3.current_epoch();
+
+    // We have a total of 32 slots and we want slot 30 of epoch 0 to be the missed block.
+    idx = slots_per_epoch - MISSED_BLOCK_LAG_SLOTS as u64 + 2;
+    slot = Slot::new(idx);
+    slot_in_epoch = slot % slots_per_epoch;
+    prev_slot = Slot::new(idx - 1);
+    duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap();
+    validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap();
+    validator_index = validator_indexes[slot_in_epoch.as_usize()];
+    proposer_shuffling_decision_root = _state3
+        .proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root)
+        .unwrap();
+
+    let beacon_proposer_cache = harness3
+        .chain
+        .validator_monitor
+        .read()
+        .get_beacon_proposer_cache();
+
+    // Fill the cache with the proposers for the current epoch.
+    assert_eq!(
+        beacon_proposer_cache.lock().insert(
+            epoch,
+            proposer_shuffling_decision_root,
+            validator_indexes.into_iter().collect::<Vec<usize>>(),
+            _state3.fork()
+        ),
+        Ok(())
+    );
+
+    // Modify the block root of the previous slot to be the same as the block root of the
+    // current slot in order to simulate a missed block.
+    assert_eq!(
+        _state3.set_block_root(prev_slot, duplicate_block_root),
+        Ok(())
+    );
+
+    {
+        // Validate the state, which calls the function responsible for adding missed
+        // blocks to the validator monitor.
+        let mut validator_monitor3 = harness3.chain.validator_monitor.write();
+        validator_monitor3.process_valid_state(epoch, _state3, &harness3.chain.spec);
+
+        // We shouldn't have any entries in the missed blocks map.
+        assert_eq!(
+            validator_monitor3.get_monitored_validator_missed_block_count(validator_index as u64),
+            0
+        );
+    }
+}
diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs
index 4c1da85fa..045b06a1e 100644
--- a/beacon_node/beacon_processor/src/lib.rs
+++ b/beacon_node/beacon_processor/src/lib.rs
@@ -39,13 +39,11 @@ //! task.
use crate::work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedBackfillBatch, QueuedGossipBlock, - QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, ReprocessQueueMessage, + QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, }; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; -use lighthouse_network::NetworkGlobals; -use lighthouse_network::{MessageId, PeerId}; +use lighthouse_network::{MessageId, NetworkGlobals, PeerId}; use logging::TimeLatch; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; @@ -62,8 +60,13 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, Slot, SubnetId}; +use types::{Attestation, Hash256, SignedAggregateAndProof, SubnetId}; +use types::{EthSpec, Slot}; use work_reprocessing_queue::IgnoredRpcBlock; +use work_reprocessing_queue::{ + spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, + QueuedUnaggregate, ReadyWork, +}; mod metrics; pub mod work_reprocessing_queue; @@ -102,6 +105,10 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `BlobSidecar` objects received on gossip that +/// will be stored before we start dropping them. +const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but /// within acceptable clock disparity) that will be queued before we start dropping them. const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024; @@ -142,6 +149,10 @@ const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024; /// will be stored before we start dropping them. const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `BlobSidecar` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `Vec` objects received during syncing that will /// be stored before we start dropping them. const MAX_CHAIN_SEGMENT_QUEUE_LEN: usize = 64; @@ -154,10 +165,18 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `BlobsByRangeRequest` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1024; + /// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that /// will be stored before we start dropping them. const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `BlobsByRootRequest` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_BLOBS_BY_ROOTS_QUEUE_LEN: usize = 1_024; + /// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them. 
/// /// This value is set high to accommodate the large spike that is expected immediately after Capella @@ -204,6 +223,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch"; pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; pub const GOSSIP_BLOCK: &str = "gossip_block"; +pub const GOSSIP_BLOBS_SIDECAR: &str = "gossip_blobs_sidecar"; pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; @@ -214,11 +234,14 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; +pub const RPC_BLOBS: &str = "rpc_blob"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; +pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; +pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; @@ -566,6 +589,7 @@ pub enum Work { process_batch: Box>) + Send + Sync>, }, GossipBlock(AsyncFn), + GossipBlobSidecar(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -581,6 +605,9 @@ pub enum Work { RpcBlock { process_fn: AsyncFn, }, + RpcBlobs { + process_fn: AsyncFn, + }, IgnoredRpcBlock { process_fn: BlockingFn, }, @@ -589,6 +616,8 @@ pub enum Work { Status(BlockingFn), BlocksByRangeRequest(BlockingFnWithManualSendOnIdle), BlocksByRootsRequest(BlockingFnWithManualSendOnIdle), + BlobsByRangeRequest(BlockingFn), + BlobsByRootsRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), LightClientBootstrapRequest(BlockingFn), ApiRequestP0(BlockingOrAsync), @@ -610,6 +639,7 @@ impl Work { Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipBlock(_) => GOSSIP_BLOCK, + Work::GossipBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING, @@ -619,12 +649,15 @@ impl Work { Work::GossipLightClientFinalityUpdate(_) => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, + Work::RpcBlobs { .. } => RPC_BLOBS, Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, Work::Status(_) => STATUS_PROCESSING, Work::BlocksByRangeRequest(_) => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, + Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST, + Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST, Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. 
} => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, @@ -771,14 +804,18 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); + let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); + let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); + let mut blbroots_queue = FifoQueue::new(MAX_BLOBS_BY_ROOTS_QUEUE_LEN); + let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN); let mut gossip_bls_to_execution_change_queue = FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); @@ -915,6 +952,8 @@ impl BeaconProcessor { // requested these blocks. } else if let Some(item) = rpc_block_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = rpc_blob_queue.pop() { + self.spawn_worker(item, idle_tx); // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { @@ -923,7 +962,9 @@ impl BeaconProcessor { // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, idle_tx); - // Check the priority 0 API requests after blocks, but before attestations. + } else if let Some(item) = gossip_blob_queue.pop() { + self.spawn_worker(item, idle_tx); + // Check the priority 0 API requests after blocks and blobs, but before attestations. } else if let Some(item) = api_request_p0_queue.pop() { self.spawn_worker(item, idle_tx); // Check the aggregates, *then* the unaggregates since we assume that @@ -1068,6 +1109,10 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = bbroots_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = blbrange_queue.pop() { + self.spawn_worker(item, idle_tx); + } else if let Some(item) = blbroots_queue.pop() { + self.spawn_worker(item, idle_tx); // Check slashings after all other consensus messages so we prioritize // following head. // @@ -1158,6 +1203,9 @@ impl BeaconProcessor { Work::GossipBlock { .. } => { gossip_block_queue.push(work, work_id, &self.log) } + Work::GossipBlobSidecar { .. } => { + gossip_blob_queue.push(work, work_id, &self.log) + } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id, &self.log) } @@ -1183,6 +1231,7 @@ impl BeaconProcessor { Work::RpcBlock { .. } | Work::IgnoredRpcBlock { .. } => { rpc_block_queue.push(work, work_id, &self.log) } + Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log), Work::ChainSegment { .. } => { chain_segment_queue.push(work, work_id, &self.log) } @@ -1196,6 +1245,9 @@ impl BeaconProcessor { Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } + Work::BlobsByRangeRequest { .. } => { + blbrange_queue.push(work, work_id, &self.log) + } Work::LightClientBootstrapRequest { .. 
} => { lcbootstrap_queue.push(work, work_id, &self.log) } @@ -1208,6 +1260,9 @@ impl BeaconProcessor { Work::GossipBlsToExecutionChange { .. } => { gossip_bls_to_execution_change_queue.push(work, work_id, &self.log) } + Work::BlobsByRootsRequest { .. } => { + blbroots_queue.push(work, work_id, &self.log) + } Work::UnknownLightClientOptimisticUpdate { .. } => { unknown_light_client_update_queue.push(work, work_id, &self.log) } @@ -1245,10 +1300,18 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, gossip_block_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL, + gossip_blob_queue.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, rpc_block_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, + rpc_blob_queue.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, chain_segment_queue.len() as i64, @@ -1388,11 +1451,18 @@ impl BeaconProcessor { beacon_block_root: _, process_fn, } => task_spawner.spawn_async(process_fn), - Work::RpcBlock { process_fn } => task_spawner.spawn_async(process_fn), + Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } => { + task_spawner.spawn_async(process_fn) + } Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), - Work::GossipBlock(work) => task_spawner.spawn_async(async move { - work.await; - }), + Work::GossipBlock(work) | Work::GossipBlobSidecar(work) => { + task_spawner.spawn_async(async move { + work.await; + }) + } + Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => { + task_spawner.spawn_blocking(process_fn) + } Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { task_spawner.spawn_blocking_with_manual_send_idle(work) } diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index e14c39e9a..fa7d7d7b9 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -46,6 +46,11 @@ lazy_static::lazy_static! { "beacon_processor_gossip_block_queue_total", "Count of blocks from gossip waiting to be verified." ); + // Gossip blobs. + pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_gossip_blob_queue_total", + "Count of blobs from gossip waiting to be verified." + ); // Gossip Exits. pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_exit_queue_total", @@ -71,6 +76,11 @@ lazy_static::lazy_static! { "beacon_processor_rpc_block_queue_total", "Count of blocks from the rpc waiting to be verified." ); + // Rpc blobs. + pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_rpc_blob_queue_total", + "Count of blobs from the rpc waiting to be verified." + ); // Chain segments. 
    pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
        "beacon_processor_chain_segment_queue_total",
diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs
index c78f686d0..934ef059d 100644
--- a/beacon_node/builder_client/src/lib.rs
+++ b/beacon_node/builder_client/src/lib.rs
@@ -1,9 +1,9 @@
 use eth2::types::builder_bid::SignedBuilderBid;
 use eth2::types::{
-    AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload,
-    ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData,
-    Slot,
+    EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes,
+    SignedValidatorRegistrationData, Slot,
 };
+use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock};
 pub use eth2::Error;
 use eth2::{ok_or_error, StatusCode};
 use reqwest::{IntoUrl, Response};
@@ -140,8 +140,8 @@ impl BuilderHttpClient {
     /// `POST /eth/v1/builder/blinded_blocks`
     pub async fn post_builder_blinded_blocks<E: EthSpec>(
         &self,
-        blinded_block: &SignedBeaconBlock<E, BlindedPayload<E>>,
-    ) -> Result<ForkVersionedResponse<ExecutionPayload<E>>, Error> {
+        blinded_block: &SignedBlindedBeaconBlock<E>,
+    ) -> Result<ForkVersionedResponse<FullPayloadContents<E>>, Error> {
         let mut path = self.server.full.clone();
 
         path.path_segments_mut()
@@ -163,12 +163,12 @@ impl BuilderHttpClient {
     }
 
     /// `GET /eth/v1/builder/header`
-    pub async fn get_builder_header<E: EthSpec, Payload: AbstractExecPayload<E>>(
+    pub async fn get_builder_header<E: EthSpec>(
         &self,
         slot: Slot,
         parent_hash: ExecutionBlockHash,
         pubkey: &PublicKeyBytes,
-    ) -> Result<Option<ForkVersionedResponse<SignedBuilderBid<E, Payload>>>, Error> {
+    ) -> Result<Option<ForkVersionedResponse<SignedBuilderBid<E>>>, Error> {
         let mut path = self.server.full.clone();
 
         path.path_segments_mut()
diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml
index b60748e30..26c53154e 100644
--- a/beacon_node/client/Cargo.toml
+++ b/beacon_node/client/Cargo.toml
@@ -22,7 +22,6 @@ types = { workspace = true }
 eth2_config = { workspace = true }
 slot_clock = { workspace = true }
 serde = { workspace = true }
-serde_derive = "1.0.116"
 error-chain = { workspace = true }
 slog = { workspace = true }
 tokio = { workspace = true }
diff --git a/beacon_node/client/src/address_change_broadcast.rs b/beacon_node/client/src/address_change_broadcast.rs
index 272ee908f..69614159f 100644
--- a/beacon_node/client/src/address_change_broadcast.rs
+++ b/beacon_node/client/src/address_change_broadcast.rs
@@ -99,7 +99,7 @@ pub async fn broadcast_address_changes(
             messages: vec![pubsub_message],
         };
         // It seems highly unlikely that this unbounded send will fail, but
-        // we handle the result nontheless.
+        // we handle the result nonetheless.
if let Err(e) = network_send.send(message) { debug!( log, diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 50e78aa45..9c88eccc7 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -2,6 +2,8 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; +use beacon_chain::attestation_simulator::start_attestation_simulator_service; +use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; @@ -33,6 +35,7 @@ use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; +use std::time::{SystemTime, UNIX_EPOCH}; use timer::spawn_timer; use tokio::sync::oneshot; use types::{ @@ -43,6 +46,11 @@ use types::{ /// Interval between polling the eth1 node for genesis information. pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; +/// Reduces the blob availability period by some epochs. Helps prevent the user +/// from starting a genesis sync so near to the blob pruning window that blobs +/// have been pruned before they can manage to sync the chain. +const BLOB_AVAILABILITY_REDUCTION_EPOCHS: u64 = 2; + /// Builds a `Client` instance. /// /// ## Notes @@ -67,7 +75,7 @@ pub struct ClientBuilder { eth1_service: Option, network_globals: Option>>, network_senders: Option>, - gossipsub_registry: Option, + libp2p_registry: Option, db_path: Option, freezer_db_path: Option, http_api_config: http_api::Config, @@ -101,7 +109,7 @@ where eth1_service: None, network_globals: None, network_senders: None, - gossipsub_registry: None, + libp2p_registry: None, db_path: None, freezer_db_path: None, http_api_config: <_>::default(), @@ -190,15 +198,7 @@ where .graffiti(graffiti) .event_handler(event_handler) .execution_layer(execution_layer) - .monitor_validators( - config.validator_monitor_auto, - config.validator_monitor_pubkeys.clone(), - config.validator_monitor_individual_tracking_threshold, - runtime_context - .service_context("val_mon".to_string()) - .log() - .clone(), - ); + .validator_monitor_config(config.validator_monitor.clone()); let builder = if let Some(slasher) = self.slasher.clone() { builder.slasher(slasher) @@ -258,6 +258,45 @@ where let genesis_state = genesis_state(&runtime_context, &config, log).await?; + // If the user has not explicitly allowed genesis sync, prevent + // them from trying to sync from genesis if we're outside of the + // blob P2P availability window. + // + // It doesn't make sense to try and sync the chain if we can't + // verify blob availability by downloading blobs from the P2P + // network. The user should do a checkpoint sync instead. + if !config.allow_insecure_genesis_sync { + if let Some(deneb_fork_epoch) = spec.deneb_fork_epoch { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {e:}"))? + .as_secs(); + let genesis_time = genesis_state.genesis_time(); + let deneb_time = + genesis_time + (deneb_fork_epoch.as_u64() * spec.seconds_per_slot); + + // Shrink the blob availability window so users don't start + // a sync right before blobs start to disappear from the P2P + // network. 
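+                //
+                // For illustration, with mainnet-like parameters
+                // (MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096, 32 slots per epoch and
+                // 12-second slots) the reduced window works out to:
+                //
+                //     (4096 - 2) * 32 * 12 s = 1,572,096 s, roughly 18.2 days
+                //
+                // so genesis sync is refused from about 18 days after the Deneb fork.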
+ let reduced_p2p_availability_epochs = spec + .min_epochs_for_blob_sidecars_requests + .saturating_sub(BLOB_AVAILABILITY_REDUCTION_EPOCHS); + let blob_availability_window = reduced_p2p_availability_epochs + * TEthSpec::slots_per_epoch() + * spec.seconds_per_slot; + + if now > deneb_time + blob_availability_window { + return Err( + "Syncing from genesis is insecure and incompatible with data availability checks. \ + You should instead perform a checkpoint sync from a trusted node using the --checkpoint-sync-url option. \ + For a list of public endpoints, see: https://eth-clients.github.io/checkpoint-sync-endpoints/ \ + Alternatively, use --allow-insecure-genesis-sync if the risks are understood." + .to_string(), + ); + } + } + } + builder.genesis_state(genesis_state).map(|v| (v, None))? } ClientGenesis::WeakSubjSszBytes { @@ -508,6 +547,12 @@ where ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, }; + let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup { + beacon_chain_builder.trusted_setup(trusted_setup) + } else { + beacon_chain_builder + }; + if config.sync_eth1_chain { self.eth1_service = eth1_service_option; } @@ -532,7 +577,7 @@ where .ok_or("network requires beacon_processor_channels")?; // If gossipsub metrics are required we build a registry to record them - let mut gossipsub_registry = if config.metrics_enabled { + let mut libp2p_registry = if config.metrics_enabled { Some(Registry::default()) } else { None @@ -542,9 +587,7 @@ where beacon_chain, config, context.executor, - gossipsub_registry - .as_mut() - .map(|registry| registry.sub_registry_with_prefix("gossipsub")), + libp2p_registry.as_mut(), beacon_processor_channels.beacon_processor_tx.clone(), beacon_processor_channels.work_reprocessing_tx.clone(), ) @@ -553,7 +596,7 @@ where self.network_globals = Some(network_globals); self.network_senders = Some(network_senders); - self.gossipsub_registry = gossipsub_registry; + self.libp2p_registry = libp2p_registry; Ok(self) } @@ -719,7 +762,7 @@ where chain: self.beacon_chain.clone(), db_path: self.db_path.clone(), freezer_db_path: self.freezer_db_path.clone(), - gossipsub_registry: self.gossipsub_registry.take().map(std::sync::Mutex::new), + gossipsub_registry: self.libp2p_registry.take().map(std::sync::Mutex::new), log: log.clone(), }); @@ -838,6 +881,14 @@ where start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); + start_availability_cache_maintenance_service( + runtime_context.executor.clone(), + beacon_chain.clone(), + ); + start_attestation_simulator_service( + beacon_chain.task_executor.clone(), + beacon_chain.clone(), + ); } Ok(Client { @@ -898,6 +949,7 @@ where mut self, hot_path: &Path, cold_path: &Path, + blobs_path: &Path, config: StoreConfig, log: Logger, ) -> Result { @@ -935,6 +987,7 @@ where let store = HotColdDB::open( hot_path, cold_path, + blobs_path, schema_upgrade, config, spec, diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index adaf02798..275f99986 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,16 +1,20 @@ -use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; +use beacon_chain::validator_monitor::ValidatorMonitorConfig; +use beacon_chain::TrustedSetup; use beacon_processor::BeaconProcessorConfig; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; use network::NetworkConfig; use 
sensitive_url::SensitiveUrl;
-use serde_derive::{Deserialize, Serialize};
+use serde::{Deserialize, Serialize};
 use std::fs;
 use std::path::PathBuf;
 use std::time::Duration;
-use types::{Graffiti, PublicKeyBytes};
+use types::Graffiti;
+
 /// Default directory name for the freezer database under the top-level data dir.
 const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db";
+/// Default directory name for the blobs database under the top-level data dir.
+const DEFAULT_BLOBS_DB_DIR: &str = "blobs_db";
 
 /// Defines how the client should initialize the `BeaconChain` and other components.
 #[derive(Debug, Clone, Serialize, Deserialize, Default)]
@@ -45,6 +49,8 @@ pub struct Config {
     pub db_name: String,
     /// Path where the freezer database will be located.
     pub freezer_db_path: Option<PathBuf>,
+    /// Path where the blobs database will be located if blobs should be in a separate database.
+    pub blobs_db_path: Option<PathBuf>,
     pub log_file: PathBuf,
     /// If true, the node will use co-ordinated junk for eth1 values.
     ///
@@ -53,15 +59,7 @@ pub struct Config {
     pub sync_eth1_chain: bool,
     /// Graffiti to be inserted every time we create a block.
     pub graffiti: Graffiti,
-    /// When true, automatically monitor validators using the HTTP API.
-    pub validator_monitor_auto: bool,
-    /// A list of validator pubkeys to monitor.
-    pub validator_monitor_pubkeys: Vec<PublicKeyBytes>,
-    /// Once the number of monitored validators goes above this threshold, we
-    /// will stop tracking metrics on a per-validator basis. This prevents large
-    /// validator counts causing infeasibly high cardinality for Prometheus and
-    /// high log volumes.
-    pub validator_monitor_individual_tracking_threshold: usize,
+    pub validator_monitor: ValidatorMonitorConfig,
     #[serde(skip)]
     /// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined
     /// via the CLI at runtime, instead of from a configuration file saved to disk.
@@ -71,6 +69,7 @@ pub struct Config {
     pub chain: beacon_chain::ChainConfig,
     pub eth1: eth1::Config,
     pub execution_layer: Option<execution_layer::Config>,
+    pub trusted_setup: Option<TrustedSetup>,
     pub http_api: http_api::Config,
     pub http_metrics: http_metrics::Config,
     pub monitoring_api: Option<monitoring_api::Config>,
@@ -79,6 +78,7 @@ pub struct Config {
     pub beacon_processor: BeaconProcessorConfig,
     pub genesis_state_url: Option<String>,
     pub genesis_state_url_timeout: Duration,
+    pub allow_insecure_genesis_sync: bool,
 }
 
 impl Default for Config {
@@ -87,6 +87,7 @@ impl Default for Config {
             data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
             db_name: "chain_db".to_string(),
             freezer_db_path: None,
+            blobs_db_path: None,
             log_file: PathBuf::from(""),
             genesis: <_>::default(),
             store: <_>::default(),
@@ -96,19 +97,19 @@ impl Default for Config {
             sync_eth1_chain: false,
             eth1: <_>::default(),
             execution_layer: None,
+            trusted_setup: None,
             graffiti: Graffiti::default(),
             http_api: <_>::default(),
             http_metrics: <_>::default(),
             monitoring_api: None,
             slasher: None,
-            validator_monitor_auto: false,
-            validator_monitor_pubkeys: vec![],
-            validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
+            validator_monitor: <_>::default(),
             logger_config: LoggerConfig::default(),
             beacon_processor: <_>::default(),
             genesis_state_url: <_>::default(),
             // This default value should always be overwritten by the CLI default value.
             genesis_state_url_timeout: Duration::from_secs(60),
+            allow_insecure_genesis_sync: false,
         }
     }
 }
@@ -150,11 +151,31 @@ impl Config {
             .unwrap_or_else(|| self.default_freezer_db_path())
     }
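+
+    // The blobs-DB accessors below mirror the freezer-DB pattern above: an explicit
+    // user-supplied path wins, otherwise a directory under the data dir is used. An
+    // illustrative sketch of the resulting behaviour:
+    //
+    //     let mut config = Config::default();
+    //     assert!(config.get_blobs_db_path().ends_with("blobs_db"));
+    //     config.blobs_db_path = Some(PathBuf::from("/mnt/blobs"));
+    //     assert_eq!(config.get_blobs_db_path(), PathBuf::from("/mnt/blobs"));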
+    /// Fetch default path to use for the blobs database.
+    fn default_blobs_db_path(&self) -> PathBuf {
+        self.get_data_dir().join(DEFAULT_BLOBS_DB_DIR)
+    }
+
+    /// Returns the path to which the client may initialize the on-disk blobs database.
+    ///
+    /// Will use the user-supplied path from e.g. the CLI if present, and otherwise fall
+    /// back to the default blobs directory inside the data dir.
+    pub fn get_blobs_db_path(&self) -> PathBuf {
+        self.blobs_db_path
+            .clone()
+            .unwrap_or_else(|| self.default_blobs_db_path())
+    }
+
     /// Get the freezer DB path, creating it if necessary.
     pub fn create_freezer_db_path(&self) -> Result<PathBuf, String> {
         ensure_dir_exists(self.get_freezer_db_path())
     }
 
+    /// Get the blobs DB path, creating it if necessary.
+    pub fn create_blobs_db_path(&self) -> Result<PathBuf, String> {
+        ensure_dir_exists(self.get_blobs_db_path())
+    }
+
     /// Returns the "modern" path to the data_dir.
     ///
     /// See `Self::get_data_dir` documentation for more info.
diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs
index 2c7738e8f..8a0e5ce22 100644
--- a/beacon_node/client/src/notifier.rs
+++ b/beacon_node/client/src/notifier.rs
@@ -1,6 +1,7 @@
 use crate::metrics;
 use beacon_chain::{
     capella_readiness::CapellaReadiness,
+    deneb_readiness::DenebReadiness,
     merge_readiness::{GenesisExecutionPayloadStatus, MergeConfig, MergeReadiness},
     BeaconChain, BeaconChainTypes, ExecutionStatus,
 };
@@ -319,6 +320,7 @@ pub fn spawn_notifier(
             eth1_logging(&beacon_chain, &log);
             merge_readiness_logging(current_slot, &beacon_chain, &log).await;
             capella_readiness_logging(current_slot, &beacon_chain, &log).await;
+            deneb_readiness_logging(current_slot, &beacon_chain, &log).await;
         }
     };
@@ -356,8 +358,8 @@ async fn merge_readiness_logging(
     }
 
     if merge_completed && !has_execution_layer {
+        // Logging of the EE being offline is handled in the other readiness logging functions.
         if !beacon_chain.is_time_to_prepare_for_capella(current_slot) {
-            // logging of the EE being offline is handled in `capella_readiness_logging()`
             error!(
                 log,
                 "Execution endpoint required";
@@ -445,12 +447,15 @@ async fn capella_readiness_logging(
     }
 
     if capella_completed && !has_execution_layer {
-        error!(
-            log,
-            "Execution endpoint required";
-            "info" => "you need a Capella enabled execution engine to validate blocks, see: \
-                       https://lighthouse-book.sigmaprime.io/merge-migration.html"
-        );
+        // Logging of the EE being offline is handled in the other readiness logging functions.
+ if !beacon_chain.is_time_to_prepare_for_deneb(current_slot) { + error!( + log, + "Execution endpoint required"; + "info" => "you need a Capella enabled execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + } return; } @@ -479,6 +484,65 @@ async fn capella_readiness_logging( } } +/// Provides some helpful logging to users to indicate if their node is ready for Deneb +async fn deneb_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let deneb_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_block + .message() + .body() + .execution_payload() + .map_or(false, |payload| payload.blob_gas_used().is_ok()); + + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if deneb_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_deneb(current_slot) + { + return; + } + + if deneb_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need a Deneb enabled execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + return; + } + + match beacon_chain.check_deneb_readiness().await { + DenebReadiness::Ready => { + info!( + log, + "Ready for Deneb"; + "info" => "ensure the execution endpoint is updated to the latest Deneb/Cancun release" + ) + } + readiness @ DenebReadiness::ExchangeCapabilitiesFailed { error: _ } => { + error!( + log, + "Not ready for Deneb"; + "hint" => "the execution endpoint may be offline", + "info" => %readiness, + ) + } + readiness => warn!( + log, + "Not ready for Deneb"; + "hint" => "try updating the execution endpoint", + "info" => %readiness, + ), + } +} + async fn genesis_execution_payload_logging( beacon_chain: &BeaconChain, log: &Logger, diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 26e160115..e676d17ab 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -13,7 +13,7 @@ pub enum Error { /// Some `Eth1Block` was provided with the same block number but different data. The source /// of eth1 data is inconsistent. Conflicting(u64), - /// The given block was not one block number higher than the higest known block number. + /// The given block was not one block number higher than the highest known block number. NonConsecutive { given: u64, expected: u64 }, /// Some invariant was violated, there is a likely bug in the code. 
Internal(String), diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 3651e3718..7f6526898 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -25,6 +25,7 @@ hex = { workspace = true } ethereum_ssz = { workspace = true } ssz_types = { workspace = true } eth2 = { workspace = true } +kzg = { workspace = true } state_processing = { workspace = true } superstruct = { workspace = true } lru = { workspace = true } @@ -41,11 +42,6 @@ lazy_static = { workspace = true } ethers-core = { workspace = true } builder_client = { path = "../builder_client" } fork_choice = { workspace = true } -mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" } -axum = "0.6" -hyper = "0.14" -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" } -ssz_rs = "0.9.0" tokio-stream = { workspace = true } strum = { workspace = true } keccak-hash = "0.10.0" @@ -54,3 +50,4 @@ triehash = "0.8.4" hash-db = "0.15.2" pretty_reqwest_error = { workspace = true } arc-swap = "1.6.0" +eth2_network_config = { workspace = true } diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index c889fead0..5ba61beaf 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -7,7 +7,7 @@ use ethers_core::utils::rlp::RlpStream; use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ - map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash, + map_execution_block_header_fields_base, Address, BeaconBlockRef, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256, }; @@ -18,6 +18,7 @@ impl ExecutionLayer { /// transactions. pub fn calculate_execution_block_hash( payload: ExecutionPayloadRef, + parent_beacon_block_root: Hash256, ) -> (ExecutionBlockHash, Hash256) { // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a @@ -37,12 +38,23 @@ impl ExecutionLayer { None }; + let rlp_blob_gas_used = payload.blob_gas_used().ok(); + let rlp_excess_blob_gas = payload.excess_blob_gas().ok(); + + // Calculate parent beacon block root (post-Deneb). + let rlp_parent_beacon_block_root = rlp_excess_blob_gas + .as_ref() + .map(|_| parent_beacon_block_root); + // Construct the block header. let exec_block_header = ExecutionBlockHeader::from_payload( payload, KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(), rlp_transactions_root, rlp_withdrawals_root, + rlp_blob_gas_used, + rlp_excess_blob_gas, + rlp_parent_beacon_block_root, ); // Hash the RLP encoding of the block header. @@ -56,10 +68,14 @@ impl ExecutionLayer { /// Verify `payload.block_hash` locally within Lighthouse. /// /// No remote calls to the execution client will be made, so this is quite a cheap check. 
- pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef) -> Result<(), Error> { + pub fn verify_payload_block_hash(&self, block: BeaconBlockRef) -> Result<(), Error> { + let payload = block.execution_payload()?.execution_payload_ref(); + let parent_beacon_block_root = block.parent_root(); + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); - let (header_hash, rlp_transactions_root) = Self::calculate_execution_block_hash(payload); + let (header_hash, rlp_transactions_root) = + Self::calculate_execution_block_hash(payload, parent_beacon_block_root); if header_hash != payload.block_hash() { return Err(Error::BlockHashMismatch { @@ -88,12 +104,21 @@ pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec { pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec { let mut rlp_header_stream = RlpStream::new(); rlp_header_stream.begin_unbounded_list(); - map_execution_block_header_fields_except_withdrawals!(&header, |_, field| { + map_execution_block_header_fields_base!(&header, |_, field| { rlp_header_stream.append(field); }); if let Some(withdrawals_root) = &header.withdrawals_root { rlp_header_stream.append(withdrawals_root); } + if let Some(blob_gas_used) = &header.blob_gas_used { + rlp_header_stream.append(blob_gas_used); + } + if let Some(excess_blob_gas) = &header.excess_blob_gas { + rlp_header_stream.append(excess_blob_gas); + } + if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root { + rlp_header_stream.append(parent_beacon_block_root); + } rlp_header_stream.finalize_unbounded_list(); rlp_header_stream.out().into() } @@ -140,6 +165,9 @@ mod test { nonce: Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, }; let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -168,6 +196,9 @@ mod test { nonce: Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, }; let expected_rlp = 
"f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -197,10 +228,43 @@ mod test { nonce: Hash64::zero(), base_fee_per_gas: 0x34187b238_u64.into(), withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, }; let expected_hash = Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") .unwrap(); test_rlp_encoding(&header, None, expected_hash); } + + #[test] + fn test_rlp_encode_block_deneb() { + let header = ExecutionBlockHeader { + parent_hash: Hash256::from_str("172864416698b842f4c92f7b476be294b4ef720202779df194cd225f531053ab").unwrap(), + ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), + beneficiary: Address::from_str("878705ba3f8bc32fcf7f4caa1a35e72af65cf766").unwrap(), + state_root: Hash256::from_str("c6457d0df85c84c62d1c68f68138b6e796e8a44fb44de221386fb2d5611c41e0").unwrap(), + transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), + receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), + logs_bloom:<[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), + difficulty: 0.into(), + number: 97.into(), + gas_limit: 27482534.into(), + gas_used: 0.into(), + timestamp: 1692132829u64, + extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(), + mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(), + nonce: Hash64::zero(), + base_fee_per_gas: 2374u64.into(), + withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()), + blob_gas_used: Some(0x0u64), + excess_blob_gas: Some(0x0u64), + parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + }; + let expected_hash = + Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") + .unwrap(); 
+ test_rlp_encoding(&header, None, expected_hash); + } } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 359dcb522..19b9a58eb 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,26 +1,35 @@ use crate::engines::ForkchoiceState; use crate::http::{ - ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, }; -use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2}; -pub use ethers_core::types::Transaction; -use ethers_core::utils::rlp::{self, Decodable, Rlp}; +use eth2::types::{ + BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, + SsePayloadAttributesV3, +}; +use ethers_core::types::Transaction; +use ethers_core::utils::rlp; +use ethers_core::utils::rlp::{Decodable, Rlp}; use http::deposit_methods::RpcError; pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; use pretty_reqwest_error::PrettyReqwestError; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; +use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::convert::TryFrom; use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ - Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, + Address, BeaconBlockRef, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, Withdrawal, Withdrawals, }; -use types::{ExecutionPayloadCapella, ExecutionPayloadMerge}; +use types::{ + BeaconStateError, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, + KzgProofs, VersionedHash, +}; pub mod auth; pub mod http; @@ -48,14 +57,12 @@ pub enum Error { PayloadIdUnavailable, TransitionConfigurationMismatch, PayloadConversionLogicFlaw, - DeserializeTransaction(ssz_types::Error), - DeserializeTransactions(ssz_types::Error), + SszError(ssz_types::Error), DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), IncorrectStateVariant, RequiredMethodUnsupported(&'static str), UnsupportedForkVariant(String), - BadConversion(String), RlpDecoderError(rlp::DecoderError), } @@ -96,6 +103,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ssz_types::Error) -> Self { + Error::SszError(e) + } +} + #[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] pub enum PayloadStatusV1Status { @@ -137,7 +150,7 @@ pub struct ExecutionBlock { /// Representation of an execution block with enough detail to reconstruct a payload. 
#[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive(Clone, Debug, PartialEq, Serialize, Deserialize,), serde(bound = "T: EthSpec", rename_all = "camelCase"), @@ -171,8 +184,14 @@ pub struct ExecutionBlockWithTransactions { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub withdrawals: Vec, + #[superstruct(only(Deneb))] + #[serde(with = "serde_utils::u64_hex_be")] + pub blob_gas_used: u64, + #[superstruct(only(Deneb))] + #[serde(with = "serde_utils::u64_hex_be")] + pub excess_blob_gas: u64, } impl TryFrom> for ExecutionBlockWithTransactions { @@ -226,13 +245,39 @@ impl TryFrom> for ExecutionBlockWithTransactions .collect(), }) } + ExecutionPayload::Deneb(block) => Self::Deneb(ExecutionBlockWithTransactionsDeneb { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>()?, + withdrawals: Vec::from(block.withdrawals) + .into_iter() + .map(|withdrawal| withdrawal.into()) + .collect(), + blob_gas_used: block.blob_gas_used, + excess_blob_gas: block.excess_blob_gas, + }), }; Ok(json_payload) } } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") @@ -245,8 +290,10 @@ pub struct PayloadAttributes { pub prev_randao: Hash256, #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, - #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub withdrawals: Vec, + #[superstruct(only(V3), partial_getter(copy))] + pub parent_beacon_block_root: Hash256, } impl PayloadAttributes { @@ -255,14 +302,24 @@ impl PayloadAttributes { prev_randao: Hash256, suggested_fee_recipient: Address, withdrawals: Option>, + parent_beacon_block_root: Option, ) -> Self { match withdrawals { - Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 { - timestamp, - prev_randao, - suggested_fee_recipient, - withdrawals, - }), + Some(withdrawals) => match parent_beacon_block_root { + Some(parent_beacon_block_root) => PayloadAttributes::V3(PayloadAttributesV3 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + }), + None => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + }, None => PayloadAttributes::V1(PayloadAttributesV1 { timestamp, prev_randao, @@ -295,6 +352,19 @@ impl From for SsePayloadAttributes { suggested_fee_recipient, withdrawals, }), + PayloadAttributes::V3(PayloadAttributesV3 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + }) => Self::V3(SsePayloadAttributesV3 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + }), } } } @@ -320,7 +390,7 @@ pub struct ProposeBlindedBlockResponse { } #[superstruct( - variants(Merge, 
Capella), + variants(Merge, Capella, Deneb), variant_attributes(derive(Clone, Debug, PartialEq),), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), @@ -333,7 +403,27 @@ pub struct GetPayloadResponse<T: EthSpec> { pub execution_payload: ExecutionPayloadMerge<T>, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload: ExecutionPayloadCapella<T>, + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload: ExecutionPayloadDeneb<T>, pub block_value: Uint256, + #[superstruct(only(Deneb))] + pub blobs_bundle: BlobsBundle<T>, + #[superstruct(only(Deneb), partial_getter(copy))] + pub should_override_builder: bool, +} + +impl<T: EthSpec> GetPayloadResponse<T> { + pub fn fee_recipient(&self) -> Address { + ExecutionPayloadRef::from(self.to_ref()).fee_recipient() + } + + pub fn block_hash(&self) -> ExecutionBlockHash { + ExecutionPayloadRef::from(self.to_ref()).block_hash() + } + + pub fn block_number(&self) -> u64 { + ExecutionPayloadRef::from(self.to_ref()).block_number() + } } impl<'a, T: EthSpec> From<&'a GetPayloadResponse<T>> for ExecutionPayloadRef<'a, T> { @@ -352,21 +442,35 @@ impl<T: EthSpec> From<GetPayloadResponse<T>> for ExecutionPayload<T> { } } -impl<T: EthSpec> From<GetPayloadResponse<T>> for (ExecutionPayload<T>, Uint256) { +impl<T: EthSpec> From<GetPayloadResponse<T>> + for (ExecutionPayload<T>, Uint256, Option<BlobsBundle<T>>) +{ fn from(response: GetPayloadResponse<T>) -> Self { match response { GetPayloadResponse::Merge(inner) => ( ExecutionPayload::Merge(inner.execution_payload), inner.block_value, + None, ), GetPayloadResponse::Capella(inner) => ( ExecutionPayload::Capella(inner.execution_payload), inner.block_value, + None, + ), + GetPayloadResponse::Deneb(inner) => ( + ExecutionPayload::Deneb(inner.execution_payload), + inner.block_value, + Some(inner.blobs_bundle), ), } } } +pub enum GetPayloadResponseType<T: EthSpec> { + Full(GetPayloadResponse<T>), + Blinded(GetPayloadResponse<T>), +} + impl<T: EthSpec> GetPayloadResponse<T> { pub fn execution_payload_ref(&self) -> ExecutionPayloadRef<T> { self.to_ref().into() @@ -435,6 +539,138 @@ impl<E: EthSpec> ExecutionPayloadBodyV1<E> { )) } } + ExecutionPayloadHeader::Deneb(header) => { + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } + } + } + } +} + +#[superstruct( + variants(Merge, Capella, Deneb), + variant_attributes(derive(Clone, Debug, PartialEq),), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) +)] +#[derive(Clone, Debug, PartialEq)] +pub struct NewPayloadRequest<E: EthSpec> { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge<E>, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub 
execution_payload: ExecutionPayloadCapella<E>, + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload: ExecutionPayloadDeneb<E>, + #[superstruct(only(Deneb))] + pub versioned_hashes: Vec<VersionedHash>, + #[superstruct(only(Deneb))] + pub parent_beacon_block_root: Hash256, +} + +impl<E: EthSpec> NewPayloadRequest<E> { + pub fn parent_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload.parent_hash, + Self::Capella(payload) => payload.execution_payload.parent_hash, + Self::Deneb(payload) => payload.execution_payload.parent_hash, + } + } + + pub fn block_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload.block_hash, + Self::Capella(payload) => payload.execution_payload.block_hash, + Self::Deneb(payload) => payload.execution_payload.block_hash, + } + } + + pub fn block_number(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload.block_number, + Self::Capella(payload) => payload.execution_payload.block_number, + Self::Deneb(payload) => payload.execution_payload.block_number, + } + } + + pub fn into_execution_payload(self) -> ExecutionPayload<E> { + map_new_payload_request_into_execution_payload!(self, |request, cons| { + cons(request.execution_payload) + }) + } +} + +impl<'a, E: EthSpec> TryFrom<BeaconBlockRef<'a, E>> for NewPayloadRequest<E> { + type Error = BeaconStateError; + + fn try_from(block: BeaconBlockRef<'a, E>) -> Result<Self, Self::Error> { + match block { + BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => { + Err(Self::Error::IncorrectStateVariant) + } + BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge { + execution_payload: block_ref.body.execution_payload.execution_payload.clone(), + })), + BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella { + execution_payload: block_ref.body.execution_payload.execution_payload.clone(), + })), + BeaconBlockRef::Deneb(block_ref) => Ok(Self::Deneb(NewPayloadRequestDeneb { + execution_payload: block_ref.body.execution_payload.execution_payload.clone(), + versioned_hashes: block_ref + .body + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(), + parent_beacon_block_root: block_ref.parent_root, + })), + } + } +} + +impl<E: EthSpec> TryFrom<ExecutionPayload<E>> for NewPayloadRequest<E> { + type Error = BeaconStateError; + + fn try_from(payload: ExecutionPayload<E>) -> Result<Self, Self::Error> { + match payload { + ExecutionPayload::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge { + execution_payload: payload, + })), + ExecutionPayload::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella { + execution_payload: payload, + })), + ExecutionPayload::Deneb(_) => Err(Self::Error::IncorrectStateVariant), } } } @@ -443,12 +679,15 @@ impl<E: EthSpec> ExecutionPayloadBodyV1<E> { pub struct EngineCapabilities { pub new_payload_v1: bool, pub new_payload_v2: bool, + pub new_payload_v3: bool, pub forkchoice_updated_v1: bool, pub forkchoice_updated_v2: bool, + pub forkchoice_updated_v3: bool, pub get_payload_bodies_by_hash_v1: bool, pub get_payload_bodies_by_range_v1: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, + pub get_payload_v3: bool, } impl EngineCapabilities { @@ -460,12 +699,18 @@ impl EngineCapabilities { if self.new_payload_v2 { response.push(ENGINE_NEW_PAYLOAD_V2); } + if self.new_payload_v3 { + response.push(ENGINE_NEW_PAYLOAD_V3); + } if self.forkchoice_updated_v1 { response.push(ENGINE_FORKCHOICE_UPDATED_V1); } if self.forkchoice_updated_v2 { response.push(ENGINE_FORKCHOICE_UPDATED_V2); } + if 
self.forkchoice_updated_v3 { + response.push(ENGINE_FORKCHOICE_UPDATED_V3); + } if self.get_payload_bodies_by_hash_v1 { response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1); } @@ -478,6 +723,9 @@ impl EngineCapabilities { if self.get_payload_v2 { response.push(ENGINE_GET_PAYLOAD_V2); } + if self.get_payload_v3 { + response.push(ENGINE_GET_PAYLOAD_V3); + } response } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 0ce03e602..ac7dfa57e 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -32,14 +32,17 @@ pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; +pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; +pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; +pub const ENGINE_FORKCHOICE_UPDATED_V3: &str = "engine_forkchoiceUpdatedV3"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; @@ -58,10 +61,13 @@ pub const METHOD_NOT_FOUND_CODE: i64 = -32601; pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, + ENGINE_NEW_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V3, ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ]; @@ -72,12 +78,15 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v1: true, new_payload_v2: false, + new_payload_v3: false, forkchoice_updated_v1: true, forkchoice_updated_v2: false, + forkchoice_updated_v3: false, get_payload_bodies_by_hash_v1: false, get_payload_bodies_by_range_v1: false, get_payload_v1: true, get_payload_v2: false, + get_payload_v3: false, }; /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
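For orientation, a note that is not part of the diff: the string constants above are the method names a consensus node advertises to the execution engine via engine_exchangeCapabilities, and PRE_CAPELLA_ENGINE_CAPABILITIES is the conservative fallback assumed for engines that predate that RPC. Callers are expected to gate version-specific calls on the exchanged flags; a minimal sketch of that pattern using the EngineCapabilities flags this diff adds (the helper name get_deneb_payload_checked is invented for illustration):

// Sketch only: fetch a Deneb payload, but only if the connected execution
// engine advertised engine_getPayloadV3 during capability exchange.
async fn get_deneb_payload_checked<T: EthSpec>(
    api: &HttpJsonRpc,
    capabilities: &EngineCapabilities,
    payload_id: PayloadId,
) -> Result<GetPayloadResponse<T>, Error> {
    if !capabilities.get_payload_v3 {
        // engine_getPayloadV3 is the only method that returns the payload
        // together with its blobs bundle, so there is no older fallback here.
        return Err(Error::RequiredMethodUnsupported("engine_getPayloadV3"));
    }
    api.get_payload_v3(ForkName::Deneb, payload_id).await
}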
@@ -741,6 +750,14 @@ impl HttpJsonRpc { ) .await?, ), + ForkName::Deneb => ExecutionBlockWithTransactions::Deneb( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), ForkName::Base | ForkName::Altair => { return Err(Error::UnsupportedForkVariant(format!( "called get_block_by_hash_with_txns with fork {:?}", @@ -784,6 +801,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn new_payload_v3( + &self, + new_payload_request_deneb: NewPayloadRequestDeneb, + ) -> Result { + let params = json!([ + JsonExecutionPayload::V3(new_payload_request_deneb.execution_payload.into()), + new_payload_request_deneb.versioned_hashes, + new_payload_request_deneb.parent_beacon_block_root, + ]); + + let response: JsonPayloadStatusV1 = self + .rpc_request( + ENGINE_NEW_PAYLOAD_V3, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_v1( &self, payload_id: PayloadId, @@ -835,10 +873,33 @@ impl HttpJsonRpc { .await?; Ok(JsonGetPayloadResponse::V2(response).into()) } - ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( - "called get_payload_v2 with {}", - fork_name - ))), + ForkName::Base | ForkName::Altair | ForkName::Deneb => Err( + Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), + ), + } + } + + pub async fn get_payload_v3( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + match fork_name { + ForkName::Deneb => { + let response: JsonGetPayloadResponseV3 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V3, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V3(response).into()) + } + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Err( + Error::UnsupportedForkVariant(format!("called get_payload_v3 with {}", fork_name)), + ), } } @@ -884,6 +945,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn forkchoice_updated_v3( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) + ]); + + let response: JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V3, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, @@ -950,14 +1032,17 @@ impl HttpJsonRpc { Ok(capabilities) => Ok(EngineCapabilities { new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1), new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), + new_payload_v3: capabilities.contains(ENGINE_NEW_PAYLOAD_V3), forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), + forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3), get_payload_bodies_by_hash_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), get_payload_bodies_by_range_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), + get_payload_v3: 
capabilities.contains(ENGINE_GET_PAYLOAD_V3), }), } } @@ -994,15 +1079,28 @@ impl HttpJsonRpc { // new_payload that the execution engine supports pub async fn new_payload( &self, - execution_payload: ExecutionPayload, + new_payload_request: NewPayloadRequest, ) -> Result { let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.new_payload_v2 { - self.new_payload_v2(execution_payload).await - } else if engine_capabilities.new_payload_v1 { - self.new_payload_v1(execution_payload).await - } else { - Err(Error::RequiredMethodUnsupported("engine_newPayload")) + match new_payload_request { + NewPayloadRequest::Merge(_) | NewPayloadRequest::Capella(_) => { + if engine_capabilities.new_payload_v2 { + self.new_payload_v2(new_payload_request.into_execution_payload()) + .await + } else if engine_capabilities.new_payload_v1 { + self.new_payload_v1(new_payload_request.into_execution_payload()) + .await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayload")) + } + } + NewPayloadRequest::Deneb(new_payload_request_deneb) => { + if engine_capabilities.new_payload_v3 { + self.new_payload_v3(new_payload_request_deneb).await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayloadV3")) + } + } } } @@ -1014,12 +1112,27 @@ impl HttpJsonRpc { payload_id: PayloadId, ) -> Result, Error> { let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.get_payload_v2 { - self.get_payload_v2(fork_name, payload_id).await - } else if engine_capabilities.new_payload_v1 { - self.get_payload_v1(payload_id).await - } else { - Err(Error::RequiredMethodUnsupported("engine_getPayload")) + match fork_name { + ForkName::Merge | ForkName::Capella => { + if engine_capabilities.get_payload_v2 { + self.get_payload_v2(fork_name, payload_id).await + } else if engine_capabilities.new_payload_v1 { + self.get_payload_v1(payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayload")) + } + } + ForkName::Deneb => { + if engine_capabilities.get_payload_v3 { + self.get_payload_v3(fork_name, payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayloadV3")) + } + } + ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( + "called get_payload with {}", + fork_name + ))), } } @@ -1028,14 +1141,41 @@ impl HttpJsonRpc { pub async fn forkchoice_updated( &self, forkchoice_state: ForkchoiceState, - payload_attributes: Option, + maybe_payload_attributes: Option, ) -> Result { let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.forkchoice_updated_v2 { - self.forkchoice_updated_v2(forkchoice_state, payload_attributes) + if let Some(payload_attributes) = maybe_payload_attributes.as_ref() { + match payload_attributes { + PayloadAttributes::V1(_) | PayloadAttributes::V2(_) => { + if engine_capabilities.forkchoice_updated_v2 { + self.forkchoice_updated_v2(forkchoice_state, maybe_payload_attributes) + .await + } else if engine_capabilities.forkchoice_updated_v1 { + self.forkchoice_updated_v1(forkchoice_state, maybe_payload_attributes) + .await + } else { + Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) + } + } + PayloadAttributes::V3(_) => { + if engine_capabilities.forkchoice_updated_v3 { + self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes) + .await + } else { + Err(Error::RequiredMethodUnsupported( + "engine_forkchoiceUpdatedV3", + )) + } + } + } + } else if engine_capabilities.forkchoice_updated_v3 { + 
self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes) + .await + } else if engine_capabilities.forkchoice_updated_v2 { + self.forkchoice_updated_v2(forkchoice_state, maybe_payload_attributes) .await } else if engine_capabilities.forkchoice_updated_v1 { - self.forkchoice_updated_v1(forkchoice_state, payload_attributes) + self.forkchoice_updated_v1(forkchoice_state, maybe_payload_attributes) .await } else { Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index d85d294c8..e8641be79 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -2,10 +2,12 @@ use super::*; use serde::{Deserialize, Serialize}; use strum::EnumString; use superstruct::superstruct; +use types::beacon_block_body::KzgCommitments; +use types::blob_sidecar::BlobsList; use types::{ - EthSpec, ExecutionBlockHash, FixedVector, Transactions, Unsigned, VariableList, Withdrawal, + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadMerge, FixedVector, Transactions, Unsigned, VariableList, Withdrawal, }; -use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -61,7 +63,7 @@ pub struct JsonPayloadIdResponse { } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Debug, PartialEq, Default, Serialize, Deserialize,), serde(bound = "T: EthSpec", rename_all = "camelCase"), @@ -94,8 +96,14 @@ pub struct JsonExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub withdrawals: VariableList, + #[superstruct(only(V3))] + #[serde(with = "serde_utils::u64_hex_be")] + pub blob_gas_used: u64, + #[superstruct(only(V3))] + #[serde(with = "serde_utils::u64_hex_be")] + pub excess_blob_gas: u64, } impl From> for JsonExecutionPayloadV1 { @@ -144,12 +152,41 @@ impl From> for JsonExecutionPayloadV2 } } } +impl From> for JsonExecutionPayloadV3 { + fn from(payload: ExecutionPayloadDeneb) -> Self { + JsonExecutionPayloadV3 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} impl From> for JsonExecutionPayload { fn from(execution_payload: ExecutionPayload) -> Self { match execution_payload { ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()), ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), + ExecutionPayload::Deneb(payload) => JsonExecutionPayload::V3(payload.into()), } } } @@ -200,18 +237,47 @@ impl From> for ExecutionPayloadCapella } } } +impl 
From> for ExecutionPayloadDeneb { + fn from(payload: JsonExecutionPayloadV3) -> Self { + ExecutionPayloadDeneb { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} impl From> for ExecutionPayload { fn from(json_execution_payload: JsonExecutionPayload) -> Self { match json_execution_payload { JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()), JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), + JsonExecutionPayload::V3(payload) => ExecutionPayload::Deneb(payload.into()), } } } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Debug, PartialEq, Serialize, Deserialize), serde(bound = "T: EthSpec", rename_all = "camelCase") @@ -226,8 +292,14 @@ pub struct JsonGetPayloadResponse { pub execution_payload: JsonExecutionPayloadV1, #[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))] pub execution_payload: JsonExecutionPayloadV2, + #[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))] + pub execution_payload: JsonExecutionPayloadV3, #[serde(with = "serde_utils::u256_hex_be")] pub block_value: Uint256, + #[superstruct(only(V3))] + pub blobs_bundle: JsonBlobsBundleV1, + #[superstruct(only(V3))] + pub should_override_builder: bool, } impl From> for GetPayloadResponse { @@ -245,6 +317,14 @@ impl From> for GetPayloadResponse { block_value: response.block_value, }) } + JsonGetPayloadResponse::V3(response) => { + GetPayloadResponse::Deneb(GetPayloadResponseDeneb { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + blobs_bundle: response.blobs_bundle.into(), + should_override_builder: response.should_override_builder, + }) + } } } } @@ -284,7 +364,7 @@ impl From for Withdrawal { } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Debug, Clone, PartialEq, Serialize, Deserialize), serde(rename_all = "camelCase") @@ -299,13 +379,15 @@ pub struct JsonPayloadAttributes { pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, - #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub withdrawals: Vec, + #[superstruct(only(V3))] + pub parent_beacon_block_root: Hash256, } impl From for JsonPayloadAttributes { - fn from(payload_atributes: PayloadAttributes) -> Self { - match payload_atributes { + fn from(payload_attributes: PayloadAttributes) -> Self { + match payload_attributes { PayloadAttributes::V1(pa) => Self::V1(JsonPayloadAttributesV1 { timestamp: pa.timestamp, prev_randao: pa.prev_randao, @@ -317,6 +399,13 @@ impl From for JsonPayloadAttributes { suggested_fee_recipient: pa.suggested_fee_recipient, withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), }), + PayloadAttributes::V3(pa) => Self::V3(JsonPayloadAttributesV3 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: 
pa.suggested_fee_recipient, + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + parent_beacon_block_root: pa.parent_beacon_block_root, + }), } } } @@ -335,6 +424,41 @@ impl From<JsonPayloadAttributes> for PayloadAttributes { suggested_fee_recipient: jpa.suggested_fee_recipient, withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), }), + JsonPayloadAttributes::V3(jpa) => Self::V3(PayloadAttributesV3 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + parent_beacon_block_root: jpa.parent_beacon_block_root, + }), + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "E: EthSpec", rename_all = "camelCase")] +pub struct JsonBlobsBundleV1<E: EthSpec> { + pub commitments: KzgCommitments<E>, + pub proofs: KzgProofs<E>, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList<E>, +} + +impl<E: EthSpec> From<BlobsBundle<E>> for JsonBlobsBundleV1<E> { + fn from(blobs_bundle: BlobsBundle<E>) -> Self { + Self { + commitments: blobs_bundle.commitments, + proofs: blobs_bundle.proofs, + blobs: blobs_bundle.blobs, + } + } +} +impl<E: EthSpec> From<JsonBlobsBundleV1<E>> for BlobsBundle<E> { + fn from(json_blobs_bundle: JsonBlobsBundleV1<E>) -> Self { + Self { + commitments: json_blobs_bundle.commitments, + proofs: json_blobs_bundle.proofs, + blobs: json_blobs_bundle.blobs, } } } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 362f5b0b2..bc8e4e314 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -8,17 +8,19 @@ use crate::HttpJsonRpc; use lru::LruCache; use slog::{debug, error, info, warn, Logger}; use std::future::Future; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; +use types::non_zero_usize::new_non_zero_usize; use types::ExecutionBlockHash; /// The number of payload IDs that will be stored for each `Engine`. /// /// Since the size of each value is small (~800 bytes) a large number is used for safety. -const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; +const PAYLOAD_ID_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(512); const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes /// Stores the remembered state of an engine.
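A note on the NonZeroUsize switch above (illustrative, not part of the diff): recent releases of the lru crate take a NonZeroUsize capacity in LruCache::new, so the cache-size constants move from usize to NonZeroUsize built by a const helper. The new_non_zero_usize function imported from types::non_zero_usize is presumably the usual const-panic pattern, which rejects a zero capacity while the constant is evaluated at compile time instead of unwrapping at runtime:

use std::num::NonZeroUsize;

// Const-evaluated constructor: a zero argument fails compilation of the
// constant itself rather than panicking when the cache is first built.
pub const fn new_non_zero_usize(x: usize) -> NonZeroUsize {
    match NonZeroUsize::new(x) {
        Some(non_zero) => non_zero,
        None => panic!("Expected a non-zero usize"),
    }
}

// Usage mirrors the constant above:
// const PAYLOAD_ID_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(512);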
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 32e255a16..868d81944 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -14,7 +14,9 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkchoiceState}; -use eth2::types::builder_bid::SignedBuilderBid; +use eth2::types::FullPayloadContents; +use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse}; +use ethers_core::types::Transaction as EthersTransaction; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -27,7 +29,7 @@ use std::collections::HashMap; use std::fmt; use std::future::Future; use std::io::Write; -use std::marker::PhantomData; +use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; @@ -39,11 +41,16 @@ use tokio::{ }; use tokio_stream::wrappers::WatchStream; use tree_hash::TreeHash; -use types::{AbstractExecPayload, BeaconStateError, ExecPayload}; +use types::beacon_block_body::KzgCommitments; +use types::builder_bid::BuilderBid; +use types::non_zero_usize::new_non_zero_usize; +use types::payload::BlockProductionVersion; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionPayloadCapella, ExecutionPayloadMerge, - ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, - Slot, + AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, +}; +use types::{ + BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadCapella, + ExecutionPayloadMerge, FullPayload, ProposerPreparationData, PublicKeyBytes, Signature, Slot, }; mod block_hash; @@ -63,7 +70,7 @@ pub const DEFAULT_JWT_FILE: &str = "jwt.hex"; /// Each time the `ExecutionLayer` retrieves a block from an execution node, it stores that block /// in an LRU cache to avoid redundant lookups. This is the size of that cache. -const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128; +const EXECUTION_BLOCKS_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); /// A fee recipient address for use during block production. Only used as a very last resort if /// there is no address provided by the user. @@ -83,6 +90,32 @@ pub enum ProvenancedPayload

{ Builder(P), } +impl TryFrom> for ProvenancedPayload> { + type Error = Error; + + fn try_from(value: BuilderBid) -> Result { + let block_proposal_contents = match value { + BuilderBid::Merge(builder_bid) => BlockProposalContents::Payload { + payload: ExecutionPayloadHeader::Merge(builder_bid.header).into(), + block_value: builder_bid.value, + }, + BuilderBid::Capella(builder_bid) => BlockProposalContents::Payload { + payload: ExecutionPayloadHeader::Capella(builder_bid.header).into(), + block_value: builder_bid.value, + }, + BuilderBid::Deneb(builder_bid) => BlockProposalContents::PayloadAndBlobs { + payload: ExecutionPayloadHeader::Deneb(builder_bid.header).into(), + block_value: builder_bid.value, + kzg_commitments: builder_bid.blob_kzg_commitments, + blobs_and_proofs: None, + }, + }; + Ok(ProvenancedPayload::Builder( + BlockProposalContentsType::Blinded(block_proposal_contents), + )) + } +} + #[derive(Debug)] pub enum Error { NoEngine, @@ -104,7 +137,10 @@ pub enum Error { InvalidJWTSecret(String), InvalidForkForPayload, InvalidPayloadBody(String), + InvalidPayloadConversion, + InvalidBlobConversion(String), BeaconStateError(BeaconStateError), + PayloadTypeMismatch, } impl From for Error { @@ -119,54 +155,132 @@ impl From for Error { } } +pub enum BlockProposalContentsType { + Full(BlockProposalContents>), + Blinded(BlockProposalContents>), +} + pub enum BlockProposalContents> { Payload { payload: Payload, block_value: Uint256, - // TODO: remove for 4844, since it appears in PayloadAndBlobs - _phantom: PhantomData, + }, + PayloadAndBlobs { + payload: Payload, + block_value: Uint256, + kzg_commitments: KzgCommitments, + /// `None` for blinded `PayloadAndBlobs`. + blobs_and_proofs: Option<(BlobsList, KzgProofs)>, }, } +impl From>> + for BlockProposalContents> +{ + fn from(item: BlockProposalContents>) -> Self { + match item { + BlockProposalContents::Payload { + payload, + block_value, + } => BlockProposalContents::Payload { + payload: payload.execution_payload().into(), + block_value, + }, + BlockProposalContents::PayloadAndBlobs { + payload, + block_value, + kzg_commitments, + blobs_and_proofs: _, + } => BlockProposalContents::PayloadAndBlobs { + payload: payload.execution_payload().into(), + block_value, + kzg_commitments, + blobs_and_proofs: None, + }, + } + } +} + +impl> TryFrom> + for BlockProposalContents +{ + type Error = Error; + + fn try_from(response: GetPayloadResponse) -> Result { + let (execution_payload, block_value, maybe_bundle) = response.into(); + match maybe_bundle { + Some(bundle) => Ok(Self::PayloadAndBlobs { + payload: execution_payload.into(), + block_value, + kzg_commitments: bundle.commitments, + blobs_and_proofs: Some((bundle.blobs, bundle.proofs)), + }), + None => Ok(Self::Payload { + payload: execution_payload.into(), + block_value, + }), + } + } +} + +impl TryFrom> for BlockProposalContentsType { + type Error = Error; + + fn try_from(response_type: GetPayloadResponseType) -> Result { + match response_type { + GetPayloadResponseType::Full(response) => Ok(Self::Full(response.try_into()?)), + GetPayloadResponseType::Blinded(response) => Ok(Self::Blinded(response.try_into()?)), + } + } +} + +#[allow(clippy::type_complexity)] impl> BlockProposalContents { - pub fn payload(&self) -> &Payload { + pub fn deconstruct( + self, + ) -> ( + Payload, + Option>, + Option<(BlobsList, KzgProofs)>, + Uint256, + ) { match self { Self::Payload { payload, - block_value: _, - _phantom: _, - } => payload, + block_value, + } => (payload, None, None, block_value), + 
Self::PayloadAndBlobs { + payload, + block_value, + kzg_commitments, + blobs_and_proofs, + } => ( + payload, + Some(kzg_commitments), + blobs_and_proofs, + block_value, + ), + } + } + + pub fn payload(&self) -> &Payload { + match self { + Self::Payload { payload, .. } => payload, + Self::PayloadAndBlobs { payload, .. } => payload, } } pub fn to_payload(self) -> Payload { match self { - Self::Payload { - payload, - block_value: _, - _phantom: _, - } => payload, + Self::Payload { payload, .. } => payload, + Self::PayloadAndBlobs { payload, .. } => payload, } } pub fn block_value(&self) -> &Uint256 { match self { - Self::Payload { - payload: _, - block_value, - _phantom: _, - } => block_value, + Self::Payload { block_value, .. } => block_value, + Self::PayloadAndBlobs { block_value, .. } => block_value, } } - pub fn default_at_fork(fork_name: ForkName) -> Result { - Ok(match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BlockProposalContents::Payload { - payload: Payload::default_at_fork(fork_name)?, - block_value: Uint256::zero(), - _phantom: PhantomData, - } - } - }) - } } #[derive(Clone, PartialEq)] @@ -194,6 +308,7 @@ pub struct BuilderParams { pub chain_health: ChainHealth, } +#[derive(PartialEq)] pub enum ChainHealth { Healthy, Unhealthy(FailedCondition), @@ -201,13 +316,15 @@ pub enum ChainHealth { PreMerge, } -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum FailedCondition { Skips, SkipsPerEpoch, EpochsSinceFinalization, } +type PayloadContentsRefTuple<'a, T> = (ExecutionPayloadRef<'a, T>, Option<&'a BlobsBundle>); + struct Inner { engine: Arc, builder: ArcSwapOption, @@ -218,9 +335,7 @@ struct Inner { proposers: RwLock>, executor: TaskExecutor, payload_cache: PayloadCache, - builder_profit_threshold: Uint256, log: Logger, - always_prefer_builder_payload: bool, /// Track whether the last `newPayload` call errored. /// /// This is used *only* in the informational sync status endpoint, so that a VC using this @@ -247,10 +362,7 @@ pub struct Config { pub jwt_version: Option, /// Default directory for the jwt secret if not provided through cli. pub default_datadir: PathBuf, - /// The minimum value of an external payload for it to be considered in a proposal. 
- pub builder_profit_threshold: u128, pub execution_timeout_multiplier: Option, - pub always_prefer_builder_payload: bool, } /// Provides access to one execution engine and provides a neat interface for consumption by the @@ -272,9 +384,7 @@ impl ExecutionLayer { jwt_id, jwt_version, default_datadir, - builder_profit_threshold, execution_timeout_multiplier, - always_prefer_builder_payload, } = config; if urls.len() > 1 { @@ -335,9 +445,7 @@ impl ExecutionLayer { execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, payload_cache: PayloadCache::default(), - builder_profit_threshold: Uint256::from(builder_profit_threshold), log, - always_prefer_builder_payload, last_new_payload_errored: RwLock::new(false), }; @@ -375,7 +483,6 @@ impl ExecutionLayer { self.log(), "Using external block builder"; "builder_url" => ?builder_url, - "builder_profit_threshold" => self.inner.builder_profit_threshold.as_u128(), "local_user_agent" => builder_client.get_user_agent(), ); self.inner.builder.swap(Some(Arc::new(builder_client))); @@ -383,12 +490,28 @@ impl ExecutionLayer { } /// Cache a full payload, keyed on the `tree_hash_root` of the payload - fn cache_payload(&self, payload: ExecutionPayloadRef) -> Option> { - self.inner.payload_cache.put(payload.clone_from_ref()) + fn cache_payload( + &self, + payload_and_blobs: PayloadContentsRefTuple, + ) -> Option> { + let (payload_ref, maybe_json_blobs_bundle) = payload_and_blobs; + + let payload = payload_ref.clone_from_ref(); + let maybe_blobs_bundle = maybe_json_blobs_bundle + .cloned() + .map(|blobs_bundle| BlobsBundle { + commitments: blobs_bundle.commitments, + proofs: blobs_bundle.proofs, + blobs: blobs_bundle.blobs, + }); + + self.inner + .payload_cache + .put(FullPayloadContents::new(payload, maybe_blobs_bundle)) } /// Attempt to retrieve a full payload from the payload cache by the payload root - pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { + pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { self.inner.payload_cache.get(root) } @@ -570,6 +693,13 @@ impl ExecutionLayer { } } + /// Delete proposer preparation data for `proposer_index`. This is only useful in tests. + pub async fn clear_proposer_preparation(&self, proposer_index: u64) { + self.proposer_preparation_data() + .await + .remove(&proposer_index); + } + /// Removes expired entries from proposer_preparation_data and proposers caches async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { let mut proposer_preparation_data = self.proposer_preparation_data().await; @@ -649,7 +779,8 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. 
- pub async fn get_payload<Payload: AbstractExecPayload<T>>( + #[allow(clippy::too_many_arguments)] + pub async fn get_payload( &self, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, @@ -657,42 +788,67 @@ impl<T: EthSpec> ExecutionLayer<T> { builder_params: BuilderParams, current_fork: ForkName, spec: &ChainSpec, - ) -> Result<BlockProposalContents<T, Payload>, Error> { - let payload_result = match Payload::block_type() { - BlockType::Blinded => { - let _timer = metrics::start_timer_vec( - &metrics::EXECUTION_LAYER_REQUEST_TIMES, - &[metrics::GET_BLINDED_PAYLOAD], - ); - self.get_blinded_payload( + builder_boost_factor: Option<u64>, + block_production_version: BlockProductionVersion, + ) -> Result<BlockProposalContentsType<T>, Error> { + let payload_result_type = match block_production_version { + BlockProductionVersion::V3 => match self + .determine_and_fetch_payload( parent_hash, payload_attributes, forkchoice_update_params, builder_params, current_fork, + builder_boost_factor, spec, ) .await - } - BlockType::Full => { + { + Ok(payload) => payload, + Err(e) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::FAILURE], + ); + return Err(e); + } + }, + BlockProductionVersion::BlindedV2 => { let _timer = metrics::start_timer_vec( + &metrics::EXECUTION_LAYER_REQUEST_TIMES, - &[metrics::GET_PAYLOAD], + &[metrics::GET_BLINDED_PAYLOAD], ); - self.get_full_payload( + self.determine_and_fetch_payload( + parent_hash, + payload_attributes, + forkchoice_update_params, + builder_params, + current_fork, + None, + spec, + ) + .await? + } + BlockProductionVersion::FullV2 => self + .get_full_payload_with( parent_hash, payload_attributes, forkchoice_update_params, current_fork, + noop, ) .await - .map(ProvenancedPayload::Local) - } + .and_then(GetPayloadResponseType::try_into) + .map(ProvenancedPayload::Local)?, }; - // Track some metrics and return the result. - match payload_result { - Ok(ProvenancedPayload::Local(block_proposal_contents)) => { + let block_proposal_content_type = match payload_result_type { + ProvenancedPayload::Local(local_payload) => local_payload, + ProvenancedPayload::Builder(builder_payload) => builder_payload, + }; + + match block_proposal_content_type { + BlockProposalContentsType::Full(block_proposal_contents) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, &[metrics::SUCCESS], @@ -701,9 +857,15 @@ impl<T: EthSpec> ExecutionLayer<T> { &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, &[metrics::LOCAL], ); - Ok(block_proposal_contents) + if matches!(block_production_version, BlockProductionVersion::BlindedV2) { + Ok(BlockProposalContentsType::Blinded( + block_proposal_contents.into(), + )) + } else { + Ok(BlockProposalContentsType::Full(block_proposal_contents)) + } } - Ok(ProvenancedPayload::Builder(block_proposal_contents)) => { + BlockProposalContentsType::Blinded(block_proposal_contents) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, &[metrics::SUCCESS], @@ -712,253 +874,106 @@ impl<T: EthSpec> ExecutionLayer<T> { &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, &[metrics::BUILDER], ); - Ok(block_proposal_contents) - } - Err(e) => { - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, - &[metrics::FAILURE], - ); - Err(e) + Ok(BlockProposalContentsType::Blinded(block_proposal_contents)) } } } - async fn get_blinded_payload<Payload: AbstractExecPayload<T>>( + /// Fetches local and builder payloads concurrently, logs and returns the results.
+ async fn fetch_builder_and_local_payloads( + &self, + builder: &BuilderHttpClient, + parent_hash: ExecutionBlockHash, + builder_params: &BuilderParams, + payload_attributes: &PayloadAttributes, + forkchoice_update_params: ForkchoiceUpdateParameters, + current_fork: ForkName, + ) -> ( + Result>>, builder_client::Error>, + Result, Error>, + ) { + let slot = builder_params.slot; + let pubkey = &builder_params.pubkey; + + info!( + self.log(), + "Requesting blinded header from connected builder"; + "slot" => ?slot, + "pubkey" => ?pubkey, + "parent_hash" => ?parent_hash, + ); + + // Wait for the builder *and* local EL to produce a payload (or return an error). + let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!( + timed_future(metrics::GET_BLINDED_PAYLOAD_BUILDER, async { + builder + .get_builder_header::(slot, parent_hash, pubkey) + .await + }), + timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { + self.get_full_payload_caching( + parent_hash, + payload_attributes, + forkchoice_update_params, + current_fork, + ) + .await + .and_then(|local_result_type| match local_result_type { + GetPayloadResponseType::Full(payload) => Ok(payload), + GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), + }) + }) + ); + + info!( + self.log(), + "Requested blinded execution payload"; + "relay_fee_recipient" => match &relay_result { + Ok(Some(r)) => format!("{:?}", r.data.message.header().fee_recipient()), + Ok(None) => "empty response".to_string(), + Err(_) => "request failed".to_string(), + }, + "relay_response_ms" => relay_duration.as_millis(), + "local_fee_recipient" => match &local_result { + Ok(get_payload_response) => format!("{:?}", get_payload_response.fee_recipient()), + Err(_) => "request failed".to_string() + }, + "local_response_ms" => local_duration.as_millis(), + "parent_hash" => ?parent_hash, + ); + + (relay_result, local_result) + } + + #[allow(clippy::too_many_arguments)] + async fn determine_and_fetch_payload( &self, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, current_fork: ForkName, + builder_boost_factor: Option, spec: &ChainSpec, - ) -> Result>, Error> { - if let Some(builder) = self.builder() { - let slot = builder_params.slot; - let pubkey = builder_params.pubkey; + ) -> Result>, Error> { + let Some(builder) = self.builder() else { + // no builder.. return local payload + return self + .get_full_payload_caching( + parent_hash, + payload_attributes, + forkchoice_update_params, + current_fork, + ) + .await + .and_then(GetPayloadResponseType::try_into) + .map(ProvenancedPayload::Local); + }; + // check chain health + if builder_params.chain_health != ChainHealth::Healthy { + // chain is unhealthy, gotta use local payload match builder_params.chain_health { - ChainHealth::Healthy => { - info!( - self.log(), - "Requesting blinded header from connected builder"; - "slot" => ?slot, - "pubkey" => ?pubkey, - "parent_hash" => ?parent_hash, - ); - - // Wait for the builder *and* local EL to produce a payload (or return an error). 
- let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!( - timed_future(metrics::GET_BLINDED_PAYLOAD_BUILDER, async { - builder - .get_builder_header::(slot, parent_hash, &pubkey) - .await - }), - timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { - self.get_full_payload_caching::( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) - .await - }) - ); - - info!( - self.log(), - "Requested blinded execution payload"; - "relay_fee_recipient" => match &relay_result { - Ok(Some(r)) => format!("{:?}", r.data.message.header.fee_recipient()), - Ok(None) => "empty response".to_string(), - Err(_) => "request failed".to_string(), - }, - "relay_response_ms" => relay_duration.as_millis(), - "local_fee_recipient" => match &local_result { - Ok(proposal_contents) => format!("{:?}", proposal_contents.payload().fee_recipient()), - Err(_) => "request failed".to_string() - }, - "local_response_ms" => local_duration.as_millis(), - "parent_hash" => ?parent_hash, - ); - - return match (relay_result, local_result) { - (Err(e), Ok(local)) => { - warn!( - self.log(), - "Builder error when requesting payload"; - "info" => "falling back to local execution client", - "relay_error" => ?e, - "local_block_hash" => ?local.payload().block_hash(), - "parent_hash" => ?parent_hash, - ); - Ok(ProvenancedPayload::Local(local)) - } - (Ok(None), Ok(local)) => { - info!( - self.log(), - "Builder did not return a payload"; - "info" => "falling back to local execution client", - "local_block_hash" => ?local.payload().block_hash(), - "parent_hash" => ?parent_hash, - ); - Ok(ProvenancedPayload::Local(local)) - } - (Ok(Some(relay)), Ok(local)) => { - let header = &relay.data.message.header; - - info!( - self.log(), - "Received local and builder payloads"; - "relay_block_hash" => ?header.block_hash(), - "local_block_hash" => ?local.payload().block_hash(), - "parent_hash" => ?parent_hash, - ); - - let relay_value = relay.data.message.value; - let local_value = *local.block_value(); - if !self.inner.always_prefer_builder_payload { - if local_value >= relay_value { - info!( - self.log(), - "Local block is more profitable than relay block"; - "local_block_value" => %local_value, - "relay_value" => %relay_value - ); - return Ok(ProvenancedPayload::Local(local)); - } else { - info!( - self.log(), - "Relay block is more profitable than local block"; - "local_block_value" => %local_value, - "relay_value" => %relay_value - ); - } - } - - match verify_builder_bid( - &relay, - parent_hash, - payload_attributes, - Some(local.payload().block_number()), - self.inner.builder_profit_threshold, - current_fork, - spec, - ) { - Ok(()) => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload { - payload: relay.data.message.header, - block_value: relay.data.message.value, - _phantom: PhantomData, - }, - )), - Err(reason) if !reason.payload_invalid() => { - info!( - self.log(), - "Builder payload ignored"; - "info" => "using local payload", - "reason" => %reason, - "relay_block_hash" => ?header.block_hash(), - "parent_hash" => ?parent_hash, - ); - Ok(ProvenancedPayload::Local(local)) - } - Err(reason) => { - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, - &[reason.as_ref().as_ref()], - ); - warn!( - self.log(), - "Builder returned invalid payload"; - "info" => "using local payload", - "reason" => %reason, - "relay_block_hash" => ?header.block_hash(), - "parent_hash" => ?parent_hash, - ); - Ok(ProvenancedPayload::Local(local)) - } - } - } - 
(Ok(Some(relay)), Err(local_error)) => { - let header = &relay.data.message.header; - - info!( - self.log(), - "Received builder payload with local error"; - "relay_block_hash" => ?header.block_hash(), - "local_error" => ?local_error, - "parent_hash" => ?parent_hash, - ); - - match verify_builder_bid( - &relay, - parent_hash, - payload_attributes, - None, - self.inner.builder_profit_threshold, - current_fork, - spec, - ) { - Ok(()) => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload { - payload: relay.data.message.header, - block_value: relay.data.message.value, - _phantom: PhantomData, - }, - )), - // If the payload is valid then use it. The local EE failed - // to produce a payload so we have no alternative. - Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload { - payload: relay.data.message.header, - block_value: relay.data.message.value, - _phantom: PhantomData, - }, - )), - Err(reason) => { - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, - &[reason.as_ref().as_ref()], - ); - crit!( - self.log(), - "Builder returned invalid payload"; - "info" => "no local payload either - unable to propose block", - "reason" => %reason, - "relay_block_hash" => ?header.block_hash(), - "parent_hash" => ?parent_hash, - ); - Err(Error::CannotProduceHeader) - } - } - } - (Err(relay_error), Err(local_error)) => { - crit!( - self.log(), - "Unable to produce execution payload"; - "info" => "the local EL and builder both failed - unable to propose block", - "relay_error" => ?relay_error, - "local_error" => ?local_error, - "parent_hash" => ?parent_hash, - ); - - Err(Error::CannotProduceHeader) - } - (Ok(None), Err(local_error)) => { - crit!( - self.log(), - "Unable to produce execution payload"; - "info" => "the local EL failed and the builder returned nothing - \ - the block proposal will be missed", - "local_error" => ?local_error, - "parent_hash" => ?parent_hash, - ); - - Err(Error::CannotProduceHeader) - } - }; - } ChainHealth::Unhealthy(condition) => info!( self.log(), "Chain is unhealthy, using local payload"; @@ -974,44 +989,218 @@ impl ExecutionLayer { "info" => "the local execution engine is syncing and the builder network \ cannot safely be used - unable to propose block" ), + ChainHealth::Healthy => crit!( + self.log(), + "got healthy but also not healthy.. this shouldn't happen!" 
+ ), + } + return self + .get_full_payload_caching( + parent_hash, + payload_attributes, + forkchoice_update_params, + current_fork, + ) + .await + .and_then(GetPayloadResponseType::try_into) + .map(ProvenancedPayload::Local); + } + + let (relay_result, local_result) = self + .fetch_builder_and_local_payloads( + builder.as_ref(), + parent_hash, + &builder_params, + payload_attributes, + forkchoice_update_params, + current_fork, + ) + .await; + + match (relay_result, local_result) { + (Err(e), Ok(local)) => { + warn!( + self.log(), + "Builder error when requesting payload"; + "info" => "falling back to local execution client", + "relay_error" => ?e, + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, + ); + Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( + local.try_into()?, + ))) + } + (Ok(None), Ok(local)) => { + info!( + self.log(), + "Builder did not return a payload"; + "info" => "falling back to local execution client", + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, + ); + Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( + local.try_into()?, + ))) + } + (Err(relay_error), Err(local_error)) => { + crit!( + self.log(), + "Unable to produce execution payload"; + "info" => "the local EL and builder both failed - unable to propose block", + "relay_error" => ?relay_error, + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + Err(Error::CannotProduceHeader) + } + (Ok(None), Err(local_error)) => { + crit!( + self.log(), + "Unable to produce execution payload"; + "info" => "the local EL failed and the builder returned nothing - \ + the block proposal will be missed", + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + Err(Error::CannotProduceHeader) + } + (Ok(Some(relay)), Ok(local)) => { + let header = &relay.data.message.header(); + + info!( + self.log(), + "Received local and builder payloads"; + "relay_block_hash" => ?header.block_hash(), + "local_block_hash" => ?local.block_hash(), + "parent_hash" => ?parent_hash, + ); + + // check relay payload validity + if let Err(reason) = verify_builder_bid( + &relay, + parent_hash, + payload_attributes, + Some(local.block_number()), + current_fork, + spec, + ) { + // relay payload invalid -> return local + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, + &[reason.as_ref().as_ref()], + ); + warn!( + self.log(), + "Builder returned invalid payload"; + "info" => "using local payload", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, + ); + return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( + local.try_into()?, + ))); + } + + let relay_value = *relay.data.message.value(); + + let boosted_relay_value = match builder_boost_factor { + Some(builder_boost_factor) => { + (relay_value / 100).saturating_mul(builder_boost_factor.into()) + } + None => relay_value, + }; + + let local_value = *local.block_value(); + + if local_value >= boosted_relay_value { + info!( + self.log(), + "Local block is more profitable than relay block"; + "local_block_value" => %local_value, + "relay_value" => %relay_value, + "boosted_relay_value" => %boosted_relay_value, + "builder_boost_factor" => ?builder_boost_factor, + ); + return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( + local.try_into()?, + ))); + } + + if local.should_override_builder().unwrap_or(false) { + info!( + self.log(), + "Using local payload because execution 
engine suggested we ignore builder payload"; + "local_block_value" => %local_value, + "relay_value" => %relay_value + ); + return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( + local.try_into()?, + ))); + } + + info!( + self.log(), + "Relay block is more profitable than local block"; + "local_block_value" => %local_value, + "relay_value" => %relay_value, + "boosted_relay_value" => %boosted_relay_value, + "builder_boost_factor" => ?builder_boost_factor + ); + + Ok(ProvenancedPayload::try_from(relay.data.message)?) + } + (Ok(Some(relay)), Err(local_error)) => { + let header = &relay.data.message.header(); + + info!( + self.log(), + "Received builder payload with local error"; + "relay_block_hash" => ?header.block_hash(), + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + match verify_builder_bid( + &relay, + parent_hash, + payload_attributes, + None, + current_fork, + spec, + ) { + Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?), + Err(reason) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, + &[reason.as_ref().as_ref()], + ); + crit!( + self.log(), + "Builder returned invalid payload"; + "info" => "no local payload either - unable to propose block", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, + ); + Err(Error::CannotProduceHeader) + } + } } } - self.get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) - .await - .map(ProvenancedPayload::Local) - } - - /// Get a full payload without caching its result in the execution layer's payload cache. - async fn get_full_payload>( - &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, - ) -> Result, Error> { - self.get_full_payload_with( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - noop, - ) - .await } /// Get a full payload and cache its result in the execution layer's payload cache. 
- async fn get_full_payload_caching>( + async fn get_full_payload_caching( &self, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - ) -> Result, Error> { + ) -> Result, Error> { self.get_full_payload_with( parent_hash, payload_attributes, @@ -1022,14 +1211,17 @@ impl ExecutionLayer { .await } - async fn get_full_payload_with>( + async fn get_full_payload_with( &self, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - f: fn(&ExecutionLayer, ExecutionPayloadRef) -> Option>, - ) -> Result, Error> { + cache_fn: fn( + &ExecutionLayer, + PayloadContentsRefTuple, + ) -> Option>, + ) -> Result, Error> { self.engine() .request(move |engine| async move { let payload_id = if let Some(id) = engine @@ -1082,7 +1274,7 @@ impl ExecutionLayer { } }; - let payload_fut = async { + let payload_response = async { debug!( self.log(), "Issuing engine_getPayload"; @@ -1091,37 +1283,35 @@ impl ExecutionLayer { "timestamp" => payload_attributes.timestamp(), "parent_hash" => ?parent_hash, ); + let _timer = metrics::start_timer_vec( + &metrics::EXECUTION_LAYER_REQUEST_TIMES, + &[metrics::GET_PAYLOAD], + ); engine.api.get_payload::(current_fork, payload_id).await - }; - let payload_response = payload_fut.await; - let (execution_payload, block_value) = payload_response.map(|payload_response| { - if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { - error!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ - from the suggested_fee_recipient set on the beacon node. This could \ - indicate that fees are being diverted to another address. Please \ - ensure that the value of suggested_fee_recipient is set correctly and \ - that the Execution Engine is trusted.", - "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), - "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), - ); - } - if f(self, payload_response.execution_payload_ref()).is_some() { - warn!( - self.log(), - "Duplicate payload cached, this might indicate redundant proposal \ - attempts." - ); - } - payload_response.into() - })?; - Ok(BlockProposalContents::Payload { - payload: execution_payload.into(), - block_value, - _phantom: PhantomData, - }) + }.await?; + + if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + ); + } + if cache_fn(self, (payload_response.execution_payload_ref(), payload_response.blobs_bundle().ok())).is_some() { + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ + attempts." 
+ ); + } + + Ok(GetPayloadResponseType::Full(payload_response)) }) .await .map_err(Box::new) @@ -1131,24 +1321,25 @@ impl ExecutionLayer { /// Maps to the `engine_newPayload` JSON-RPC call. pub async fn notify_new_payload( &self, - execution_payload: &ExecutionPayload, + new_payload_request: NewPayloadRequest, ) -> Result { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::NEW_PAYLOAD], ); + let block_hash = new_payload_request.block_hash(); trace!( self.log(), "Issuing engine_newPayload"; - "parent_hash" => ?execution_payload.parent_hash(), - "block_hash" => ?execution_payload.block_hash(), - "block_number" => execution_payload.block_number(), + "parent_hash" => ?new_payload_request.parent_hash(), + "block_hash" => ?block_hash, + "block_number" => ?new_payload_request.block_number(), ); let result = self .engine() - .request(|engine| engine.api.new_payload(execution_payload.clone())) + .request(|engine| engine.api.new_payload(new_payload_request)) .await; if let Ok(status) = &result { @@ -1159,7 +1350,7 @@ impl ExecutionLayer { } *self.inner.last_new_payload_errored.write().await = result.is_err(); - process_payload_status(execution_payload.block_hash(), result, self.log()) + process_payload_status(block_hash, result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } @@ -1576,6 +1767,7 @@ impl ExecutionLayer { let payload = match fork { ForkName::Merge => ExecutionPayloadMerge::default().into(), ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::InvalidForkForPayload); } @@ -1643,31 +1835,30 @@ impl ExecutionLayer { return match fork { ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), + ForkName::Deneb => Ok(Some(ExecutionPayloadDeneb::default().into())), ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( format!("called get_payload_by_hash_from_engine with {}", fork), )), }; } - let block = if let Some(block) = engine + let Some(block) = engine .api .get_block_by_hash_with_txns::(hash, fork) .await? 
- { - block - } else { + else { return Ok(None); }; - let transactions = VariableList::new( - block - .transactions() - .iter() - .map(|transaction| VariableList::new(transaction.rlp().to_vec())) - .collect::>() - .map_err(ApiError::DeserializeTransaction)?, - ) - .map_err(ApiError::DeserializeTransactions)?; + let convert_transactions = |transactions: Vec| { + VariableList::new( + transactions + .into_iter() + .map(|tx| VariableList::new(tx.rlp().to_vec())) + .collect::, ssz_types::Error>>()?, + ) + .map_err(ApiError::SszError) + }; let payload = match block { ExecutionBlockWithTransactions::Merge(merge_block) => { @@ -1685,7 +1876,7 @@ impl ExecutionLayer { extra_data: merge_block.extra_data, base_fee_per_gas: merge_block.base_fee_per_gas, block_hash: merge_block.block_hash, - transactions, + transactions: convert_transactions(merge_block.transactions)?, }) } ExecutionBlockWithTransactions::Capella(capella_block) => { @@ -1711,10 +1902,39 @@ impl ExecutionLayer { extra_data: capella_block.extra_data, base_fee_per_gas: capella_block.base_fee_per_gas, block_hash: capella_block.block_hash, - transactions, + transactions: convert_transactions(capella_block.transactions)?, withdrawals, }) } + ExecutionBlockWithTransactions::Deneb(deneb_block) => { + let withdrawals = VariableList::new( + deneb_block + .withdrawals + .into_iter() + .map(Into::into) + .collect(), + ) + .map_err(ApiError::DeserializeWithdrawals)?; + ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: deneb_block.parent_hash, + fee_recipient: deneb_block.fee_recipient, + state_root: deneb_block.state_root, + receipts_root: deneb_block.receipts_root, + logs_bloom: deneb_block.logs_bloom, + prev_randao: deneb_block.prev_randao, + block_number: deneb_block.block_number, + gas_limit: deneb_block.gas_limit, + gas_used: deneb_block.gas_used, + timestamp: deneb_block.timestamp, + extra_data: deneb_block.extra_data, + base_fee_per_gas: deneb_block.base_fee_per_gas, + block_hash: deneb_block.block_hash, + transactions: convert_transactions(deneb_block.transactions)?, + withdrawals, + blob_gas_used: deneb_block.blob_gas_used, + excess_blob_gas: deneb_block.excess_blob_gas, + }) + } }; Ok(Some(payload)) @@ -1723,8 +1943,8 @@ impl ExecutionLayer { pub async fn propose_blinded_beacon_block( &self, block_root: Hash256, - block: &SignedBeaconBlock>, - ) -> Result, Error> { + block: &SignedBlindedBeaconBlock, + ) -> Result, Error> { debug!( self.log(), "Sending block to builder"; @@ -1743,11 +1963,12 @@ impl ExecutionLayer { .await; match &payload_result { - Ok(payload) => { + Ok(unblinded_response) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, &[metrics::SUCCESS], ); + let payload = unblinded_response.payload_ref(); info!( self.log(), "Builder successfully revealed payload"; @@ -1789,10 +2010,6 @@ impl ExecutionLayer { #[derive(AsRefStr)] #[strum(serialize_all = "snake_case")] enum InvalidBuilderPayload { - LowValue { - profit_threshold: Uint256, - payload_value: Uint256, - }, ParentHash { payload: ExecutionBlockHash, expected: ExecutionBlockHash, @@ -1823,34 +2040,9 @@ enum InvalidBuilderPayload { }, } -impl InvalidBuilderPayload { - /// Returns `true` if a payload is objectively invalid and should never be included on chain. - fn payload_invalid(&self) -> bool { - match self { - // A low-value payload isn't invalid, it should just be avoided if possible. - InvalidBuilderPayload::LowValue { .. } => false, - InvalidBuilderPayload::ParentHash { .. 
} => true, - InvalidBuilderPayload::PrevRandao { .. } => true, - InvalidBuilderPayload::Timestamp { .. } => true, - InvalidBuilderPayload::BlockNumber { .. } => true, - InvalidBuilderPayload::Fork { .. } => true, - InvalidBuilderPayload::Signature { .. } => true, - InvalidBuilderPayload::WithdrawalsRoot { .. } => true, - } - } -} - impl fmt::Display for InvalidBuilderPayload { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - InvalidBuilderPayload::LowValue { - profit_threshold, - payload_value, - } => write!( - f, - "payload value of {} does not meet user-configured profit-threshold of {}", - payload_value, profit_threshold - ), InvalidBuilderPayload::ParentHash { payload, expected } => { write!(f, "payload block hash was {} not {}", payload, expected) } @@ -1889,21 +2081,19 @@ impl fmt::Display for InvalidBuilderPayload { } /// Perform some cursory, non-exhaustive validation of the bid returned from the builder. -fn verify_builder_bid>( - bid: &ForkVersionedResponse>, +fn verify_builder_bid( + bid: &ForkVersionedResponse>, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, block_number: Option, - profit_threshold: Uint256, current_fork: ForkName, spec: &ChainSpec, ) -> Result<(), Box> { let is_signature_valid = bid.data.verify_signature(spec); - let header = &bid.data.message.header; - let payload_value = bid.data.message.value; + let header = &bid.data.message.header(); // Avoid logging values that we can't represent with our Prometheus library. - let payload_value_gwei = bid.data.message.value / 1_000_000_000; + let payload_value_gwei = bid.data.message.value() / 1_000_000_000; if payload_value_gwei <= Uint256::from(i64::max_value()) { metrics::set_gauge_vec( &metrics::EXECUTION_LAYER_PAYLOAD_BIDS, @@ -1917,14 +2107,9 @@ fn verify_builder_bid>( .ok() .cloned() .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); - let payload_withdrawals_root = header.withdrawals_root().ok(); + let payload_withdrawals_root = header.withdrawals_root().ok().copied(); - if payload_value < profit_threshold { - Err(Box::new(InvalidBuilderPayload::LowValue { - profit_threshold, - payload_value, - })) - } else if header.parent_hash() != parent_hash { + if header.parent_hash() != parent_hash { Err(Box::new(InvalidBuilderPayload::ParentHash { payload: header.parent_hash(), expected: parent_hash, @@ -1952,7 +2137,7 @@ fn verify_builder_bid>( } else if !is_signature_valid { Err(Box::new(InvalidBuilderPayload::Signature { signature: bid.data.signature.clone(), - pubkey: bid.data.message.pubkey, + pubkey: *bid.data.message.pubkey(), })) } else if payload_withdrawals_root != expected_withdrawals_root { Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot { @@ -1973,13 +2158,6 @@ async fn timed_future, T>(metric: &str, future: F) -> (T, (result, duration) } -fn noop( - _: &ExecutionLayer, - _: ExecutionPayloadRef, -) -> Option> { - None -} - #[cfg(test)] /// Returns the duration since the unix epoch. 
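Just above, `verify_builder_bid` scales the wei-denominated bid down to gwei before updating the Prometheus gauge, because the gauge can only hold an `i64`. A minimal self-contained model of that guard, with `u128` standing in for `Uint256`:

// `u128` stands in for the 256-bit wei amount used in `verify_builder_bid`.
fn bid_gauge_value(payload_value_wei: u128) -> Option<i64> {
    let payload_value_gwei = payload_value_wei / 1_000_000_000;
    if payload_value_gwei <= i64::MAX as u128 {
        Some(payload_value_gwei as i64)
    } else {
        // Still too large for the i64-based gauge; skip the metric update.
        None
    }
}

fn main() {
    // 1 ETH = 10^18 wei = 10^9 gwei.
    assert_eq!(
        bid_gauge_value(1_000_000_000_000_000_000),
        Some(1_000_000_000)
    );
}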
fn timestamp_now() -> u64 { @@ -1989,6 +2167,13 @@ fn timestamp_now() -> u64 { .as_secs() } +fn noop( + _: &ExecutionLayer, + _: PayloadContentsRefTuple, +) -> Option> { + None +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs index 1722edff4..1a2864c19 100644 --- a/beacon_node/execution_layer/src/payload_cache.rs +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -1,13 +1,16 @@ +use eth2::types::FullPayloadContents; use lru::LruCache; use parking_lot::Mutex; +use std::num::NonZeroUsize; use tree_hash::TreeHash; -use types::{EthSpec, ExecutionPayload, Hash256}; +use types::non_zero_usize::new_non_zero_usize; +use types::{EthSpec, Hash256}; -pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10; +pub const DEFAULT_PAYLOAD_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(10); /// A cache mapping execution payloads by tree hash roots. pub struct PayloadCache { - payloads: Mutex>>, + payloads: Mutex>>, } #[derive(Hash, PartialEq, Eq)] @@ -22,16 +25,16 @@ impl Default for PayloadCache { } impl PayloadCache { - pub fn put(&self, payload: ExecutionPayload) -> Option> { - let root = payload.tree_hash_root(); + pub fn put(&self, payload: FullPayloadContents) -> Option> { + let root = payload.payload_ref().tree_hash_root(); self.payloads.lock().put(PayloadCacheId(root), payload) } - pub fn pop(&self, root: &Hash256) -> Option> { + pub fn pop(&self, root: &Hash256) -> Option> { self.payloads.lock().pop(&PayloadCacheId(*root)) } - pub fn get(&self, hash: &Hash256) -> Option> { + pub fn get(&self, hash: &Hash256) -> Option> { self.payloads.lock().get(&PayloadCacheId(*hash)).cloned() } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index a8d98a767..182cad50f 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,5 @@ use crate::engines::ForkchoiceState; +use crate::EthersTransaction; use crate::{ engine_api::{ json_structures::{ @@ -8,15 +9,27 @@ use crate::{ }, ExecutionBlockWithTransactions, }; +use eth2::types::BlobsBundle; +use kzg::{Kzg, KzgCommitment, KzgProof}; +use parking_lot::Mutex; +use rand::{rngs::StdRng, Rng, SeedableRng}; use serde::{Deserialize, Serialize}; +use ssz::Decode; +use ssz_types::VariableList; use std::collections::HashMap; +use std::sync::Arc; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, - ForkName, Hash256, Uint256, + Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadHeader, ExecutionPayloadMerge, ForkName, Hash256, + Transaction, Transactions, Uint256, }; +use super::DEFAULT_TERMINAL_BLOCK; + +const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); + const GAS_LIMIT: u64 = 16384; const GAS_USED: u64 = GAS_LIMIT - 1; @@ -118,6 +131,19 @@ pub struct ExecutionBlockGenerator { * Post-merge fork triggers */ pub shanghai_time: Option, // withdrawals + pub cancun_time: Option, // deneb + /* + * deneb stuff + */ + pub blobs_bundles: HashMap>, + pub kzg: Option>, + rng: Arc>, +} + +fn make_rng() -> Arc> { + // Nondeterminism in tests is a highly undesirable thing. 
Seed the RNG to some arbitrary + // but fixed value for reproducibility. + Arc::new(Mutex::new(StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64))) } impl ExecutionBlockGenerator { @@ -126,6 +152,8 @@ impl ExecutionBlockGenerator { terminal_block_number: u64, terminal_block_hash: ExecutionBlockHash, shanghai_time: Option, + cancun_time: Option, + kzg: Option, ) -> Self { let mut gen = Self { head_block: <_>::default(), @@ -139,6 +167,10 @@ impl ExecutionBlockGenerator { next_payload_id: 0, payload_ids: <_>::default(), shanghai_time, + cancun_time, + blobs_bundles: <_>::default(), + kzg: kzg.map(Arc::new), + rng: make_rng(), }; gen.insert_pow_block(0).unwrap(); @@ -171,9 +203,12 @@ impl ExecutionBlockGenerator { } pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName { - match self.shanghai_time { - Some(fork_time) if timestamp >= fork_time => ForkName::Capella, - _ => ForkName::Merge, + match self.cancun_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Deneb, + _ => match self.shanghai_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Capella, + _ => ForkName::Merge, + }, } } @@ -249,10 +284,15 @@ impl ExecutionBlockGenerator { finalized_block_hash )); } - let parent_hash = if block_number == 0 { - ExecutionBlockHash::zero() + let block = if block_number == 0 { + generate_genesis_block(self.terminal_total_difficulty, self.terminal_block_number)? } else if let Some(block) = self.block_by_number(block_number - 1) { - block.block_hash() + generate_pow_block( + self.terminal_total_difficulty, + self.terminal_block_number, + block_number, + block.block_hash(), + )? } else { return Err(format!( "parent with block number {} not found", @@ -260,13 +300,6 @@ impl ExecutionBlockGenerator { )); }; - let block = generate_pow_block( - self.terminal_total_difficulty, - self.terminal_block_number, - block_number, - parent_hash, - )?; - // Insert block into block tree self.insert_block(Block::PoW(block))?; @@ -327,10 +360,10 @@ impl ExecutionBlockGenerator { Ok(hash) } + // This does not reject duplicate blocks inserted. This lets us re-use the same execution + // block generator for multiple beacon chains which is useful in testing. 
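The reworked `get_fork_at_timestamp` above resolves forks newest-first, so a timestamp past `cancun_time` maps to Deneb even though it is necessarily also past `shanghai_time`. The same match restated with plain types, runnable on its own:

#[derive(Debug, PartialEq)]
enum Fork {
    Merge,
    Capella,
    Deneb,
}

fn fork_at_timestamp(timestamp: u64, shanghai_time: Option<u64>, cancun_time: Option<u64>) -> Fork {
    // Check the newest configured fork time first.
    match cancun_time {
        Some(fork_time) if timestamp >= fork_time => Fork::Deneb,
        _ => match shanghai_time {
            Some(fork_time) if timestamp >= fork_time => Fork::Capella,
            _ => Fork::Merge,
        },
    }
}

fn main() {
    assert_eq!(fork_at_timestamp(5, Some(10), Some(20)), Fork::Merge);
    assert_eq!(fork_at_timestamp(15, Some(10), Some(20)), Fork::Capella);
    assert_eq!(fork_at_timestamp(25, Some(10), Some(20)), Fork::Deneb);
}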
pub fn insert_block(&mut self, block: Block) -> Result { - if self.blocks.contains_key(&block.block_hash()) { - return Err(format!("{:?} is already known", block.block_hash())); - } else if block.parent_hash() != ExecutionBlockHash::zero() + if block.parent_hash() != ExecutionBlockHash::zero() && !self.blocks.contains_key(&block.parent_hash()) { return Err(format!("parent block {:?} is unknown", block.parent_hash())); @@ -343,7 +376,7 @@ impl ExecutionBlockGenerator { let block_hash = block.block_hash(); self.block_hashes .entry(block.block_number()) - .or_insert_with(Vec::new) + .or_default() .push(block_hash); self.blocks.insert(block_hash, block); @@ -388,10 +421,12 @@ impl ExecutionBlockGenerator { self.payload_ids.get(id).cloned() } + pub fn get_blobs_bundle(&mut self, id: &PayloadId) -> Option> { + self.blobs_bundles.get(id).cloned() + } + pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { - let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) { - parent - } else { + let Some(parent) = self.blocks.get(&payload.parent_hash()) else { return PayloadStatusV1 { status: PayloadStatusV1Status::Syncing, latest_valid_hash: None, @@ -424,14 +459,20 @@ impl ExecutionBlockGenerator { forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { - if let Some(payload) = self - .pending_payloads - .remove(&forkchoice_state.head_block_hash) - { + // This is meant to cover starting post-merge transition at genesis. Useful for + // testing Capella forks and later. + let head_block_hash = forkchoice_state.head_block_hash; + if let Some(genesis_pow_block) = self.block_by_number(0) { + if genesis_pow_block.block_hash() == head_block_hash { + self.terminal_block_hash = head_block_hash; + } + } + + if let Some(payload) = self.pending_payloads.remove(&head_block_hash) { self.insert_block(Block::PoS(payload))?; } - let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash); + let unknown_head_block_hash = !self.blocks.contains_key(&head_block_hash); let unknown_safe_block_hash = forkchoice_state.safe_block_hash != ExecutionBlockHash::zero() && !self.blocks.contains_key(&forkchoice_state.safe_block_hash); @@ -464,75 +505,15 @@ impl ExecutionBlockGenerator { let parent = self .blocks - .get(&forkchoice_state.head_block_hash) - .ok_or_else(|| { - format!( - "unknown parent block {:?}", - forkchoice_state.head_block_hash - ) - })?; + .get(&head_block_hash) + .cloned() + .ok_or_else(|| format!("unknown parent block {head_block_hash:?}"))?; let id = payload_id_from_u64(self.next_payload_id); self.next_payload_id += 1; - let mut execution_payload = match &attributes { - PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - }), - PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { - ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - 
receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - }), - ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), - }), - _ => unreachable!(), - }, - }; - - *execution_payload.block_hash_mut() = - ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); - + let execution_payload = + self.build_new_execution_payload(head_block_hash, &parent, id, &attributes)?; self.payload_ids.insert(id, execution_payload); Some(id) @@ -559,12 +540,239 @@ impl ExecutionBlockGenerator { payload_id: id.map(Into::into), }) } + + pub fn build_new_execution_payload( + &mut self, + head_block_hash: ExecutionBlockHash, + parent: &Block, + id: PayloadId, + attributes: &PayloadAttributes, + ) -> Result, String> { + let mut execution_payload = match attributes { + PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { + ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: 
ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + }), + _ => unreachable!(), + }, + PayloadAttributes::V3(pa) => ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + }; + + match execution_payload.fork_name() { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {} + ForkName::Deneb => { + // get random number between 0 and Max Blobs + let mut rng = self.rng.lock(); + let num_blobs = rng.gen::() % (T::max_blobs_per_block() + 1); + let (bundle, transactions) = generate_blobs(num_blobs)?; + for tx in Vec::from(transactions) { + execution_payload + .transactions_mut() + .push(tx) + .map_err(|_| "transactions are full".to_string())?; + } + self.blobs_bundles.insert(id, bundle); + } + } + + *execution_payload.block_hash_mut() = + ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); + Ok(execution_payload) + } +} + +pub fn load_test_blobs_bundle() -> Result<(KzgCommitment, KzgProof, Blob), String> { + let BlobsBundle:: { + commitments, + proofs, + blobs, + } = BlobsBundle::from_ssz_bytes(TEST_BLOB_BUNDLE) + .map_err(|e| format!("Unable to decode ssz: {:?}", e))?; + + Ok(( + commitments + .first() + .cloned() + .ok_or("commitment missing in test bundle")?, + proofs + .first() + .cloned() + .ok_or("proof missing in test bundle")?, + blobs + .first() + .cloned() + .ok_or("blob missing in test bundle")?, + )) +} + +pub fn generate_blobs( + n_blobs: usize, +) -> Result<(BlobsBundle, Transactions), String> { + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + + let mut bundle = BlobsBundle::::default(); + let mut transactions = vec![]; + + for blob_index in 0..n_blobs { + let tx = static_valid_tx::() + .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?; + + transactions.push(tx); + bundle + .blobs + .push(blob.clone()) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + bundle + .commitments + .push(kzg_commitment) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + bundle + .proofs + .push(kzg_proof) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + } + + Ok((bundle, transactions.into())) +} + +fn static_valid_tx() -> Result, String> { + // This is a real transaction hex encoded, but we don't care about the contents of the transaction. 
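With Deneb active, each generated payload carries a pseudo-random number of copies of the fixture blob, drawn from the seeded RNG so test runs stay reproducible. A sketch of the count selection using the `rand` crate; the value 6 for the mainnet Deneb `MAX_BLOBS_PER_BLOCK` is assumed here for illustration:

use rand::{rngs::StdRng, Rng, SeedableRng};

// Assumption for illustration: 6 matches the mainnet Deneb preset.
const MAX_BLOBS_PER_BLOCK: usize = 6;

fn main() {
    // Same fixed seed as the generator, so every run draws the same counts.
    let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EED);
    // Modulo (max + 1) keeps the draw in the inclusive range 0..=max.
    let num_blobs = rng.gen::<usize>() % (MAX_BLOBS_PER_BLOCK + 1);
    assert!(num_blobs <= MAX_BLOBS_PER_BLOCK);
    println!("this payload will carry {num_blobs} blobs");
}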
+ let transaction: EthersTransaction = serde_json::from_str( + r#"{ + "blockHash":"0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2", + "blockNumber":"0x5daf3b", + "from":"0xa7d9ddbe1f17865597fbd27ec712455208b6b76d", + "gas":"0xc350", + "gasPrice":"0x4a817c800", + "hash":"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b", + "input":"0x68656c6c6f21", + "nonce":"0x15", + "to":"0xf02c1c8e6114b1dbe8937a39260b5b0a374432bb", + "transactionIndex":"0x41", + "value":"0xf3dbb76162000", + "v":"0x25", + "r":"0x1b5e176d927f8e9ab405058b2d2457392da3e20f328b16ddabcebc33eaac5fea", + "s":"0x4ba69724e8f69de52f0125ad8b3c5c2cef33019bac3249e2c0a2192766d1721c" + }"#, + ) + .unwrap(); + VariableList::new(transaction.rlp().to_vec()) + .map_err(|e| format!("Failed to convert transaction to SSZ: {:?}", e)) } fn payload_id_from_u64(n: u64) -> PayloadId { n.to_le_bytes() } +pub fn generate_genesis_header( + spec: &ChainSpec, + post_transition_merge: bool, +) -> Option> { + let genesis_fork = spec.fork_name_at_slot::(spec.genesis_slot); + let genesis_block_hash = + generate_genesis_block(spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK) + .ok() + .map(|block| block.block_hash); + match genesis_fork { + ForkName::Base | ForkName::Altair => None, + ForkName::Merge => { + if post_transition_merge { + let mut header = ExecutionPayloadHeader::Merge(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + Some(header) + } else { + Some(ExecutionPayloadHeader::::Merge(<_>::default())) + } + } + ForkName::Capella => { + let mut header = ExecutionPayloadHeader::Capella(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + Some(header) + } + ForkName::Deneb => { + let mut header = ExecutionPayloadHeader::Deneb(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + Some(header) + } + } +} + +pub fn generate_genesis_block( + terminal_total_difficulty: Uint256, + terminal_block_number: u64, +) -> Result { + generate_pow_block( + terminal_total_difficulty, + terminal_block_number, + 0, + ExecutionBlockHash::zero(), + ) +} + pub fn generate_pow_block( terminal_total_difficulty: Uint256, terminal_block_number: u64, @@ -605,7 +813,9 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use types::MainnetEthSpec; + use eth2_network_config::TRUSTED_SETUP_BYTES; + use kzg::TrustedSetup; + use types::{MainnetEthSpec, MinimalEthSpec}; #[test] fn pow_chain_only() { @@ -618,6 +828,8 @@ mod test { TERMINAL_BLOCK, ExecutionBlockHash::zero(), None, + None, + None, ); for i in 0..=TERMINAL_BLOCK { @@ -665,4 +877,32 @@ mod test { assert!(generator.block_by_number(next_i).is_none()); } } + + #[test] + fn valid_test_blobs() { + assert!( + validate_blob::().is_ok(), + "Mainnet preset test blobs bundle should contain valid proofs" + ); + assert!( + validate_blob::().is_ok(), + "Minimal preset test blobs bundle should contain valid proofs" + ); + } + + fn validate_blob() -> Result<(), String> { + let kzg = load_kzg()?; + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) + .map_err(|e| format!("Error converting blob to kzg blob: {e:?}"))?; + kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) + .map_err(|e| format!("Invalid blobs bundle: {e:?}")) + } + + fn load_kzg() -> Result { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: 
{e:?}"))?; + Kzg::new_from_trusted_setup(trusted_setup) + .map_err(|e| format!("Failed to load trusted setup: {e:?}")) + } } diff --git a/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz new file mode 100644 index 000000000..6b549a4da Binary files /dev/null and b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz differ diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 62cab5ad2..9dff1ac00 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -93,7 +93,7 @@ pub async fn handle_rpc( .unwrap()) } } - ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => { + ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 | ENGINE_NEW_PAYLOAD_V3 => { let request = match method { ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( get_param::>(params, 0) @@ -106,7 +106,17 @@ pub async fn handle_rpc( .map(|jep| JsonExecutionPayload::V1(jep)) }) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, - // TODO(4844) add that here.. + ENGINE_NEW_PAYLOAD_V3 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V3(jep)) + .or_else(|_| { + get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V2(jep)) + .or_else(|_| { + get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V1(jep)) + }) + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, _ => unreachable!(), }; @@ -144,7 +154,32 @@ pub async fn handle_rpc( )); } } - // TODO(4844) add 4844 error checking here + ForkName::Deneb => { + if method == ENGINE_NEW_PAYLOAD_V1 || method == ENGINE_NEW_PAYLOAD_V2 { + return Err(( + format!("{} called after deneb fork!", method), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V1(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV1` after deneb fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V2(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV2` after deneb fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } _ => unreachable!(), }; @@ -180,7 +215,7 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } - ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => { + ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 | ENGINE_GET_PAYLOAD_V3 => { let request: JsonPayloadIdRequest = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); @@ -196,6 +231,8 @@ pub async fn handle_rpc( ) })?; + let maybe_blobs = ctx.execution_block_generator.write().get_blobs_bundle(&id); + // validate method called correctly according to shanghai fork time if ctx .execution_block_generator @@ -209,7 +246,19 @@ pub async fn handle_rpc( FORK_REQUEST_MISMATCH_ERROR_CODE, )); } - // TODO(4844) add 4844 error checking here + // validate method called correctly according to deneb fork time + if ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(response.timestamp()) + == ForkName::Deneb + && (method == ENGINE_GET_PAYLOAD_V1 || method == ENGINE_GET_PAYLOAD_V2) + { + return Err(( + format!("{} called after deneb fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } match method { ENGINE_GET_PAYLOAD_V1 => { @@ -230,11 +279,31 @@ pub async fn handle_rpc( }) .unwrap() } + _ => unreachable!(), + }), + ENGINE_GET_PAYLOAD_V3 => Ok(match 
JsonExecutionPayload::from(response) { + JsonExecutionPayload::V3(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV3 { + execution_payload, + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V3 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + }) + .unwrap() + } + _ => unreachable!(), }), _ => unreachable!(), } } - ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => { + ENGINE_FORKCHOICE_UPDATED_V1 + | ENGINE_FORKCHOICE_UPDATED_V2 + | ENGINE_FORKCHOICE_UPDATED_V3 => { let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let payload_attributes = match method { @@ -260,7 +329,7 @@ pub async fn handle_rpc( .map(|opt| opt.map(JsonPayloadAttributes::V1)) .transpose() } - ForkName::Capella => { + ForkName::Capella | ForkName::Deneb => { get_param::>(params, 1) .map(|opt| opt.map(JsonPayloadAttributes::V2)) .transpose() @@ -272,10 +341,15 @@ pub async fn handle_rpc( }) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? } + ENGINE_FORKCHOICE_UPDATED_V3 => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V3)) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? + } _ => unreachable!(), }; - // validate method called correctly according to shanghai fork time + // validate method called correctly according to fork time if let Some(pa) = payload_attributes.as_ref() { match ctx .execution_block_generator @@ -300,6 +374,15 @@ pub async fn handle_rpc( FORK_REQUEST_MISMATCH_ERROR_CODE, )); } + if method == ENGINE_FORKCHOICE_UPDATED_V3 { + return Err(( + format!( + "{} called with `JsonPayloadAttributesV3` before Deneb fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } if matches!(pa, JsonPayloadAttributes::V1(_)) { return Err(( format!( @@ -310,7 +393,20 @@ pub async fn handle_rpc( )); } } - // TODO(4844) add 4844 error checking here + ForkName::Deneb => { + if method == ENGINE_FORKCHOICE_UPDATED_V1 { + return Err(( + format!("{} called after Deneb fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + if method == ENGINE_FORKCHOICE_UPDATED_V2 { + return Err(( + format!("{} called after Deneb fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + } _ => unreachable!(), }; } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index c9dd196fa..3d4ea51f4 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,49 +1,30 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes}; -use async_trait::async_trait; -use eth2::types::{BlockId, StateId, ValidatorId}; +use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; -pub use ethereum_consensus::state_transition::Context; -use ethereum_consensus::{ - crypto::{SecretKey, Signature}, - primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256}, - state_transition::Error, -}; use fork_choice::ForkchoiceUpdateParameters; -use mev_rs::{ - blinded_block_provider::Server as BlindedBlockProviderServer, - signing::{sign_builder_message, verify_signed_builder_message}, - types::{ - bellatrix::{ - BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, - }, - capella::{BuilderBid as BuilderBidCapella, 
SignedBuilderBid as SignedBuilderBidCapella}, - BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, - SignedBuilderBid, SignedValidatorRegistration, - }, - Error as MevError, -}; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; -use ssz::{Decode, Encode}; -use ssz_rs::{Merkleized, SimpleSerialize}; use std::collections::HashMap; use std::fmt::Debug; -use std::net::Ipv4Addr; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; +use types::builder_bid::{ + BuilderBid, BuilderBidCapella, BuilderBidDeneb, BuilderBidMerge, SignedBuilderBid, +}; use types::{ - Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, Hash256, Slot, + Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, + ExecutionPayloadHeaderRefMut, ForkName, ForkVersionedResponse, Hash256, PublicKeyBytes, + Signature, SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; - -pub type MockBuilderServer = axum::Server< - hyper::server::conn::AddrIncoming, - axum::routing::IntoMakeService, ->; +use types::{ExecutionBlockHash, SecretKey}; +use warp::{Filter, Rejection}; #[derive(Clone)] pub enum Operation { @@ -58,119 +39,154 @@ pub enum Operation { } impl Operation { - fn apply(self, bid: &mut B) -> Result<(), MevError> { + fn apply>(self, bid: &mut B) { match self { - Operation::FeeRecipient(fee_recipient) => { - *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? - } - Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64, - Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?, - Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?, - Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?, - Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64, - Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64, - Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?, + Operation::FeeRecipient(fee_recipient) => bid.set_fee_recipient(fee_recipient), + Operation::GasLimit(gas_limit) => bid.set_gas_limit(gas_limit as u64), + Operation::Value(value) => bid.set_value(value), + Operation::ParentHash(parent_hash) => bid.set_parent_hash(parent_hash), + Operation::PrevRandao(prev_randao) => bid.set_prev_randao(prev_randao), + Operation::BlockNumber(block_number) => bid.set_block_number(block_number as u64), + Operation::Timestamp(timestamp) => bid.set_timestamp(timestamp as u64), + Operation::WithdrawalsRoot(root) => bid.set_withdrawals_root(root), } - Ok(()) } } +#[derive(Debug)] +struct Custom(String); + +impl warp::reject::Reject for Custom {} + // contains functions we need for BuilderBids.. 
not sure what to call this -pub trait BidStuff { - fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress; - fn gas_limit_mut(&mut self) -> &mut u64; - fn value_mut(&mut self) -> &mut U256; - fn parent_hash_mut(&mut self) -> &mut Hash32; - fn prev_randao_mut(&mut self) -> &mut Hash32; - fn block_number_mut(&mut self) -> &mut u64; - fn timestamp_mut(&mut self) -> &mut u64; - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; +pub trait BidStuff { + fn set_fee_recipient(&mut self, fee_recipient_address: Address); + fn set_gas_limit(&mut self, gas_limit: u64); + fn set_value(&mut self, value: Uint256); + fn set_parent_hash(&mut self, parent_hash: Hash256); + fn set_prev_randao(&mut self, randao: Hash256); + fn set_block_number(&mut self, block_number: u64); + fn set_timestamp(&mut self, timestamp: u64); + fn set_withdrawals_root(&mut self, withdrawals_root: Hash256); - fn sign_builder_message( - &mut self, - signing_key: &SecretKey, - context: &Context, - ) -> Result; + fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature; - fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid; + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid; } -impl BidStuff for BuilderBid { - fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress { - match self { - Self::Bellatrix(bid) => &mut bid.header.fee_recipient, - Self::Capella(bid) => &mut bid.header.fee_recipient, - } - } - - fn gas_limit_mut(&mut self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.gas_limit, - Self::Capella(bid) => &mut bid.header.gas_limit, - } - } - - fn value_mut(&mut self) -> &mut U256 { - match self { - Self::Bellatrix(bid) => &mut bid.value, - Self::Capella(bid) => &mut bid.value, - } - } - - fn parent_hash_mut(&mut self) -> &mut Hash32 { - match self { - Self::Bellatrix(bid) => &mut bid.header.parent_hash, - Self::Capella(bid) => &mut bid.header.parent_hash, - } - } - - fn prev_randao_mut(&mut self) -> &mut Hash32 { - match self { - Self::Bellatrix(bid) => &mut bid.header.prev_randao, - Self::Capella(bid) => &mut bid.header.prev_randao, - } - } - - fn block_number_mut(&mut self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.block_number, - Self::Capella(bid) => &mut bid.header.block_number, - } - } - - fn timestamp_mut(&mut self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.timestamp, - Self::Capella(bid) => &mut bid.header.timestamp, - } - } - - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { - match self { - Self::Bellatrix(_) => Err(MevError::InvalidFork), - Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), - } - } - - fn sign_builder_message( - &mut self, - signing_key: &SecretKey, - context: &Context, - ) -> Result { - match self { - Self::Bellatrix(message) => sign_builder_message(message, signing_key, context), - Self::Capella(message) => sign_builder_message(message, signing_key, context), - } - } - - fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { - match self { - Self::Bellatrix(message) => { - SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature }) +impl BidStuff for BuilderBid { + fn set_fee_recipient(&mut self, fee_recipient: Address) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.fee_recipient = fee_recipient; } - Self::Capella(message) => { - SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature }) + 
ExecutionPayloadHeaderRefMut::Capella(header) => { + header.fee_recipient = fee_recipient; } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.fee_recipient = fee_recipient; + } + } + } + + fn set_gas_limit(&mut self, gas_limit: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.gas_limit = gas_limit; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.gas_limit = gas_limit; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.gas_limit = gas_limit; + } + } + } + + fn set_value(&mut self, value: Uint256) { + *self.value_mut() = value; + } + + fn set_parent_hash(&mut self, parent_hash: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); + } + } + } + + fn set_prev_randao(&mut self, prev_randao: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.prev_randao = prev_randao; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.prev_randao = prev_randao; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.prev_randao = prev_randao; + } + } + } + + fn set_block_number(&mut self, block_number: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.block_number = block_number; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.block_number = block_number; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.block_number = block_number; + } + } + } + + fn set_timestamp(&mut self, timestamp: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.timestamp = timestamp; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.timestamp = timestamp; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.timestamp = timestamp; + } + } + } + + fn set_withdrawals_root(&mut self, withdrawals_root: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(_) => { + panic!("no withdrawals before capella") + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.withdrawals_root = withdrawals_root; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.withdrawals_root = withdrawals_root; + } + } + } + + fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature { + let domain = spec.get_builder_domain(); + let message = self.signing_root(domain); + sk.sign(message) + } + + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { + SignedBuilderBid { + message: self, + signature, } } } @@ -180,8 +196,7 @@ pub struct MockBuilder { el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, spec: ChainSpec, - context: Arc, - val_registration_cache: Arc>>, + val_registration_cache: Arc>>, builder_sk: SecretKey, operations: Arc>>, invalidate_signatures: Arc>, @@ -193,7 +208,7 @@ impl MockBuilder { beacon_url: SensitiveUrl, spec: ChainSpec, executor: TaskExecutor, - ) -> (Self, MockBuilderServer) { + ) -> (Self, (SocketAddr, impl Future)) { let file = NamedTempFile::new().unwrap(); let path = file.path().into(); std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); @@ 
-209,23 +224,14 @@ impl MockBuilder { let el = ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); - // This should probably be done for all fields, we only update ones we are testing with so far. - let mut context = Context::for_mainnet(); - context.terminal_total_difficulty = to_ssz_rs(&spec.terminal_total_difficulty).unwrap(); - context.terminal_block_hash = to_ssz_rs(&spec.terminal_block_hash).unwrap(); - context.terminal_block_hash_activation_epoch = - to_ssz_rs(&spec.terminal_block_hash_activation_epoch).unwrap(); - let builder = MockBuilder::new( el, BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), spec, - context, ); let host: Ipv4Addr = Ipv4Addr::LOCALHOST; let port = 0; - let provider = BlindedBlockProviderServer::new(host, port, builder.clone()); - let server = provider.serve(); + let server = serve(host, port, builder.clone()).expect("mock builder server should start"); (builder, server) } @@ -233,15 +239,13 @@ impl MockBuilder { el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, spec: ChainSpec, - context: Context, ) -> Self { - let sk = SecretKey::random(&mut rand::thread_rng()).unwrap(); + let sk = SecretKey::random(); Self { el, beacon_client, // Should keep spec and context consistent somehow spec, - context: Arc::new(context), val_registration_cache: Arc::new(RwLock::new(HashMap::new())), builder_sk: sk, operations: Arc::new(RwLock::new(vec![])), @@ -263,237 +267,383 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut B) -> Result<(), MevError> { + fn apply_operations>(&self, bid: &mut B) { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { - op.apply(bid)?; + op.apply(bid); } - Ok(()) } } -#[async_trait] -impl mev_rs::BlindedBlockProvider for MockBuilder { - async fn register_validators( - &self, - registrations: &mut [SignedValidatorRegistration], - ) -> Result<(), MevError> { - for registration in registrations { - let pubkey = registration.message.public_key.clone(); - let message = &mut registration.message; - verify_signed_builder_message( - message, - ®istration.signature, - &pubkey, - &self.context, - )?; - self.val_registration_cache.write().insert( - registration.message.public_key.clone(), - registration.clone(), - ); - } +pub fn serve( + listen_addr: Ipv4Addr, + listen_port: u16, + builder: MockBuilder, +) -> Result<(SocketAddr, impl Future), crate::test_utils::Error> { + let inner_ctx = builder.clone(); + let ctx_filter = warp::any().map(move || inner_ctx.clone()); - Ok(()) - } + let prefix = warp::path("eth") + .and(warp::path("v1")) + .and(warp::path("builder")); - async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result { - let slot = Slot::new(bid_request.slot); - let fork = self.spec.fork_name_at_slot::(slot); - let signed_cached_data = self - .val_registration_cache - .read() - .get(&bid_request.public_key) - .ok_or_else(|| convert_err("missing registration"))? 
-            .clone();
+    let validators = prefix
-        let cached_data = signed_cached_data.message;
+        .and(warp::path("validators"))
+        .and(warp::body::json())
+        .and(warp::path::end())
+        .and(ctx_filter.clone())
+        .and_then(
+            |registrations: Vec<SignedValidatorRegistrationData>, builder: MockBuilder<E>| async move {
+                for registration in registrations {
+                    if !registration.verify_signature(&builder.spec) {
+                        return Err(reject("invalid signature"));
+                    }
+                    builder
+                        .val_registration_cache
+                        .write()
+                        .insert(registration.message.pubkey, registration);
+                }
+                Ok(warp::reply())
+            },
+        );
-        let head = self
-            .beacon_client
-            .get_beacon_blocks::<E>(BlockId::Head)
-            .await
-            .map_err(convert_err)?
-            .ok_or_else(|| convert_err("missing head block"))?;
+    let blinded_block = prefix
+        .and(warp::path("blinded_blocks"))
+        .and(warp::body::json())
+        .and(warp::path::end())
+        .and(ctx_filter.clone())
+        .and_then(
+            |block: SignedBlindedBeaconBlock<E>, builder: MockBuilder<E>| async move {
+                let slot = block.slot();
+                let root = match block {
+                    SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => {
+                        return Err(reject("invalid fork"));
+                    }
+                    SignedBlindedBeaconBlock::Merge(block) => {
+                        block.message.body.execution_payload.tree_hash_root()
+                    }
+                    SignedBlindedBeaconBlock::Capella(block) => {
+                        block.message.body.execution_payload.tree_hash_root()
+                    }
+                    SignedBlindedBeaconBlock::Deneb(block) => {
+                        block.message.body.execution_payload.tree_hash_root()
+                    }
+                };
-        let block = head.data.message();
-        let head_block_root = block.tree_hash_root();
-        let head_execution_hash = block
-            .body()
-            .execution_payload()
-            .map_err(convert_err)?
-            .block_hash();
-        if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? {
-            return Err(custom_err(format!(
-                "head mismatch: {} {}",
-                head_execution_hash, bid_request.parent_hash
-            )));
-        }
+                let fork_name = builder.spec.fork_name_at_slot::<E>(slot);
+                let payload = builder
+                    .el
+                    .get_payload_by_root(&root)
+                    .ok_or_else(|| reject("missing payload for tx root"))?;
+                let resp: ForkVersionedResponse<_> = ForkVersionedResponse {
+                    version: Some(fork_name),
+                    metadata: Default::default(),
+                    data: payload,
+                };
-        let finalized_execution_hash = self
-            .beacon_client
-            .get_beacon_blocks::<E>(BlockId::Finalized)
-            .await
-            .map_err(convert_err)?
-            .ok_or_else(|| convert_err("missing finalized block"))?
-            .data
-            .message()
-            .body()
-            .execution_payload()
-            .map_err(convert_err)?
-            .block_hash();
+                let json_payload = serde_json::to_string(&resp)
+                    .map_err(|_| reject("couldn't serialize response"))?;
+                Ok::<_, warp::reject::Rejection>(
+                    warp::http::Response::builder()
+                        .status(200)
+                        .body(
+                            serde_json::to_string(&json_payload)
+                                .map_err(|_| reject("invalid JSON"))?,
+                        )
+                        .unwrap(),
+                )
+            },
+        );
-        let justified_execution_hash = self
-            .beacon_client
-            .get_beacon_blocks::<E>(BlockId::Justified)
-            .await
-            .map_err(convert_err)?
-            .ok_or_else(|| convert_err("missing finalized block"))?
-            .data
-            .message()
-            .body()
-            .execution_payload()
-            .map_err(convert_err)?
-            .block_hash();
+    let status = prefix
+        .and(warp::path("status"))
+        .then(|| async { warp::reply() });
-        let val_index = self
-            .beacon_client
-            .get_beacon_states_validator_id(
-                StateId::Head,
-                &ValidatorId::PublicKey(from_ssz_rs(&cached_data.public_key)?),
-            )
-            .await
-            .map_err(convert_err)?
-            .ok_or_else(|| convert_err("missing validator from state"))?
- .data - .index; - let fee_recipient = from_ssz_rs(&cached_data.fee_recipient)?; - let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + let header = prefix + .and(warp::path("header")) + .and(warp::path::param::().or_else(|_| async { Err(reject("Invalid slot")) })) + .and( + warp::path::param::() + .or_else(|_| async { Err(reject("Invalid parent hash")) }), + ) + .and( + warp::path::param::() + .or_else(|_| async { Err(reject("Invalid pubkey")) }), + ) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: PublicKeyBytes, + builder: MockBuilder| async move { + let fork = builder.spec.fork_name_at_slot::(slot); + let signed_cached_data = builder + .val_registration_cache + .read() + .get(&pubkey) + .ok_or_else(|| reject("missing registration"))? + .clone(); + let cached_data = signed_cached_data.message; - let genesis_time = self - .beacon_client - .get_beacon_genesis() - .await - .map_err(convert_err)? - .data - .genesis_time; - let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + let head = builder + .beacon_client + .get_beacon_blocks::(BlockId::Head) + .await + .map_err(|_| reject("couldn't get head"))? + .ok_or_else(|| reject("missing head block"))?; - let head_state: BeaconState = self - .beacon_client - .get_debug_beacon_states(StateId::Head) - .await - .map_err(convert_err)? - .ok_or_else(|| custom_err("missing head state".to_string()))? - .data; - let prev_randao = head_state - .get_randao_mix(head_state.current_epoch()) - .map_err(convert_err)?; + let block = head.data.message(); + let head_block_root = block.tree_hash_root(); + let head_execution_hash = block + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); + if head_execution_hash != parent_hash { + return Err(reject("head mismatch")); + } - let payload_attributes = match fork { - ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None), - // the withdrawals root is filled in by operations - ForkName::Capella => { - PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) - } - ForkName::Base | ForkName::Altair => { - return Err(MevError::InvalidFork); - } - }; + let finalized_execution_hash = builder + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(|_| reject("couldn't get finalized block"))? + .ok_or_else(|| reject("missing finalized block"))? + .data + .message() + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); - self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) - .await; + let justified_execution_hash = builder + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(|_| reject("couldn't get justified block"))? + .ok_or_else(|| reject("missing justified block"))? + .data + .message() + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); - let forkchoice_update_params = ForkchoiceUpdateParameters { - head_root: Hash256::zero(), - head_hash: None, - justified_hash: Some(justified_execution_hash), - finalized_hash: Some(finalized_execution_hash), - }; + let val_index = builder + .beacon_client + .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(pubkey)) + .await + .map_err(|_| reject("couldn't get validator"))? + .ok_or_else(|| reject("missing validator"))? 
+ .data + .index; + let fee_recipient = cached_data.fee_recipient; + let slots_since_genesis = slot.as_u64() - builder.spec.genesis_slot.as_u64(); - let payload = self - .el - .get_full_payload_caching::>( - head_execution_hash, - &payload_attributes, - forkchoice_update_params, - fork, - ) - .await - .map_err(convert_err)? - .to_payload() - .to_execution_payload_header(); + let genesis_data = builder + .beacon_client + .get_beacon_genesis() + .await + .map_err(|_| reject("couldn't get beacon genesis"))? + .data; + let genesis_time = genesis_data.genesis_time; + let timestamp = + (slots_since_genesis * builder.spec.seconds_per_slot) + genesis_time; - let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - let mut message = match fork { - ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { - header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }), - ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix { - header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }), - ForkName::Base | ForkName::Altair => return Err(MevError::InvalidFork), - }; - *message.gas_limit_mut() = cached_data.gas_limit; + let head_state: BeaconState = builder + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(|_| reject("couldn't get state"))? + .ok_or_else(|| reject("missing state"))? + .data; + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(|_| reject("couldn't get prev randao"))?; + let expected_withdrawals = match fork { + ForkName::Base | ForkName::Altair | ForkName::Merge => None, + ForkName::Capella | ForkName::Deneb => Some( + builder + .beacon_client + .get_expected_withdrawals(&StateId::Head) + .await + .unwrap() + .data, + ), + }; - self.apply_operations(&mut message)?; - let mut signature = - message.sign_builder_message(&self.builder_sk, self.context.as_ref())?; + let payload_attributes = match fork { + // the withdrawals root is filled in by operations, but we supply the valid withdrawals + // first to avoid polluting the execution block generator with invalid payload attributes + // NOTE: this was part of an effort to add payload attribute uniqueness checks, + // which was abandoned because it broke too many tests in subtle ways. 
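The header handler above derives the payload timestamp arithmetically: genesis time plus slots-since-genesis times the slot duration. Restated as a standalone function, with 12-second slots and the mainnet genesis time used purely as example inputs:

fn slot_timestamp(slot: u64, genesis_slot: u64, seconds_per_slot: u64, genesis_time: u64) -> u64 {
    let slots_since_genesis = slot - genesis_slot;
    (slots_since_genesis * seconds_per_slot) + genesis_time
}

fn main() {
    // With 12-second slots, slot 2 lands 24 seconds after genesis.
    assert_eq!(slot_timestamp(2, 0, 12, 1_606_824_023), 1_606_824_047);
}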
+ ForkName::Merge | ForkName::Capella => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + None, + ), + ForkName::Deneb => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + ), + ForkName::Base | ForkName::Altair => { + return Err(reject("invalid fork")); + } + }; - if *self.invalidate_signatures.read() { - signature = Signature::default(); - } + builder + .el + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) + .await; - Ok(message.to_signed_bid(signature)) - } + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: Hash256::zero(), + head_hash: None, + justified_hash: Some(justified_execution_hash), + finalized_hash: Some(finalized_execution_hash), + }; - async fn open_bid( - &self, - signed_block: &mut SignedBlindedBeaconBlock, - ) -> Result { - let node = match signed_block { - SignedBlindedBeaconBlock::Bellatrix(block) => { - block.message.body.execution_payload_header.hash_tree_root() - } - SignedBlindedBeaconBlock::Capella(block) => { - block.message.body.execution_payload_header.hash_tree_root() - } - } - .map_err(convert_err)?; + let payload_response_type = builder + .el + .get_full_payload_caching( + head_execution_hash, + &payload_attributes, + forkchoice_update_params, + fork, + ) + .await + .map_err(|_| reject("couldn't get payload"))?; - let payload = self - .el - .get_payload_by_root(&from_ssz_rs(&node)?) - .ok_or_else(|| convert_err("missing payload for tx root"))?; + let mut message = match payload_response_type { + crate::GetPayloadResponseType::Full(payload_response) => { + let (payload, _block_value, maybe_blobs_bundle): ( + ExecutionPayload, + Uint256, + Option>, + ) = payload_response.into(); - let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - serde_json::from_str(json_payload.as_str()).map_err(convert_err) - } + match fork { + ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { + header: payload + .as_deneb() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: payload + .as_capella() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Merge => BuilderBid::Merge(BuilderBidMerge { + header: payload + .as_merge() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Base | ForkName::Altair => { + return Err(reject("invalid fork")) + } + } + } + crate::GetPayloadResponseType::Blinded(payload_response) => { + let (payload, _block_value, maybe_blobs_bundle): ( + ExecutionPayload, + Uint256, + Option>, + ) = payload_response.into(); + match fork { + ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { + header: payload + .as_deneb() + .map_err(|_| reject("incorrect payload variant"))? 
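// Illustrative sketch (not part of the patch): the fork gate the payload-attributes
// construction above applies — the parent beacon block root argument only exists from
// Deneb onwards, and pre-Bellatrix forks are rejected outright. Enum and types are
// simplified stand-ins for the real Lighthouse ones.
enum Fork {
    Base,
    Altair,
    Merge,
    Capella,
    Deneb,
}

fn parent_block_root_arg(fork: Fork, head_root: [u8; 32]) -> Result<Option<[u8; 32]>, &'static str> {
    match fork {
        Fork::Merge | Fork::Capella => Ok(None),
        Fork::Deneb => Ok(Some(head_root)),
        Fork::Base | Fork::Altair => Err("invalid fork"),
    }
}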
+ .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: payload + .as_capella() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Merge => BuilderBid::Merge(BuilderBidMerge { + header: payload + .as_merge() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Base | ForkName::Altair => { + return Err(reject("invalid fork")) + } + } + } + }; + + message.set_gas_limit(cached_data.gas_limit); + + builder.apply_operations(&mut message); + + let mut signature = + message.sign_builder_message(&builder.builder_sk, &builder.spec); + + if *builder.invalidate_signatures.read() { + signature = Signature::empty(); + } + + let fork_name = builder + .spec + .fork_name_at_epoch(slot.epoch(E::slots_per_epoch())); + let signed_bid = SignedBuilderBid { message, signature }; + let resp: ForkVersionedResponse<_> = ForkVersionedResponse { + version: Some(fork_name), + metadata: Default::default(), + data: signed_bid, + }; + let json_bid = serde_json::to_string(&resp) + .map_err(|_| reject("couldn't serialize signed bid"))?; + Ok::<_, Rejection>( + warp::http::Response::builder() + .status(200) + .body(json_bid) + .unwrap(), + ) + }, + ); + + let routes = warp::post() + .and(validators.or(blinded_block)) + .or(warp::get().and(status).or(header)) + .map(|reply| warp::reply::with_header(reply, "Server", "lighthouse-mock-builder-server")); + + let (listening_socket, server) = warp::serve(routes) + .try_bind_ephemeral(SocketAddrV4::new(listen_addr, listen_port)) + .expect("mock builder server should start"); + Ok((listening_socket, server)) } -pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>(ssz_rs_data: &T) -> Result<U, MevError> { - U::from_ssz_bytes( - ssz_rs::serialize(ssz_rs_data) - .map_err(convert_err)? - .as_ref(), - ) - .map_err(convert_err) -} - -pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>(ssz_data: &T) -> Result<U, MevError> { - ssz_rs::deserialize::<U>(&ssz_data.as_ssz_bytes()).map_err(convert_err) -} - -fn convert_err<E: Debug>(e: E) -> MevError { - custom_err(format!("{e:?}")) -} - -// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`.
-fn custom_err(s: String) -> MevError { - MevError::Consensus(ethereum_consensus::state_transition::Error::Io( - std::io::Error::new(std::io::ErrorKind::Other, s), - )) +fn reject(msg: &'static str) -> Rejection { + warp::reject::custom(Custom(msg.to_string())) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 05f6c137e..7afeafc32 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,15 +1,15 @@ use crate::{ test_utils::{ - MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, - DEFAULT_TERMINAL_DIFFICULTY, + MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, }, Config, *, }; +use keccak_hash::H256; +use kzg::Kzg; use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; -use tree_hash::TreeHash; -use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, MainnetEthSpec}; +use types::{Address, ChainSpec, Epoch, EthSpec, Hash256, MainnetEthSpec}; pub struct MockExecutionLayer { pub server: MockServer, @@ -31,6 +31,7 @@ impl MockExecutionLayer { None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, + None, ) } @@ -39,9 +40,10 @@ impl MockExecutionLayer { executor: TaskExecutor, terminal_block: u64, shanghai_time: Option, - builder_threshold: Option, + cancun_time: Option, jwt_key: Option, spec: ChainSpec, + kzg: Option, ) -> Self { let handle = executor.handle().unwrap(); @@ -53,6 +55,8 @@ impl MockExecutionLayer { terminal_block, spec.terminal_block_hash, shanghai_time, + cancun_time, + kzg, ); let url = SensitiveUrl::parse(&server.url()).unwrap(); @@ -65,7 +69,6 @@ impl MockExecutionLayer { execution_endpoints: vec![url], secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), - builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), ..Default::default() }; let el = @@ -96,13 +99,8 @@ impl MockExecutionLayer { justified_hash: None, finalized_hash: None, }; - let payload_attributes = PayloadAttributes::new( - timestamp, - prev_randao, - Address::repeat_byte(42), - // FIXME: think about how to handle different forks / withdrawals here.. - None, - ); + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None, None); // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); @@ -130,22 +128,27 @@ impl MockExecutionLayer { }; let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); - let payload: ExecutionPayload = self + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + + let block_proposal_content_type = self .el - .get_payload::>( + .get_payload( parent_hash, &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: do we need to consider other forks somehow? What about withdrawals? 
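// Illustrative sketch (not part of the patch): what the `reject` helper above relies
// on — a wrapper type implementing `warp::reject::Reject` (which requires `Debug`),
// mirroring the `Custom` rejection type the hunk assumes to exist.
#[derive(Debug)]
struct CustomRejection(String);

impl warp::reject::Reject for CustomRejection {}

fn reject_with(msg: &'static str) -> warp::Rejection {
    warp::reject::custom(CustomRejection(msg.to_string()))
}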
ForkName::Merge, &self.spec, + None, + BlockProductionVersion::FullV2, ) .await - .unwrap() - .to_payload() - .into(); + .unwrap(); + + let payload: ExecutionPayload = match block_proposal_content_type { + BlockProposalContentsType::Full(block) => block.to_payload().into(), + BlockProposalContentsType::Blinded(_) => panic!("Should always be a full payload"), + }; let block_hash = payload.block_hash(); assert_eq!(payload.parent_hash(), parent_hash); @@ -165,22 +168,66 @@ impl MockExecutionLayer { }; let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); - let payload_header = self + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + + let block_proposal_content_type = self .el - .get_payload::>( + .get_payload( parent_hash, &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: do we need to consider other forks somehow? What about withdrawals? ForkName::Merge, &self.spec, + None, + BlockProductionVersion::BlindedV2, ) .await - .unwrap() - .to_payload(); + .unwrap(); + match block_proposal_content_type { + BlockProposalContentsType::Full(block) => { + let payload_header = block.to_payload(); + self.assert_valid_execution_payload_on_head( + payload, + payload_header, + block_hash, + parent_hash, + block_number, + timestamp, + prev_randao, + ) + .await; + } + BlockProposalContentsType::Blinded(block) => { + let payload_header = block.to_payload(); + self.assert_valid_execution_payload_on_head( + payload, + payload_header, + block_hash, + parent_hash, + block_number, + timestamp, + prev_randao, + ) + .await; + } + }; + + self + } + + #[allow(clippy::too_many_arguments)] + pub async fn assert_valid_execution_payload_on_head>( + &self, + payload: ExecutionPayload, + payload_header: Payload, + block_hash: ExecutionBlockHash, + parent_hash: ExecutionBlockHash, + block_number: u64, + timestamp: u64, + prev_randao: H256, + ) { assert_eq!(payload_header.block_hash(), block_hash); assert_eq!(payload_header.parent_hash(), parent_hash); assert_eq!(payload_header.block_number(), block_number); @@ -191,10 +238,15 @@ impl MockExecutionLayer { assert_eq!( self.el .get_payload_by_root(&payload_header.tree_hash_root()), - Some(payload.clone()) + Some(FullPayloadContents::Payload(payload.clone())) ); - let status = self.el.notify_new_payload(&payload).await.unwrap(); + // TODO: again consider forks + let status = self + .el + .notify_new_payload(payload.try_into().unwrap()) + .await + .unwrap(); assert_eq!(status, PayloadStatus::Valid); // Use junk values for slot/head-root to ensure there is no payload supplied. 
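// Illustrative sketch (not part of the patch): the shape of the match used by the test
// helper above — a proposal known to be full is unwrapped, and a blinded variant at
// that point indicates a test bug, so panicking is acceptable. Names are stand-ins.
enum ProposalContents {
    Full(Vec<u8>),
    Blinded([u8; 32]),
}

fn expect_full_payload(contents: ProposalContents) -> Vec<u8> {
    match contents {
        ProposalContents::Full(payload) => payload,
        ProposalContents::Blinded(_) => panic!("Should always be a full payload"),
    }
}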
@@ -219,8 +271,6 @@ impl MockExecutionLayer { assert_eq!(head_execution_block.block_number(), block_number); assert_eq!(head_execution_block.block_hash(), block_hash); assert_eq!(head_execution_block.parent_hash(), parent_hash); - - self } pub fn move_to_block_prior_to_terminal_block(self) -> Self { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 059c0275b..f0be51114 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -8,6 +8,7 @@ use bytes::Bytes; use environment::null_logger; use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; +use kzg::Kzg; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -23,26 +24,31 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; -pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use execution_block_generator::{ + generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, Block, + ExecutionBlockGenerator, +}; pub use hook::Hook; -pub use mock_builder::{Context as MockBuilderContext, MockBuilder, MockBuilderServer, Operation}; +pub use mock_builder::{MockBuilder, Operation}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; -pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; pub const DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI: u128 = 10_000_000_000_000_000; pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000; pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v1: true, new_payload_v2: true, + new_payload_v3: true, forkchoice_updated_v1: true, forkchoice_updated_v2: true, + forkchoice_updated_v3: true, get_payload_bodies_by_hash_v1: true, get_payload_bodies_by_range_v1: true, get_payload_v1: true, get_payload_v2: true, + get_payload_v3: true, }; mod execution_block_generator; @@ -59,6 +65,7 @@ pub struct MockExecutionConfig { pub terminal_block: u64, pub terminal_block_hash: ExecutionBlockHash, pub shanghai_time: Option, + pub cancun_time: Option, } impl Default for MockExecutionConfig { @@ -70,6 +77,7 @@ impl Default for MockExecutionConfig { terminal_block_hash: ExecutionBlockHash::zero(), server_config: Config::default(), shanghai_time: None, + cancun_time: None, } } } @@ -90,10 +98,16 @@ impl MockServer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), None, // FIXME(capella): should this be the default? + None, // FIXME(deneb): should this be the default? + None, // FIXME(deneb): should this be the default? 
) } - pub fn new_with_config(handle: &runtime::Handle, config: MockExecutionConfig) -> Self { + pub fn new_with_config( + handle: &runtime::Handle, + config: MockExecutionConfig, + kzg: Option, + ) -> Self { let MockExecutionConfig { jwt_key, terminal_difficulty, @@ -101,6 +115,7 @@ impl MockServer { terminal_block_hash, server_config, shanghai_time, + cancun_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); @@ -109,6 +124,8 @@ impl MockServer { terminal_block, terminal_block_hash, shanghai_time, + cancun_time, + kzg, ); let ctx: Arc> = Arc::new(Context { @@ -161,6 +178,7 @@ impl MockServer { *self.ctx.engine_capabilities.write() = engine_capabilities; } + #[allow(clippy::too_many_arguments)] pub fn new( handle: &runtime::Handle, jwt_key: JwtKey, @@ -168,6 +186,8 @@ impl MockServer { terminal_block: u64, terminal_block_hash: ExecutionBlockHash, shanghai_time: Option, + cancun_time: Option, + kzg: Option, ) -> Self { Self::new_with_config( handle, @@ -178,7 +198,9 @@ impl MockServer { terminal_block, terminal_block_hash, shanghai_time, + cancun_time, }, + kzg, ) } diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index b7134e37c..fdba9f474 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -1,4 +1,4 @@ -pub use crate::{common::genesis_deposits, interop::interop_genesis_state}; +pub use crate::common::genesis_deposits; pub use eth1::Config as Eth1Config; use eth1::{DepositLog, Eth1Block, Service as Eth1Service}; diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index f1a42b874..45fc651f0 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,10 +1,11 @@ use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::types::BlobIndicesQuery; use eth2::types::BlockId as CoreBlockId; use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; +use types::{BlobSidecarList, EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. @@ -250,6 +251,37 @@ impl BlockId { } } } + + /// Return the `BlobSidecarList` identified by `self`. + pub fn blob_sidecar_list( + &self, + chain: &BeaconChain, + ) -> Result, warp::Rejection> { + let root = self.root(chain)?.0; + chain + .get_blobs(&root) + .map_err(warp_utils::reject::beacon_chain_error) + } + + pub fn blob_sidecar_list_filtered( + &self, + indices: BlobIndicesQuery, + chain: &BeaconChain, + ) -> Result, warp::Rejection> { + let blob_sidecar_list = self.blob_sidecar_list(chain)?; + let blob_sidecar_list_filtered = match indices.indices { + Some(vec) => { + let list = blob_sidecar_list + .into_iter() + .filter(|blob_sidecar| vec.contains(&blob_sidecar.index)) + .collect(); + BlobSidecarList::new(list) + .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? 
+ } + None => blob_sidecar_list, + }; + Ok(blob_sidecar_list_filtered) + } } impl FromStr for BlockId { diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 299bc019c..ad71e9e9d 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -3,13 +3,13 @@ use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; use lru::LruCache; use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; +use std::num::NonZeroUsize; use std::sync::Arc; use types::beacon_block::BlindedBeaconBlock; -use warp_utils::reject::{ - beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, -}; +use types::non_zero_usize::new_non_zero_usize; +use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; -const STATE_CACHE_SIZE: usize = 2; +const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2); /// Fetch block rewards for blocks from the canonical chain. pub fn get_block_rewards( @@ -164,11 +164,7 @@ pub fn compute_block_rewards( .build_all_committee_caches(&chain.spec) .map_err(beacon_state_error)?; - state_cache - .get_or_insert((parent_root, block.slot()), || state) - .ok_or_else(|| { - custom_server_error("LRU cache insert should always succeed".into()) - })? + state_cache.get_or_insert((parent_root, block.slot()), || state) }; // Compute block reward. diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs new file mode 100644 index 000000000..37b4049c0 --- /dev/null +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -0,0 +1,43 @@ +use beacon_chain::{BeaconBlockResponse, BeaconBlockResponseWrapper, BlockProductionError}; +use eth2::types::{BlockContents, FullBlockContents, ProduceBlockV3Response}; +use types::{EthSpec, ForkName}; +type Error = warp::reject::Rejection; + +pub fn build_block_contents( + fork_name: ForkName, + block_response: BeaconBlockResponseWrapper, +) -> Result, Error> { + match block_response { + BeaconBlockResponseWrapper::Blinded(block) => { + Ok(ProduceBlockV3Response::Blinded(block.block)) + } + BeaconBlockResponseWrapper::Full(block) => match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Ok( + ProduceBlockV3Response::Full(FullBlockContents::Block(block.block)), + ), + ForkName::Deneb => { + let BeaconBlockResponse { + block, + state: _, + blob_items, + execution_payload_value: _, + consensus_block_value: _, + } = block; + + let Some((kzg_proofs, blobs)) = blob_items else { + return Err(warp_utils::reject::block_production_error( + BlockProductionError::MissingBlobs, + )); + }; + + Ok(ProduceBlockV3Response::Full( + FullBlockContents::BlockContents(BlockContents { + block, + kzg_proofs, + blobs, + }), + )) + } + }, + } +} diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 645c19c40..aa8b0e8ff 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -1,8 +1,7 @@ -use beacon_chain::store::{metadata::CURRENT_SCHEMA_VERSION, AnchorInfo}; +use beacon_chain::store::metadata::CURRENT_SCHEMA_VERSION; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::lighthouse::DatabaseInfo; use std::sync::Arc; -use types::SignedBlindedBeaconBlock; pub fn info( chain: Arc>, @@ -11,25 +10,13 @@ pub fn info( let split = store.get_split_info(); let config = store.get_config().clone(); let anchor = store.get_anchor_info(); + let blob_info = store.get_blob_info(); 
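// Illustrative sketch (not part of the patch): the index filter applied by
// `blob_sidecar_list_filtered` above — keep everything when no indices are supplied,
// otherwise keep only the sidecars whose index was requested. Tuples stand in for
// the real sidecar type.
fn filter_blob_sidecars(
    sidecars: Vec<(u64, &'static str)>,
    indices: Option<Vec<u64>>,
) -> Vec<(u64, &'static str)> {
    match indices {
        Some(wanted) => sidecars
            .into_iter()
            .filter(|(index, _)| wanted.contains(index))
            .collect(),
        None => sidecars,
    }
}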
Ok(DatabaseInfo { schema_version: CURRENT_SCHEMA_VERSION.as_u64(), config, split, anchor, + blob_info, }) } - -pub fn historical_blocks( - chain: Arc>, - blocks: Vec>>, -) -> Result { - chain - .import_historical_block_batch(blocks) - .map_err(warp_utils::reject::beacon_chain_error)?; - - let anchor = chain.store.get_anchor_info().ok_or_else(|| { - warp_utils::reject::custom_bad_request("node is not checkpoint synced".to_string()) - })?; - Ok(anchor) -} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index cde05e682..1594668e5 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -10,9 +10,11 @@ mod attester_duties; mod block_id; mod block_packing_efficiency; mod block_rewards; +mod build_block_contents; mod builder_states; mod database; mod metrics; +mod produce_block; mod proposer_duties; mod publish_blocks; mod standard_block_rewards; @@ -24,12 +26,14 @@ pub mod test_utils; mod ui; mod validator; mod validator_inclusion; +mod validators; mod version; +use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use beacon_chain::{ attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome, validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, - BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, + BeaconChainTypes, WhenSlotSkipped, }; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; @@ -38,8 +42,10 @@ use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, - SkipRandaoVerification, ValidatorId, ValidatorStatus, + PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, ValidatorStatus, + ValidatorsRequestBody, }; +use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use logging::SSELoggingComponents; @@ -72,19 +78,21 @@ use tokio_stream::{ StreamExt, }; use types::{ - Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, - ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, + AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, + ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, RelativeEpoch, + SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ - add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response, - fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2, + add_consensus_version_header, add_ssz_content_type_header, + execution_optimistic_finalized_fork_versioned_response, inconsistent_fork_rejection, + unsupported_version_rejection, V1, V2, V3, }; use warp::http::StatusCode; +use warp::hyper::Body; use warp::sse::Event; use warp::Reply; use 
warp::{http::Response, Filter}; @@ -141,6 +149,7 @@ pub struct Config { pub enable_beacon_processor: bool, #[serde(with = "eth2::types::serde_status_code")] pub duplicate_block_status_code: StatusCode, + pub enable_light_client_server: bool, } impl Default for Config { @@ -157,6 +166,7 @@ impl Default for Config { sse_capacity_multiplier: 1, enable_beacon_processor: true, duplicate_block_status_code: StatusCode::ACCEPTED, + enable_light_client_server: false, } } } @@ -277,6 +287,18 @@ pub fn prometheus_metrics() -> warp::filters::log::Log impl Filter + Clone { + warp::any() + .and_then(move || async move { + if is_enabled { + Ok(()) + } else { + Err(warp::reject::not_found()) + } + }) + .untuple_one() +} + /// Creates a server that will serve requests using information from `ctx`. /// /// The server will shut down gracefully when the `shutdown` future resolves. @@ -567,12 +589,12 @@ pub fn serve( chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let (root, execution_optimistic, finalized) = state_id.root(&chain)?; - Ok(root) - .map(api_types::RootData::from) - .map(api_types::GenericResponse::from) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) + Ok(api_types::GenericResponse::from(api_types::RootData::from( + root, + ))) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -646,47 +668,32 @@ pub fn serve( query_res: Result| { task_spawner.blocking_json_task(Priority::P1, move || { let query = query_res?; - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - Ok(( - state - .validators() - .iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - .filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 - } - }) - }) - }) - .map(|(index, (_, balance))| { - Some(api_types::ValidatorBalanceData { - index: index as u64, - balance: *balance, - }) - }) - .collect::>(), - execution_optimistic, - finalized, - )) - }, - )?; + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + query.id.as_deref(), + ) + }) + }, + ); - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + // POST beacon/states/{state_id}/validator_balances + let post_beacon_state_validator_balances = beacon_states_path + .clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(warp::body::json()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorBalancesRequestBody| { + task_spawner.blocking_json_task(Priority::P1, move || { + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + Some(&query.ids), + ) }) }, ); @@ -704,69 +711,34 @@ pub fn serve( query_res: Result| { task_spawner.blocking_json_task(Priority::P1, move || { let query = query_res?; - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; + crate::validators::get_beacon_state_validators( + 
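// Illustrative sketch (not part of the patch): how the `enable` gate defined above
// composes with a route — when the flag is off, requests to the gated route are
// rejected with a 404 before the handler ever runs. Assumes warp 0.3; names are
// hypothetical.
use warp::Filter;

fn gate(is_enabled: bool) -> impl Filter<Extract = (), Error = warp::Rejection> + Clone {
    warp::any()
        .and_then(move || async move {
            if is_enabled {
                Ok(())
            } else {
                Err(warp::reject::not_found())
            }
        })
        .untuple_one()
}

fn gated_route(is_enabled: bool) -> impl Filter<Extract = (&'static str,), Error = warp::Rejection> + Clone {
    gate(is_enabled)
        .and(warp::path("light_client"))
        .map(|| "light client server enabled")
}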
state_id, + chain, + &query.id, + &query.status, + ) + }) + }, + ); - Ok(( - state - .validators() - .iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - .filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 - } - }) - }) - }) - // filter by status(es) if provided and map the result - .filter_map(|(index, (validator, balance))| { - let status = api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ); - - let status_matches = - query.status.as_ref().map_or(true, |statuses| { - statuses.contains(&status) - || statuses.contains(&status.superstatus()) - }); - - if status_matches { - Some(api_types::ValidatorData { - index: index as u64, - balance: *balance, - status, - validator: validator.clone(), - }) - } else { - None - } - }) - .collect::>(), - execution_optimistic, - finalized, - )) - }, - )?; - - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + // POST beacon/states/{state_id}/validators + let post_beacon_state_validators = beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(warp::body::json()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorsRequestBody| { + task_spawner.blocking_json_task(Priority::P1, move || { + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.ids, + &query.statuses, + ) }) }, ); @@ -1277,6 +1249,8 @@ pub fn serve( /* * beacon/blocks */ + let consensus_version_header_filter = + warp::header::header::(CONSENSUS_VERSION_HEADER); // POST beacon/blocks let post_beacon_blocks = eth_v1 @@ -1289,7 +1263,7 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - move |block: Arc>, + move |block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, @@ -1297,7 +1271,7 @@ pub fn serve( task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, - ProvenancedBlock::local(block), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, @@ -1314,27 +1288,29 @@ pub fn serve( .and(warp::path("blocks")) .and(warp::path::end()) .and(warp::body::bytes()) + .and(consensus_version_header_filter) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( move |block_bytes: Bytes, + consensus_version: ForkName, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = - SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "invalid SSZ: {e:?}" - )) - })?; + let block_contents = PublishBlockRequest::::from_ssz_bytes( + &block_bytes, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_block( None, - ProvenancedBlock::local(Arc::new(block)), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, @@ -1358,7 +1334,7 @@ pub fn serve( .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, - 
block: Arc>, + block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, @@ -1366,7 +1342,7 @@ pub fn serve( task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, - ProvenancedBlock::local(block), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, @@ -1384,6 +1360,7 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) .and(warp::body::bytes()) + .and(consensus_version_header_filter) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -1391,21 +1368,22 @@ pub fn serve( .then( move |validation_level: api_types::BroadcastValidationQuery, block_bytes: Bytes, + consensus_version: ForkName, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = - SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "invalid SSZ: {e:?}" - )) - })?; + let block_contents = PublishBlockRequest::::from_ssz_bytes( + &block_bytes, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_block( None, - ProvenancedBlock::local(Arc::new(block)), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, @@ -1432,14 +1410,14 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - move |block: SignedBlindedBeaconBlock, + move |block_contents: Arc>, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( - block, + block_contents, chain, &network_tx, log, @@ -1472,6 +1450,7 @@ pub fn serve( &block_bytes, &chain.spec, ) + .map(Arc::new) .map_err(|e| { warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) })?; @@ -1500,14 +1479,14 @@ pub fn serve( .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, - block: SignedBlindedBeaconBlock, + blinded_block: Arc>, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( - block, + blinded_block, chain, &network_tx, log, @@ -1541,6 +1520,7 @@ pub fn serve( &block_bytes, &chain.spec, ) + .map(Arc::new) .map_err(|e| { warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) })?; @@ -1598,8 +1578,8 @@ pub fn serve( match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) - .header("Content-Type", "application/octet-stream") .body(block.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( "failed to create response: {}", @@ -1629,13 +1609,19 @@ pub fn serve( |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (block, execution_optimistic, finalized) = - block_id.blinded_block(&chain)?; - Ok(api_types::GenericResponse::from(api_types::RootData::from( - block.canonical_root(), - )) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + // Prioritise requests for the head block root, as it is used by some VCs (including + // the Lighthouse VC) to create sync committee 
messages. + let priority = if let BlockId(eth2::types::BlockId::Head) = block_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; + Ok( + api_types::GenericResponse::from(api_types::RootData::from(block_root)) + .add_execution_optimistic_finalized(execution_optimistic, finalized), + ) }) }, ); @@ -1684,8 +1670,8 @@ pub fn serve( match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) - .header("Content-Type", "application/octet-stream") .body(block.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( "failed to create response: {}", @@ -1709,6 +1695,50 @@ pub fn serve( }, ); + /* + * beacon/blob_sidecars + */ + + // GET beacon/blob_sidecars/{block_id} + let get_blobs = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blob_sidecars")) + .and(block_id_or_err) + .and(warp::path::end()) + .and(multi_key_query::()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) + .then( + |block_id: BlockId, + indices_res: Result, + task_spawner: TaskSpawner, + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let indices = indices_res?; + let blob_sidecar_list_filtered = + block_id.blob_sidecar_list_filtered(indices, &chain)?; + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(blob_sidecar_list_filtered.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json(&api_types::GenericResponse::from( + blob_sidecar_list_filtered, + )) + .into_response()), + } + }) + }, + ); + /* * beacon/pool */ @@ -1887,8 +1917,8 @@ pub fn serve( .naive_aggregation_pool .read() .iter() - .cloned() - .filter(|att| query_filter(&att.data)), + .filter(|&att| query_filter(&att.data)) + .cloned(), ); Ok(api_types::GenericResponse::from(attestations)) }) @@ -2221,8 +2251,8 @@ pub fn serve( .map(|snapshot| { Response::builder() .status(200) - .header("Content-Type", "application/octet-stream") .body(snapshot.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( "failed to create response: {}", @@ -2233,8 +2263,8 @@ pub fn serve( .unwrap_or_else(|| { Response::builder() .status(503) - .header("Content-Type", "application/octet-stream") .body(Vec::new().into()) + .map(|res: Response| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( "failed to create response: {}", @@ -2265,11 +2295,9 @@ pub fn serve( task_spawner.blocking_json_task(Priority::P1, move || { let (rewards, execution_optimistic, finalized) = standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; - Ok(rewards) - .map(api_types::GenericResponse::from) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) + Ok(api_types::GenericResponse::from(rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -2307,8 +2335,8 @@ pub fn serve( match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) - .header("Content-Type", 
"application/octet-stream") .body(withdrawals.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( "failed to create response: {}", @@ -2328,6 +2356,167 @@ pub fn serve( }, ); + /* + * beacon/light_client + */ + + let beacon_light_client_path = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("light_client")) + .and(chain_filter.clone()); + + // GET beacon/light_client/bootstrap/{block_root} + let get_beacon_light_client_bootstrap = beacon_light_client_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path("bootstrap")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid block root value".to_string(), + )) + })) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |chain: Arc>, + task_spawner: TaskSpawner, + block_root: Hash256, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (bootstrap, fork_name) = match chain.get_light_client_bootstrap(&block_root) + { + Ok(Some(res)) => res, + Ok(None) => { + return Err(warp_utils::reject::custom_not_found( + "Light client bootstrap unavailable".to_string(), + )); + } + Err(e) => { + return Err(warp_utils::reject::custom_server_error(format!( + "Unable to obtain LightClientBootstrap instance: {e:?}" + ))); + } + }; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(bootstrap.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json(&ForkVersionedResponse { + version: Some(fork_name), + metadata: EmptyMetadata {}, + data: bootstrap, + }) + .into_response()), + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ); + + // GET beacon/light_client/optimistic_update + let get_beacon_light_client_optimistic_update = beacon_light_client_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path("optimistic_update")) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |chain: Arc>, + task_spawner: TaskSpawner, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let update = chain + .latest_seen_optimistic_update + .lock() + .clone() + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "No LightClientOptimisticUpdate is available".to_string(), + ) + })?; + + let fork_name = chain + .spec + .fork_name_at_slot::(update.signature_slot); + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(update.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json(&ForkVersionedResponse { + version: Some(fork_name), + metadata: EmptyMetadata {}, + data: update, + }) + .into_response()), + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ); + + // GET beacon/light_client/finality_update + let get_beacon_light_client_finality_update = beacon_light_client_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path("finality_update")) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |chain: Arc>, + task_spawner: TaskSpawner, + accept_header: 
Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let update = chain + .latest_seen_finality_update + .lock() + .clone() + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "No LightClientFinalityUpdate is available".to_string(), + ) + })?; + + let fork_name = chain + .spec + .fork_name_at_slot::(update.signature_slot); + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(update.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json(&ForkVersionedResponse { + version: Some(fork_name), + metadata: EmptyMetadata {}, + data: update, + }) + .into_response()), + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ); + /* * beacon/rewards */ @@ -2382,8 +2571,7 @@ pub fn serve( let execution_optimistic = chain.is_optimistic_or_invalid_head().unwrap_or_default(); - Ok(attestation_rewards) - .map(api_types::GenericResponse::from) + Ok(api_types::GenericResponse::from(attestation_rewards)) .map(|resp| resp.add_execution_optimistic(execution_optimistic)) }) }, @@ -2409,11 +2597,9 @@ pub fn serve( chain, block_id, validators, log, )?; - Ok(rewards) - .map(api_types::GenericResponse::from) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) + Ok(api_types::GenericResponse::from(rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -2513,8 +2699,8 @@ pub fn serve( .map_err(inconsistent_fork_rejection)?; Response::builder() .status(200) - .header("Content-Type", "application/octet-stream") .body(state.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) .map(|resp: warp::reply::Response| { add_consensus_version_header(resp, fork_name) }) @@ -3000,17 +3186,17 @@ pub fn serve( )) })) .and(warp::path::end()) + .and(warp::header::optional::("accept")) .and(not_while_syncing_filter.clone()) .and(warp::query::()) - .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) .then( |endpoint_version: EndpointVersion, slot: Slot, - query: api_types::ValidatorBlocksQuery, accept_header: Option, + query: api_types::ValidatorBlocksQuery, task_spawner: TaskSpawner, chain: Arc>, log: Logger| { @@ -3021,57 +3207,10 @@ pub fn serve( "slot" => slot ); - let randao_reveal = query.randao_reveal.decompress().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - })?; - - let randao_verification = - if query.skip_randao_verification == SkipRandaoVerification::Yes { - if !randao_reveal.is_infinity() { - return Err(warp_utils::reject::custom_bad_request( - "randao_reveal must be point-at-infinity if verification is skipped" - .into(), - )); - } - ProduceBlockVerification::NoVerification - } else { - ProduceBlockVerification::VerifyRandao - }; - - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .await - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .header("Content-Type", 
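// Illustrative sketch (not part of the patch): the accept-header dispatch repeated by
// the light-client and state routes above — `application/octet-stream` gets raw SSZ
// bytes, anything else falls back to the JSON envelope. Simplified stand-in types.
fn negotiate_body(accept: Option<&str>, ssz_bytes: Vec<u8>, json: String) -> (&'static str, Vec<u8>) {
    match accept {
        Some("application/octet-stream") => ("application/octet-stream", ssz_bytes),
        _ => ("application/json", json.into_bytes()),
    }
}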
"application/octet-stream") - .body(block.as_ssz_bytes().into()) - .map(|res: Response| { - add_consensus_version_header(res, fork_name) - }) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => fork_versioned_response(endpoint_version, fork_name, block) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)), + if endpoint_version == V3 { + produce_block_v3(accept_header, chain, slot, query).await + } else { + produce_block_v2(endpoint_version, accept_header, chain, slot, query).await } }) }, @@ -3099,59 +3238,8 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let randao_reveal = query.randao_reveal.decompress().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - })?; - - let randao_verification = - if query.skip_randao_verification == SkipRandaoVerification::Yes { - if !randao_reveal.is_infinity() { - return Err(warp_utils::reject::custom_bad_request( - "randao_reveal must be point-at-infinity if verification is skipped" - .into() - )); - } - ProduceBlockVerification::NoVerification - } else { - ProduceBlockVerification::VerifyRandao - }; - - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) + produce_blinded_block_v2(EndpointVersion(2), accept_header, chain, slot, query) .await - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .header("Content-Type", "application/octet-stream") - .body(block.as_ssz_bytes().into()) - .map(|res: Response| { - add_consensus_version_header(res, fork_name) - }) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - // Pose as a V2 endpoint so we return the fork `version`. 
- _ => fork_versioned_response(V2, fork_name, block) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)), - } }) }, ); @@ -3679,7 +3767,6 @@ pub fn serve( .as_ref() .ok_or(BeaconChainError::BuilderMissing) .map_err(warp_utils::reject::beacon_chain_error)?; - builder .post_builder_validators(&filtered_registration_data) .await @@ -4198,8 +4285,8 @@ pub fn serve( let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; Response::builder() .status(200) - .header("Content-Type", "application/ssz") - .body(state.as_ssz_bytes()) + .body(state.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( "failed to create response: {}", @@ -4262,31 +4349,6 @@ pub fn serve( }, ); - // POST lighthouse/database/historical_blocks - let post_lighthouse_database_historical_blocks = database_path - .and(warp::path("historical_blocks")) - .and(warp::path::end()) - .and(warp::body::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(log_filter.clone()) - .then( - |blocks: Vec>>, - task_spawner: TaskSpawner, - chain: Arc>, - log: Logger| { - info!( - log, - "Importing historical blocks"; - "count" => blocks.len(), - "source" => "http_api" - ); - task_spawner.blocking_json_task(Priority::P1, move || { - database::historical_blocks(chain, blocks) - }) - }, - ); - // GET lighthouse/analysis/block_rewards let get_lighthouse_block_rewards = warp::path("lighthouse") .and(warp::path("analysis")) @@ -4391,6 +4453,9 @@ pub fn serve( let receiver = match topic { api_types::EventTopic::Head => event_handler.subscribe_head(), api_types::EventTopic::Block => event_handler.subscribe_block(), + api_types::EventTopic::BlobSidecar => { + event_handler.subscribe_blob_sidecar() + } api_types::EventTopic::Attestation => { event_handler.subscribe_attestation() } @@ -4412,6 +4477,12 @@ pub fn serve( api_types::EventTopic::LateHead => { event_handler.subscribe_late_head() } + api_types::EventTopic::LightClientFinalityUpdate => { + event_handler.subscribe_light_client_finality_update() + } + api_types::EventTopic::LightClientOptimisticUpdate => { + event_handler.subscribe_light_client_optimistic_update() + } api_types::EventTopic::BlockReward => { event_handler.subscribe_block_reward() } @@ -4520,6 +4591,7 @@ pub fn serve( .uor(get_beacon_block_attestations) .uor(get_beacon_blinded_block) .uor(get_beacon_block_root) + .uor(get_blobs) .uor(get_beacon_pool_attestations) .uor(get_beacon_pool_attester_slashings) .uor(get_beacon_pool_proposer_slashings) @@ -4564,6 +4636,18 @@ pub fn serve( .uor(get_lighthouse_database_info) .uor(get_lighthouse_block_rewards) .uor(get_lighthouse_attestation_performance) + .uor( + enable(ctx.config.enable_light_client_server) + .and(get_beacon_light_client_optimistic_update), + ) + .uor( + enable(ctx.config.enable_light_client_server) + .and(get_beacon_light_client_finality_update), + ) + .uor( + enable(ctx.config.enable_light_client_server) + .and(get_beacon_light_client_bootstrap), + ) .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) @@ -4574,7 +4658,7 @@ pub fn serve( .boxed() .uor( warp::post().and( - warp::header::exact("Content-Type", "application/octet-stream") + warp::header::exact(CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER) // Routes which expect `application/octet-stream` go within this `and`. 
.and( post_beacon_blocks_ssz @@ -4592,6 +4676,8 @@ pub fn serve( .uor(post_beacon_pool_voluntary_exits) .uor(post_beacon_pool_sync_committees) .uor(post_beacon_pool_bls_to_execution_changes) + .uor(post_beacon_state_validators) + .uor(post_beacon_state_validator_balances) .uor(post_beacon_rewards_attestations) .uor(post_beacon_rewards_sync_committee) .uor(post_validator_duties_attester) @@ -4605,7 +4691,6 @@ pub fn serve( .uor(post_validator_liveness_epoch) .uor(post_lighthouse_liveness) .uor(post_lighthouse_database_reconstruct) - .uor(post_lighthouse_database_historical_blocks) .uor(post_lighthouse_block_rewards) .uor(post_lighthouse_ui_validator_metrics) .uor(post_lighthouse_ui_validator_info) @@ -4666,6 +4751,14 @@ fn publish_pubsub_message( ) } +/// Publish a message to the libp2p pubsub network. +fn publish_pubsub_messages( + network_tx: &UnboundedSender>, + messages: Vec>, +) -> Result<(), warp::Rejection> { + publish_network_message(network_tx, NetworkMessage::Publish { messages }) +} + /// Publish a message to the libp2p network. fn publish_network_message( network_tx: &UnboundedSender>, diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs new file mode 100644 index 000000000..0da3bdc7a --- /dev/null +++ b/beacon_node/http_api/src/produce_block.rs @@ -0,0 +1,208 @@ +use crate::{ + build_block_contents, + version::{ + add_consensus_block_value_header, add_consensus_version_header, + add_execution_payload_blinded_header, add_execution_payload_value_header, + add_ssz_content_type_header, fork_versioned_response, inconsistent_fork_rejection, + }, +}; +use beacon_chain::{ + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, +}; +use eth2::types::{ + self as api_types, EndpointVersion, ProduceBlockV3Metadata, SkipRandaoVerification, +}; +use ssz::Encode; +use std::sync::Arc; +use types::{payload::BlockProductionVersion, *}; +use warp::{ + hyper::{Body, Response}, + Reply, +}; + +pub fn get_randao_verification( + query: &api_types::ValidatorBlocksQuery, + randao_reveal_infinity: bool, +) -> Result { + let randao_verification = if query.skip_randao_verification == SkipRandaoVerification::Yes { + if !randao_reveal_infinity { + return Err(warp_utils::reject::custom_bad_request( + "randao_reveal must be point-at-infinity if verification is skipped".into(), + )); + } + ProduceBlockVerification::NoVerification + } else { + ProduceBlockVerification::VerifyRandao + }; + + Ok(randao_verification) +} + +pub async fn produce_block_v3( + accept_header: Option, + chain: Arc>, + slot: Slot, + query: api_types::ValidatorBlocksQuery, +) -> Result, warp::Rejection> { + let randao_reveal = query.randao_reveal.decompress().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + })?; + + let randao_verification = get_randao_verification(&query, randao_reveal.is_infinity())?; + + let block_response_type = chain + .produce_block_with_verification( + randao_reveal, + slot, + query.graffiti, + randao_verification, + query.builder_boost_factor, + BlockProductionVersion::V3, + ) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("failed to fetch a block: {:?}", e)) + })?; + + build_response_v3(chain, block_response_type, accept_header) +} + +pub fn build_response_v3( + chain: Arc>, + block_response: BeaconBlockResponseWrapper, + accept_header: Option, +) -> Result, warp::Rejection> { + let fork_name = block_response + 
.fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let execution_payload_value = block_response.execution_payload_value(); + let consensus_block_value = block_response.consensus_block_value_wei(); + let execution_payload_blinded = block_response.is_blinded(); + + let metadata = ProduceBlockV3Metadata { + consensus_version: fork_name, + execution_payload_blinded, + execution_payload_value, + consensus_block_value, + }; + + let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(block_contents.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map(|res: Response| add_consensus_version_header(res, fork_name)) + .map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded)) + .map(|res: Response| { + add_execution_payload_value_header(res, execution_payload_value) + }) + .map(|res| add_consensus_block_value_header(res, consensus_block_value)) + .map_err(|e| -> warp::Rejection { + warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) + }), + _ => Ok(warp::reply::json(&ForkVersionedResponse { + version: Some(fork_name), + metadata, + data: block_contents, + }) + .into_response()) + .map(|res| res.into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) + .map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded)) + .map(|res| add_execution_payload_value_header(res, execution_payload_value)) + .map(|res| add_consensus_block_value_header(res, consensus_block_value)), + } +} + +pub async fn produce_blinded_block_v2( + endpoint_version: EndpointVersion, + accept_header: Option, + chain: Arc>, + slot: Slot, + query: api_types::ValidatorBlocksQuery, +) -> Result, warp::Rejection> { + let randao_reveal = query.randao_reveal.decompress().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + })?; + + let randao_verification = get_randao_verification(&query, randao_reveal.is_infinity())?; + let block_response_type = chain + .produce_block_with_verification( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + None, + BlockProductionVersion::BlindedV2, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + + build_response_v2(chain, block_response_type, endpoint_version, accept_header) +} + +pub async fn produce_block_v2( + endpoint_version: EndpointVersion, + accept_header: Option, + chain: Arc>, + slot: Slot, + query: api_types::ValidatorBlocksQuery, +) -> Result, warp::Rejection> { + let randao_reveal = query.randao_reveal.decompress().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + })?; + + let randao_verification = get_randao_verification(&query, randao_reveal.is_infinity())?; + + let block_response_type = chain + .produce_block_with_verification( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + None, + BlockProductionVersion::FullV2, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + + build_response_v2(chain, block_response_type, endpoint_version, accept_header) +} + +pub fn build_response_v2( + chain: Arc>, + block_response: BeaconBlockResponseWrapper, + endpoint_version: EndpointVersion, + accept_header: Option, +) -> Result, warp::Rejection> { + let fork_name = 
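// Illustrative sketch (not part of the patch): the invariant enforced by
// `get_randao_verification` above — skipping randao verification is only legal when
// the supplied reveal is the point at infinity. Returns whether to verify.
fn randao_verification_mode(skip_requested: bool, reveal_is_infinity: bool) -> Result<bool, &'static str> {
    match (skip_requested, reveal_is_infinity) {
        (false, _) => Ok(true),
        (true, true) => Ok(false),
        (true, false) => Err("randao_reveal must be point-at-infinity if verification is skipped"),
    }
}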
block_response + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(block_contents.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map(|res: Response| add_consensus_version_header(res, fork_name)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) + }), + _ => fork_versioned_response(endpoint_version, fork_name, block_contents) + .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)), + } +} diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 708df39b4..c31dd9b1f 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -97,12 +97,12 @@ fn try_proposer_duties_from_cache( let head = chain.canonical_head.cached_head(); let head_block = &head.snapshot.beacon_block; let head_block_root = head.head_block_root(); + let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch()); let head_decision_root = head .snapshot .beacon_state .proposer_shuffling_decision_root(head_block_root) .map_err(warp_utils::reject::beacon_state_error)?; - let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch()); let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::beacon_chain_error)?; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 58524f069..8b85c2ac9 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,10 +1,13 @@ use crate::metrics; + +use beacon_chain::block_verification_types::{AsBlock, BlockContentsError}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, - NotifyExecutionLayer, + AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, + IntoGossipVerifiedBlockContents, NotifyExecutionLayer, }; -use eth2::types::{BroadcastValidation, ErrorMessage}; +use eth2::types::{into_full_block_and_blobs, BroadcastValidation, ErrorMessage}; +use eth2::types::{FullPayloadContents, PublishBlockRequest}; use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; @@ -16,13 +19,14 @@ use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, - FullPayload, Hash256, SignedBeaconBlock, + AbstractExecPayload, BeaconBlockRef, BlobSidecarList, EthSpec, ExecPayload, ExecutionBlockHash, + ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, + VariableList, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; -pub enum ProvenancedBlock> { +pub enum ProvenancedBlock> { /// The payload was built using a local EE. 
Local(B, PhantomData), /// The payload was built using a remote builder (e.g., via a mev-boost @@ -30,7 +34,7 @@ pub enum ProvenancedBlock> { Builder(B, PhantomData), } -impl> ProvenancedBlock { +impl> ProvenancedBlock { pub fn local(block: B) -> Self { Self::Local(block, PhantomData) } @@ -41,7 +45,7 @@ impl> ProvenancedBlock } /// Handles a request from the HTTP API for full blocks. -pub async fn publish_block>( +pub async fn publish_block>( block_root: Option, provenanced_block: ProvenancedBlock, chain: Arc>, @@ -51,16 +55,18 @@ pub async fn publish_block>( duplicate_status_code: StatusCode, ) -> Result { let seen_timestamp = timestamp_now(); - let (block, is_locally_built_block) = match provenanced_block { - ProvenancedBlock::Local(block, _) => (block, true), - ProvenancedBlock::Builder(block, _) => (block, false), + + let (block_contents, is_locally_built_block) = match provenanced_block { + ProvenancedBlock::Local(block_contents, _) => (block_contents, true), + ProvenancedBlock::Builder(block_contents, _) => (block_contents, false), }; - let beacon_block = block.inner(); - let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock); - debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot()); + let block = block_contents.inner_block().clone(); + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); /* actually publish a block */ let publish_block = move |block: Arc>, + blobs_opt: Option>, sender, log, seen_timestamp| { @@ -71,103 +77,161 @@ pub async fn publish_block>( info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay); - let message = PubsubMessage::BeaconBlock(block); - crate::publish_pubsub_message(&sender, message) - .map_err(|_| BeaconChainError::UnableToPublish.into()) + match block.as_ref() { + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Capella(_) => { + crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; + } + SignedBeaconBlock::Deneb(_) => { + let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block.clone())]; + if let Some(blob_sidecars) = blobs_opt { + for (blob_index, blob) in blob_sidecars.into_iter().enumerate() { + pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new(( + blob_index as u64, + blob, + )))); + } + } + crate::publish_pubsub_messages(&sender, pubsub_messages) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; + } + }; + Ok(()) }; + /* only publish if gossip- and consensus-valid and equivocation-free */ + let chain_clone = chain.clone(); + let slot = block.message().slot(); + let proposer_index = block.message().proposer_index(); + let sender_clone = network_tx.clone(); + let log_clone = log.clone(); + /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ - let gossip_verified_block = match block.into_gossip_verified_block(&chain) { - Ok(b) => b, - Err(BlockError::BlockIsAlreadyKnown) => { - // Allow the status code for duplicate blocks to be overridden based on config.
- return Ok(warp::reply::with_status( - warp::reply::json(&ErrorMessage { - code: duplicate_status_code.as_u16(), - message: "duplicate block".to_string(), - stacktraces: vec![], - }), - duplicate_status_code, - ) - .into_response()); - } - Err(e) => { - warn!( - log, - "Not publishing block - not gossip verified"; - "slot" => beacon_block.slot(), - "error" => ?e - ); - return Err(warp_utils::reject::custom_bad_request(e.to_string())); - } - }; + let (gossip_verified_block, gossip_verified_blobs) = + match block_contents.into_gossip_verified_block(&chain) { + Ok(b) => b, + Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown)) + | Err(BlockContentsError::BlobError( + beacon_chain::blob_verification::GossipBlobError::RepeatBlob { .. }, + )) => { + // Allow the status code for duplicate blocks to be overridden based on config. + return Ok(warp::reply::with_status( + warp::reply::json(&ErrorMessage { + code: duplicate_status_code.as_u16(), + message: "duplicate block".to_string(), + stacktraces: vec![], + }), + duplicate_status_code, + ) + .into_response()); + } + Err(e) => { + warn!( + log, + "Not publishing block - not gossip verified"; + "slot" => slot, + "error" => ?e + ); + return Err(warp_utils::reject::custom_bad_request(e.to_string())); + } + }; + + // Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not + // `Arc`'d, but blobs are. + let block = gossip_verified_block.block.block_cloned(); + let blobs_opt = gossip_verified_blobs.as_ref().map(|gossip_verified_blobs| { + let blobs = gossip_verified_blobs + .into_iter() + .map(|b| b.clone_blob()) + .collect::>(); + VariableList::from(blobs) + }); let block_root = block_root.unwrap_or(gossip_verified_block.block_root); if let BroadcastValidation::Gossip = validation_level { publish_block( - beacon_block.clone(), - network_tx.clone(), + block.clone(), + blobs_opt.clone(), + sender_clone.clone(), log.clone(), seen_timestamp, ) .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; } - /* only publish if gossip- and consensus-valid and equivocation-free */ - let chain_clone = chain.clone(); - let block_clone = beacon_block.clone(); - let log_clone = log.clone(); - let sender_clone = network_tx.clone(); + let block_clone = block.clone(); let publish_fn = move || match validation_level { BroadcastValidation::Gossip => Ok(()), - BroadcastValidation::Consensus => { - publish_block(block_clone, sender_clone, log_clone, seen_timestamp) - } + BroadcastValidation::Consensus => publish_block( + block_clone, + blobs_opt, + sender_clone, + log_clone, + seen_timestamp, + ), BroadcastValidation::ConsensusAndEquivocation => { - if chain_clone - .observed_block_producers - .read() - .proposer_has_been_observed(block_clone.message(), block_root) - .map_err(|e| BlockError::BeaconChainError(e.into()))?
- .is_slashable() - { - warn!( - log_clone, - "Not publishing equivocating block"; - "slot" => block_clone.slot() - ); - Err(BlockError::Slashable) - } else { - publish_block(block_clone, sender_clone, log_clone, seen_timestamp) - } + check_slashable( + &chain_clone, + &blobs_opt, + block_root, + &block_clone, + &log_clone, + )?; + publish_block( + block_clone, + blobs_opt, + sender_clone, + log_clone, + seen_timestamp, + ) } }; - match chain - .process_block( - block_root, - gossip_verified_block, - NotifyExecutionLayer::Yes, - publish_fn, - ) - .await + if let Some(gossip_verified_blobs) = gossip_verified_blobs { + for blob in gossip_verified_blobs { + if let Err(e) = Box::pin(chain.process_gossip_blob(blob)).await { + let msg = format!("Invalid blob: {e}"); + return if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(msg)) + } else { + error!( + log, + "Invalid blob provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(msg)) + }; + } + } + } + + match Box::pin(chain.process_block( + block_root, + gossip_verified_block, + NotifyExecutionLayer::Yes, + publish_fn, + )) + .await { - Ok(root) => { + Ok(AvailabilityProcessingStatus::Imported(root)) => { info!( log, "Valid block from HTTP API"; "block_delay" => ?delay, "root" => format!("{}", root), - "proposer_index" => beacon_block.message().proposer_index(), - "slot" => beacon_block.slot(), + "proposer_index" => proposer_index, + "slot" => slot, ); // Notify the validator monitor. chain.validator_monitor.read().register_api_block( seen_timestamp, - beacon_block.message(), + block.message(), root, &chain.slot_clock, ); @@ -180,17 +244,23 @@ pub async fn publish_block>( // blocks built with builders we consider the broadcast time to be // when the blinded block is published to the builder. if is_locally_built_block { - late_block_logging( - &chain, - seen_timestamp, - beacon_block.message(), - root, - "local", - &log, - ) + late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log) } Ok(warp::reply().into_response()) } + Ok(AvailabilityProcessingStatus::MissingComponents(_, block_root)) => { + let msg = format!("Missing parts of block with root {:?}", block_root); + if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(msg)) + } else { + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(msg)) + } + } Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { Err(warp_utils::reject::custom_server_error( "unable to publish to network channel".to_string(), @@ -220,16 +290,16 @@ pub async fn publish_block>( /// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full /// blocks before publishing.
pub async fn publish_blinded_block( - block: SignedBeaconBlock>, + blinded_block: Arc>, chain: Arc>, network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, duplicate_status_code: StatusCode, ) -> Result { - let block_root = block.canonical_root(); - let full_block: ProvenancedBlock>> = - reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; + let block_root = blinded_block.canonical_root(); + let full_block: ProvenancedBlock> = + reconstruct_block(chain.clone(), block_root, blinded_block, log.clone()).await?; publish_block::( Some(block_root), full_block, @@ -248,28 +318,27 @@ pub async fn publish_blinded_block( pub async fn reconstruct_block( chain: Arc>, block_root: Hash256, - block: SignedBeaconBlock>, + block: Arc>, log: Logger, -) -> Result>>, Rejection> { +) -> Result>, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) })?; // If the execution block hash is zero, use an empty payload. - let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { - let payload = FullPayload::default_at_fork( - chain - .spec - .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), - ) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "Default payload construction error: {e:?}" - )) - })? - .into(); - ProvenancedPayload::Local(payload) + let full_payload_contents = if payload_header.block_hash() == ExecutionBlockHash::zero() { + let fork_name = chain + .spec + .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())); + if fork_name == ForkName::Merge { + let payload: FullPayload = FullPayloadMerge::default().into(); + ProvenancedPayload::Local(FullPayloadContents::Payload(payload.into())) + } else { + Err(warp_utils::reject::custom_server_error( + "Failed to construct full payload - block hash must be non-zero after Bellatrix.".to_string() + ))? + } // If we already have an execution payload with this transactions root cached, use it. } else if let Some(cached_payload) = el.get_payload_by_root(&payload_header.tree_hash_root()) @@ -305,29 +374,30 @@ pub async fn reconstruct_block( ProvenancedPayload::Builder(full_payload) }; - Some(full_payload) + Some(full_payload_contents) } else { None }; + // Perf: cloning the block here to unblind it is a little sub-optimal. This is considered an + // acceptable tradeoff to avoid passing blocks around on the stack (unarced), which blows up + // the size of futures. + let block = (*block).clone(); match full_payload_opt { // A block without a payload is pre-merge and we consider it locally // built. 
- None => block - .try_into_full_block(None) - .map(Arc::new) - .map(ProvenancedBlock::local), - Some(ProvenancedPayload::Local(full_payload)) => block - .try_into_full_block(Some(full_payload)) - .map(Arc::new) - .map(ProvenancedBlock::local), - Some(ProvenancedPayload::Builder(full_payload)) => block - .try_into_full_block(Some(full_payload)) - .map(Arc::new) - .map(ProvenancedBlock::builder), + None => into_full_block_and_blobs(block, None).map(ProvenancedBlock::local), + Some(ProvenancedPayload::Local(full_payload_contents)) => { + into_full_block_and_blobs(block, Some(full_payload_contents)) + .map(ProvenancedBlock::local) + } + Some(ProvenancedPayload::Builder(full_payload_contents)) => { + into_full_block_and_blobs(block, Some(full_payload_contents)) + .map(ProvenancedBlock::builder) + } } - .ok_or_else(|| { - warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!("Unable to add payload to block: {e:?}")) }) } @@ -378,3 +448,46 @@ fn late_block_logging>( ) } } + +/// Check if any of the blobs or the block are slashable. Returns `BlockError::Slashable` if so. +fn check_slashable( + chain_clone: &BeaconChain, + blobs_opt: &Option>, + block_root: Hash256, + block_clone: &SignedBeaconBlock>, + log_clone: &Logger, +) -> Result<(), BlockError> { + let slashable_cache = chain_clone.observed_slashable.read(); + if let Some(blobs) = blobs_opt.as_ref() { + blobs.iter().try_for_each(|blob| { + if slashable_cache + .is_slashable(blob.slot(), blob.block_proposer_index(), blob.block_root()) + .map_err(|e| BlockError::BeaconChainError(e.into()))? + { + warn!( + log_clone, + "Not publishing equivocating blob"; + "slot" => block_clone.slot() + ); + return Err(BlockError::Slashable); + } + Ok(()) + })?; + }; + if slashable_cache + .is_slashable( + block_clone.slot(), + block_clone.message().proposer_index(), + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))? + { + warn!( + log_clone, + "Not publishing equivocating block"; + "slot" => block_clone.slot() + ); + return Err(BlockError::Slashable); + } + Ok(()) +} diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs index de7e5eb7d..97e5a87fd 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -5,8 +5,8 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::lighthouse::StandardBlockReward; use std::sync::Arc; use warp_utils::reject::beacon_chain_error; -//// The difference between block_rewards and beacon_block_rewards is the later returns block -//// reward format that satisfies beacon-api specs +/// The difference between block_rewards and beacon_block_rewards is that the latter returns a block +/// reward format that satisfies beacon-api specs pub fn compute_beacon_block_rewards( chain: Arc>, block_id: BlockId, diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index dcf41429f..8b0c7dc0e 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -30,9 +30,7 @@ pub fn sync_committee_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let altair_fork_epoch = if let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch { - altair_fork_epoch - } else { + let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch else { // Empty response for networks with Altair disabled.
return Ok(convert_to_response(vec![], false)); }; diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 4ab7f9135..b87fdf608 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,15 +1,13 @@ use crate::{Config, Context}; use beacon_chain::{ - test_utils::{ - BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, - }, + test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, }; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig}; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use lighthouse_network::{ - discv5::enr::{CombinedKey, EnrBuilder}, + discv5::enr::CombinedKey, libp2p::swarm::{ behaviour::{ConnectionEstablished, FromSwarm}, ConnectionId, NetworkBehaviour, @@ -53,9 +51,8 @@ pub struct ApiServer> { pub external_peer_id: PeerId, } -type Initializer = Box< - dyn FnOnce(HarnessBuilder>) -> HarnessBuilder>, ->; +type HarnessBuilder = Builder>; +type Initializer = Box) -> HarnessBuilder>; type Mutator = BoxedMutator, MemoryStore>; impl InteractiveTester { @@ -141,7 +138,7 @@ pub async fn create_api_server( syncnets: EnrSyncCommitteeBitfield::::default(), }); let enr_key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); + let enr = Enr::builder().build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( enr.clone(), meta_data, @@ -212,6 +209,7 @@ pub async fn create_api_server( enabled: true, listen_port: port, data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), + enable_light_client_server: true, ..Config::default() }, chain: Some(chain), diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs index 18e9dbf63..7f11ddd8f 100644 --- a/beacon_node/http_api/src/validator.rs +++ b/beacon_node/http_api/src/validator.rs @@ -1,5 +1,5 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use types::*; +use types::{BeaconState, PublicKeyBytes}; /// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a validator /// index and then ensures that the validator exists in the given `state`. 
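The new `validators.rs` module introduced in the next file centralizes the id/status filtering shared by the state-validators endpoints (including the `POST` variants wired into the router earlier): an omitted query filter matches every validator, while a supplied filter matches if any of its entries hits. A minimal, self-contained sketch of that matching rule follows; it uses simplified stand-in types rather than the real `eth2::types` definitions, and the pubkey is faked as a `u64` purely to keep the example compilable.

```rust
// Hedged sketch of the optional-filter semantics; `ValidatorId` below is a
// stand-in, not the real `eth2::types::ValidatorId`.
#[derive(Clone, Copy, PartialEq, Eq)]
enum ValidatorId {
    Index(u64),
    // The real variant holds a BLS public key; a u64 stands in here.
    PublicKey(u64),
}

fn id_matches(index: u64, pubkey: u64, ids: Option<&[ValidatorId]>) -> bool {
    // `map_or(true, ...)`: no filter supplied means every validator matches.
    ids.map_or(true, |ids| {
        ids.iter().any(|id| match id {
            ValidatorId::Index(i) => *i == index,
            ValidatorId::PublicKey(pk) => *pk == pubkey,
        })
    })
}

fn main() {
    // Validator at index 7 with stand-in pubkey 0xabcd.
    assert!(id_matches(7, 0xabcd, None)); // no filter: matches
    assert!(id_matches(7, 0xabcd, Some(&[ValidatorId::Index(7)])));
    assert!(!id_matches(7, 0xabcd, Some(&[ValidatorId::Index(8)])));
}
```

The status filter composes the same way, with one extra twist: a validator also matches when the request names its superstatus (e.g. requesting `active` matches a validator whose fine-grained status is `active_ongoing`), which is why the module checks `statuses.contains(&status) || statuses.contains(&status.superstatus())`.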
diff --git a/beacon_node/http_api/src/validators.rs b/beacon_node/http_api/src/validators.rs new file mode 100644 index 000000000..20af7a680 --- /dev/null +++ b/beacon_node/http_api/src/validators.rs @@ -0,0 +1,119 @@ +use crate::state_id::StateId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{ + self as api_types, ExecutionOptimisticFinalizedResponse, ValidatorBalanceData, ValidatorData, + ValidatorId, ValidatorStatus, +}; +use std::sync::Arc; + +pub fn get_beacon_state_validators( + state_id: StateId, + chain: Arc>, + query_ids: &Option>, + query_statuses: &Option>, +) -> Result>, warp::Rejection> { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; + + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + query_ids.as_ref().map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => &validator.pubkey == pubkey, + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) + }) + // filter by status(es) if provided and map the result + .filter_map(|(index, (validator, balance))| { + let status = api_types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ); + + let status_matches = query_statuses.as_ref().map_or(true, |statuses| { + statuses.contains(&status) + || statuses.contains(&status.superstatus()) + }); + + if status_matches { + Some(ValidatorData { + index: index as u64, + balance: *balance, + status, + validator: validator.clone(), + }) + } else { + None + } + }) + .collect::>(), + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) +} + +pub fn get_beacon_state_validator_balances( + state_id: StateId, + chain: Arc>, + optional_ids: Option<&[ValidatorId]>, +) -> Result>, warp::Rejection> { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + optional_ids.map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => &validator.pubkey == pubkey, + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) + }) + .map(|(index, (_, balance))| ValidatorBalanceData { + index: index as u64, + balance: *balance, + }) + .collect::>(), + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) +} diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index e01ff9822..59816cb89 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,12 +1,20 @@ -use crate::api_types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; use crate::api_types::EndpointVersion; -use eth2::CONSENSUS_VERSION_HEADER; +use eth2::{ + CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, 
CONTENT_TYPE_HEADER, + EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, SSZ_CONTENT_TYPE_HEADER, +}; use serde::Serialize; -use types::{ForkName, ForkVersionedResponse, InconsistentFork}; +use types::{ + fork_versioned_response::{ + ExecutionOptimisticFinalizedForkVersionedResponse, ExecutionOptimisticFinalizedMetadata, + }, + ForkName, ForkVersionedResponse, InconsistentFork, Uint256, +}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); +pub const V3: EndpointVersion = EndpointVersion(3); pub fn fork_versioned_response( endpoint_version: EndpointVersion, @@ -15,13 +23,14 @@ pub fn fork_versioned_response( ) -> Result, warp::reject::Rejection> { let fork_name = if endpoint_version == V1 { None - } else if endpoint_version == V2 { + } else if endpoint_version == V2 || endpoint_version == V3 { Some(fork_name) } else { return Err(unsupported_version_rejection(endpoint_version)); }; Ok(ForkVersionedResponse { version: fork_name, + metadata: Default::default(), data, }) } @@ -42,17 +51,63 @@ pub fn execution_optimistic_finalized_fork_versioned_response( }; Ok(ExecutionOptimisticFinalizedForkVersionedResponse { version: fork_name, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), + metadata: ExecutionOptimisticFinalizedMetadata { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, data, }) } +/// Add the `Content-Type: application/octet-stream` header to a response. +pub fn add_ssz_content_type_header(reply: T) -> Response { + reply::with_header(reply, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER).into_response() +} + /// Add the `Eth-Consensus-Version` header to a response. pub fn add_consensus_version_header(reply: T, fork_name: ForkName) -> Response { reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()).into_response() } +/// Add the `Eth-Execution-Payload-Blinded` header to a response. +pub fn add_execution_payload_blinded_header( + reply: T, + execution_payload_blinded: bool, +) -> Response { + reply::with_header( + reply, + EXECUTION_PAYLOAD_BLINDED_HEADER, + execution_payload_blinded.to_string(), + ) + .into_response() +} + +/// Add the `Eth-Execution-Payload-Value` header to a response. +pub fn add_execution_payload_value_header( + reply: T, + execution_payload_value: Uint256, +) -> Response { + reply::with_header( + reply, + EXECUTION_PAYLOAD_VALUE_HEADER, + execution_payload_value.to_string(), + ) + .into_response() +} + +/// Add the `Eth-Consensus-Block-Value` header to a response.
+pub fn add_consensus_block_value_header( + reply: T, + consensus_payload_value: Uint256, +) -> Response { + reply::with_header( + reply, + CONSENSUS_BLOCK_VALUE_HEADER, + consensus_payload_value.to_string(), + ) + .into_response() +} + pub fn inconsistent_fork_rejection(error: InconsistentFork) -> warp::reject::Rejection { warp_utils::reject::custom_server_error(format!("wrong fork: {:?}", error)) } diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 96ff37d81..6a3f7947e 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,17 +1,17 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, - GossipVerifiedBlock, + GossipVerifiedBlock, IntoGossipVerifiedBlockContents, }; -use eth2::types::{BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock}; +use eth2::reqwest::StatusCode; +use eth2::types::{BroadcastValidation, PublishBlockRequest}; use http_api::test_utils::InteractiveTester; use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; +use std::sync::Arc; use tree_hash::TreeHash; -use types::{Hash256, MainnetEthSpec, Slot}; +use types::{Epoch, EthSpec, ForkName, Hash256, MainnetEthSpec, Slot}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; -use eth2::reqwest::StatusCode; - type E = MainnetEthSpec; /* @@ -63,7 +63,7 @@ pub async fn gossip_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -73,7 +73,7 @@ pub async fn gossip_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -83,7 +83,7 @@ pub async fn gossip_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -115,7 +115,7 @@ pub async fn gossip_partial_pass() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::random() @@ -124,7 +124,7 @@ pub async fn gossip_partial_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -161,11 +161,14 @@ pub async fn gossip_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, 
validation_level) + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), blobs), + validation_level, + ) .await; assert!(response.is_ok()); @@ -185,7 +188,10 @@ pub async fn gossip_full_pass_ssz() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let tester = InteractiveTester::::new(None, validator_count).await; + // Deneb epoch set ahead of block slot, to test fork-based decoding + let mut spec = ForkName::Capella.make_genesis_spec(MainnetEthSpec::default_spec()); + spec.deneb_fork_epoch = Some(Epoch::new(4)); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. tester.harness.advance_slot(); @@ -203,18 +209,19 @@ pub async fn gossip_full_pass_ssz() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + let (block_contents_tuple, _) = tester.harness.make_block(state_a, slot_b).await; + let block_contents = block_contents_tuple.into(); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2_ssz(&block, validation_level) + .post_beacon_blocks_v2_ssz(&block_contents, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. @@ -244,7 +251,7 @@ pub async fn consensus_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -254,7 +261,7 @@ pub async fn consensus_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -264,7 +271,7 @@ pub async fn consensus_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -296,14 +303,14 @@ pub async fn consensus_gossip() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _) = tester .harness .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -346,18 +353,18 @@ pub async fn consensus_partial_pass_only_consensus() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBeaconBlock, _) = - tester.harness.make_block(state_a.clone(), slot_b).await; - let 
(block_b, state_after_b): (SignedBeaconBlock, _) = - tester.harness.make_block(state_a, slot_b).await; + let ((block_a, _), state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; + let ((block_b, blobs_b), state_after_b) = tester.harness.make_block(state_a, slot_b).await; + let block_b_root = block_b.canonical_root(); /* check for `make_block` curios */ assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); - assert!(gossip_block_b.is_ok()); + let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) + .into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_contents_b.is_ok()); let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); assert!(gossip_block_a.is_err()); @@ -366,7 +373,7 @@ pub async fn consensus_partial_pass_only_consensus() { let publication_result = publish_block( None, - ProvenancedBlock::local(gossip_block_b.unwrap()), + ProvenancedBlock::local(gossip_block_contents_b.unwrap()), tester.harness.chain.clone(), &channel.0, test_logger, @@ -379,7 +386,7 @@ pub async fn consensus_partial_pass_only_consensus() { assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block_b.canonical_root())); + .block_is_known_to_fork_choice(&block_b_root)); } /// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. @@ -410,11 +417,14 @@ pub async fn consensus_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), blobs), + validation_level, + ) .await; assert!(response.is_ok()); @@ -453,7 +463,7 @@ pub async fn equivocation_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -463,7 +473,7 @@ pub async fn equivocation_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -473,7 +483,7 @@ pub async fn equivocation_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -506,10 +516,9 @@ pub async fn equivocation_consensus_early_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): 
(SignedBeaconBlock, _) = + let ((block_a, blobs_a), state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; - let (block_b, state_after_b): (SignedBeaconBlock, _) = - tester.harness.make_block(state_a, slot_b).await; + let ((block_b, blobs_b), state_after_b) = tester.harness.make_block(state_a, slot_b).await; /* check for `make_block` curios */ assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); @@ -519,7 +528,10 @@ pub async fn equivocation_consensus_early_equivocation() { /* submit `block_a` as valid */ assert!(tester .client - .post_beacon_blocks_v2(&block_a, validation_level) + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block_a.clone(), blobs_a), + validation_level + ) .await .is_ok()); assert!(tester @@ -530,7 +542,10 @@ pub async fn equivocation_consensus_early_equivocation() { /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block_b, validation_level) + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block_b.clone(), blobs_b), + validation_level, + ) .await; assert!(response.is_err()); @@ -539,7 +554,7 @@ pub async fn equivocation_consensus_early_equivocation() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string()) ); } @@ -572,14 +587,14 @@ pub async fn equivocation_gossip() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _) = tester .harness .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -593,9 +608,11 @@ pub async fn equivocation_gossip() { ); } -/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. +/// This test checks that a block that is valid from both a gossip and consensus perspective but +/// that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. /// -/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. +/// This test is unique in that we can't actually test the HTTP API directly, but instead have to +/// hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ @@ -625,26 +642,27 @@ pub async fn equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBeaconBlock, _) = + let ((block_a, blobs_a), state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; - let (block_b, state_after_b): (SignedBeaconBlock, _) = - tester.harness.make_block(state_a, slot_b).await; + let ((block_b, blobs_b), state_after_b) = tester.harness.make_block(state_a, slot_b).await; /* check for `make_block` curios */ assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); - assert!(gossip_block_b.is_ok()); - let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); - assert!(gossip_block_a.is_err()); + let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) + .into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_contents_b.is_ok()); + let gossip_block_contents_a = PublishBlockRequest::new(block_a, blobs_a) + .into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_contents_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); let publication_result = publish_block( None, - ProvenancedBlock::local(gossip_block_b.unwrap()), + ProvenancedBlock::local(gossip_block_contents_b.unwrap()), tester.harness.chain, &channel.0, test_logger, @@ -694,11 +712,14 @@ pub async fn equivocation_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), blobs), + validation_level, + ) .await; assert!(response.is_ok()); @@ -736,7 +757,7 @@ pub async fn blinded_gossip_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -744,11 +765,9 @@ pub async fn blinded_gossip_invalid() { }) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -758,7 +777,7 @@ pub async fn blinded_gossip_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 
0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -790,18 +809,16 @@ pub async fn blinded_gossip_partial_pass() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero() }) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -838,19 +855,17 @@ pub async fn blinded_gossip_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; - + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } // This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. @@ -881,19 +896,18 @@ pub async fn blinded_gossip_full_pass_ssz() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2_ssz(&block, validation_level) + .post_beacon_blinded_blocks_v2_ssz(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. 
@@ -924,7 +938,7 @@ pub async fn blinded_consensus_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -932,11 +946,9 @@ pub async fn blinded_consensus_invalid() { }) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -946,7 +958,7 @@ pub async fn blinded_consensus_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -978,16 +990,14 @@ pub async fn blinded_consensus_gossip() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1029,19 +1039,18 @@ pub async fn blinded_consensus_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -1073,7 +1082,7 @@ pub async fn blinded_equivocation_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -1081,11 +1090,9 @@ pub async fn blinded_equivocation_invalid() { }) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1095,7 +1102,7 @@ pub async fn blinded_equivocation_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -1128,12 +1135,11 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + let (block_a, state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; /* check for `make_blinded_block` curios */ assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); @@ -1163,7 +1169,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string()) ); } @@ -1196,16 +1202,14 @@ pub async fn blinded_equivocation_gossip() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1219,9 +1223,13 @@ pub async fn blinded_equivocation_gossip() { ); } -/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. +/// This test checks that a block that is valid from both a gossip and +/// consensus perspective but that equivocates **late** is rejected when using +/// `broadcast_validation=consensus_and_equivocation`. 
/// -/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. +/// This test is unique in that we can't actually test the HTTP API directly, +/// but instead have to hook into the `publish_blocks` code manually. This is +/// in order to handle the late equivocation case. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn blinded_equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ @@ -1251,12 +1259,12 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + let (block_a, state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; + let block_b = Arc::new(block_b); /* check for `make_blinded_block` curios */ assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); @@ -1265,15 +1273,15 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let unblinded_block_a = reconstruct_block( tester.harness.chain.clone(), - block_a.state_root(), - block_a, + block_a.canonical_root(), + Arc::new(block_a), test_logger.clone(), ) .await .unwrap(); let unblinded_block_b = reconstruct_block( tester.harness.chain.clone(), - block_b.clone().state_root(), + block_b.canonical_root(), block_b.clone(), test_logger.clone(), ) @@ -1289,9 +1297,11 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { ProvenancedBlock::Builder(b, _) => b, }; - let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain); + let gossip_block_b = + GossipVerifiedBlock::new(inner_block_b.clone().deconstruct().0, &tester.harness.chain); assert!(gossip_block_b.is_ok()); - let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain); + let gossip_block_a = + GossipVerifiedBlock::new(inner_block_a.clone().deconstruct().0, &tester.harness.chain); assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); @@ -1342,12 +1352,11 @@ pub async fn blinded_equivocation_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blinded_blocks_v2(&block, validation_level) .await; assert!(response.is_ok()); diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 0ab3c706e..74b264756 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -4,6 +4,7 @@ use beacon_chain::{ StateSkipConfig, }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; +use execution_layer::test_utils::generate_genesis_header; use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; use http_api::test_utils::*; use std::collections::HashSet; @@ -354,12 +355,13 @@ async fn 
bls_to_execution_changes_update_all_around_capella_fork() { .iter() .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec)) .collect::>(); + let header = generate_genesis_header(&spec, true); let genesis_state = interop_genesis_state_with_withdrawal_credentials( &validator_keypairs, &withdrawal_credentials, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, + header, &spec, ) .unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index d7ea7c262..6fb197b41 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -4,6 +4,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, ChainConfig, }; +use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; use http_api::test_utils::InteractiveTester; @@ -17,8 +18,8 @@ use std::sync::Arc; use std::time::Duration; use tree_hash::TreeHash; use types::{ - Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, MainnetEthSpec, + MinimalEthSpec, ProposerPreparationData, Slot, }; type E = MainnetEthSpec; @@ -111,8 +112,8 @@ async fn state_by_root_pruned_from_fork_choice() { .unwrap() .unwrap(); - assert!(response.finalized.unwrap()); - assert!(!response.execution_optimistic.unwrap()); + assert!(response.metadata.finalized.unwrap()); + assert!(!response.metadata.execution_optimistic.unwrap()); let mut state = response.data; assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); @@ -391,8 +392,8 @@ pub async fn proposer_boost_re_org_test( ) { assert!(head_slot > 0); - // Test using Capella so that we simulate conditions as similar to mainnet as possible. - let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec()); + // Test using the latest fork so that we simulate conditions as similar to mainnet as possible. + let mut spec = ForkName::latest().make_genesis_spec(E::default_spec()); spec.terminal_total_difficulty = 1.into(); // Ensure there are enough validators to have `attesters_per_slot`. @@ -551,7 +552,7 @@ pub async fn proposer_boost_re_org_test( // Produce block B and process it halfway through the slot. 
let (block_b, mut state_b) = harness.make_block(state_a.clone(), slot_b).await; - let block_b_root = block_b.canonical_root(); + let block_b_root = block_b.0.canonical_root(); let obs_time = slot_clock.start_of(slot_b).unwrap() + slot_clock.slot_duration() / 2; slot_clock.set_current_time(obs_time); @@ -617,13 +618,21 @@ pub async fn proposer_boost_re_org_test( let randao_reveal = harness .sign_randao_reveal(&state_b, proposer_index, slot_c) .into(); - let unsigned_block_c = tester + let (unsigned_block_type, _) = tester .client - .get_validator_blocks(slot_c, &randao_reveal, None) + .get_validator_blocks_v3::(slot_c, &randao_reveal, None, None) .await - .unwrap() - .data; - let block_c = harness.sign_beacon_block(unsigned_block_c, &state_b); + .unwrap(); + + let (unsigned_block_c, block_c_blobs) = match unsigned_block_type.data { + ProduceBlockV3Response::Full(unsigned_block_contents_c) => { + unsigned_block_contents_c.deconstruct() + } + ProduceBlockV3Response::Blinded(_) => { + panic!("Should not be a blinded block"); + } + }; + let block_c = Arc::new(harness.sign_beacon_block(unsigned_block_c, &state_b)); if should_re_org { // Block C should build on A. @@ -635,7 +644,7 @@ pub async fn proposer_boost_re_org_test( // Applying block C should cause it to become head regardless (re-org or continuation). let block_root_c = harness - .process_block_result(block_c.clone()) + .process_block_result((block_c.clone(), block_c_blobs)) .await .unwrap() .into(); @@ -643,8 +652,18 @@ pub async fn proposer_boost_re_org_test( // Check the fork choice updates that were sent. let forkchoice_updates = forkchoice_updates.lock(); - let block_a_exec_hash = block_a.message().execution_payload().unwrap().block_hash(); - let block_b_exec_hash = block_b.message().execution_payload().unwrap().block_hash(); + let block_a_exec_hash = block_a + .0 + .message() + .execution_payload() + .unwrap() + .block_hash(); + let block_b_exec_hash = block_b + .0 + .message() + .execution_payload() + .unwrap() + .block_hash(); let block_c_timestamp = block_c.message().execution_payload().unwrap().timestamp(); @@ -688,6 +707,11 @@ pub async fn proposer_boost_re_org_test( assert_ne!(expected_withdrawals, pre_advance_withdrawals); } + // Check that the `parent_beacon_block_root` of the payload attributes are correct. + if let Ok(parent_beacon_block_root) = payload_attribs.parent_beacon_block_root() { + assert_eq!(parent_beacon_block_root, block_c.parent_root()); + } + let lookahead = slot_clock .start_of(slot_c) .unwrap() @@ -749,7 +773,7 @@ pub async fn fork_choice_before_proposal() { let state_a = harness.get_current_state(); let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; let block_root_b = harness - .process_block(slot_b, block_b.canonical_root(), block_b) + .process_block(slot_b, block_b.0.canonical_root(), block_b) .await .unwrap(); @@ -764,7 +788,7 @@ pub async fn fork_choice_before_proposal() { let (block_c, state_c) = harness.make_block(state_a, slot_c).await; let block_root_c = harness - .process_block(slot_c, block_c.canonical_root(), block_c.clone()) + .process_block(slot_c, block_c.0.canonical_root(), block_c.clone()) .await .unwrap(); @@ -801,10 +825,12 @@ pub async fn fork_choice_before_proposal() { .into(); let block_d = tester .client - .get_validator_blocks::>(slot_d, &randao_reveal, None) + .get_validator_blocks::(slot_d, &randao_reveal, None) .await .unwrap() - .data; + .data + .deconstruct() + .0; // Head is now B. 
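Block C above is unpacked by matching `ProduceBlockV3Response::Full` and then `deconstruct()`-ing the contents into a block plus optional blobs. A std-only sketch of that control flow with simplified stand-in types (the real enum lives in `eth2::types`):

    struct Block(u64);
    struct Blobs(Vec<u8>);

    enum ProduceBlockV3Response {
        Full((Block, Option<Blobs>)),
        Blinded(Block),
    }

    impl ProduceBlockV3Response {
        /// Mirrors the match used for block C: full contents are expected,
        /// a blinded block is a test failure.
        fn expect_full(self) -> (Block, Option<Blobs>) {
            match self {
                ProduceBlockV3Response::Full(contents) => contents,
                ProduceBlockV3Response::Blinded(_) => panic!("Should not be a blinded block"),
            }
        }
    }

    fn main() {
        let response = ProduceBlockV3Response::Full((Block(7), None));
        let (block, blobs) = response.expect_full();
        assert_eq!(block.0, 7);
        assert!(blobs.is_none());
    }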
assert_eq!( diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 95f885faa..d37026d40 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -100,9 +100,10 @@ async fn el_error_on_new_payload() { // Make a block. let pre_state = harness.get_current_state(); - let (block, _) = harness + let (block_contents, _) = harness .make_block(pre_state, Slot::new(num_blocks + 1)) .await; + let (block, blobs) = block_contents; let block_hash = block .message() .body() @@ -118,7 +119,9 @@ async fn el_error_on_new_payload() { // Attempt to process the block, which should error. harness.advance_slot(); assert!(matches!( - harness.process_block_result(block.clone()).await, + harness + .process_block_result((block.clone(), blobs.clone())) + .await, Err(BlockError::ExecutionPayloadError(_)) )); @@ -137,7 +140,7 @@ async fn el_error_on_new_payload() { validation_error: None, }, ); - harness.process_block_result(block).await.unwrap(); + harness.process_block_result((block, blobs)).await.unwrap(); let api_response = tester.client.get_node_syncing().await.unwrap().data; assert_eq!(api_response.el_offline, Some(false)); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 1fbdab07c..933f98661 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -7,12 +7,13 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, + types::{ + BlockId as CoreBlockId, ForkChoiceNode, ProduceBlockV3Response, StateId as CoreStateId, *, + }, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use execution_layer::test_utils::{ - MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_BUILDER_THRESHOLD_WEI, - DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; @@ -62,8 +63,8 @@ struct ApiTester { harness: Arc>>, chain: Arc>>, client: BeaconNodeHttpClient, - next_block: SignedBeaconBlock, - reorg_block: SignedBeaconBlock, + next_block: PublishBlockRequest, + reorg_block: PublishBlockRequest, attestations: Vec>, contribution_and_proofs: Vec>, attester_slashing: AttesterSlashing, @@ -78,7 +79,6 @@ struct ApiTester { struct ApiTesterConfig { spec: ChainSpec, retain_historic_states: bool, - builder_threshold: Option, } impl Default for ApiTesterConfig { @@ -88,7 +88,6 @@ impl Default for ApiTesterConfig { Self { spec, retain_historic_states: false, - builder_threshold: None, } } } @@ -130,7 +129,7 @@ impl ApiTester { .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer_with_config(config.builder_threshold) + .mock_execution_layer_with_config() .build(); harness @@ -171,11 +170,13 @@ impl ApiTester { let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; + let next_block = PublishBlockRequest::from(next_block); // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1) .await; + let reorg_block = PublishBlockRequest::from(reorg_block); let head_state_root = head.beacon_state_root(); let 
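With blobs now travelling alongside blocks, `process_block_result` takes a `(block, blobs)` tuple, and the EL-error test re-submits the same pair once the mock EL stops erroring. A condensed sketch of that retry flow, assuming the async harness context used throughout this file:

    // Sketch only: `harness`, `pre_state`, `num_blocks` come from the test above.
    let (block_contents, _) = harness
        .make_block(pre_state, Slot::new(num_blocks + 1))
        .await;
    let (block, blobs) = block_contents; // block and blobs travel together now

    // While the EL errors on new_payload, import must fail...
    assert!(harness
        .process_block_result((block.clone(), blobs.clone()))
        .await
        .is_err());

    // ...and the identical (block, blobs) pair imports once the EL recovers.
    harness.process_block_result((block, blobs)).await.unwrap();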
attestations = harness @@ -265,11 +266,7 @@ impl ApiTester { // Start the mock builder service prior to building the chain out. harness.runtime.task_executor.spawn( - async move { - if let Err(e) = mock_builder_server.await { - panic!("error in mock builder server: {e:?}"); - } - }, + async move { mock_builder_server.await }, "mock_builder_server", ); @@ -314,11 +311,13 @@ impl ApiTester { let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; + let next_block = PublishBlockRequest::from(next_block); // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; + let reorg_block = PublishBlockRequest::from(reorg_block); let head_state_root = head.beacon_state_root(); let attestations = harness @@ -389,19 +388,12 @@ impl ApiTester { .test_post_validator_register_validator() .await; // Make sure bids always meet the minimum threshold. - tester - .mock_builder - .as_ref() - .unwrap() - .add_operation(Operation::Value(Uint256::from( - DEFAULT_BUILDER_THRESHOLD_WEI, - ))); + tester.mock_builder.as_ref().unwrap(); tester } - pub async fn new_mev_tester_no_builder_threshold() -> Self { + pub async fn new_mev_tester_default_payload_value() -> Self { let mut config = ApiTesterConfig { - builder_threshold: Some(0), retain_historic_states: false, spec: E::default_spec(), }; @@ -653,6 +645,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -689,6 +682,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -726,6 +720,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -848,6 +843,18 @@ impl ApiTester { .await .unwrap() .map(|res| res.data); + let result_post_index_ids = self + .client + .post_beacon_states_validator_balances(state_id.0, validator_index_ids) + .await + .unwrap() + .map(|res| res.data); + let result_post_pubkey_ids = self + .client + .post_beacon_states_validator_balances(state_id.0, validator_pubkey_ids) + .await + .unwrap() + .map(|res| res.data); let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| { let mut validators = Vec::with_capacity(validator_indices.len()); @@ -866,6 +873,8 @@ impl ApiTester { assert_eq!(result_index_ids, expected, "{:?}", state_id); assert_eq!(result_pubkey_ids, expected, "{:?}", state_id); + assert_eq!(result_post_index_ids, expected, "{:?}", state_id); + assert_eq!(result_post_pubkey_ids, expected, "{:?}", state_id); } } @@ -911,7 +920,6 @@ impl ApiTester { .await .unwrap() .map(|res| res.data); - let result_pubkey_ids = self .client .get_beacon_states_validators( @@ -922,6 +930,18 @@ impl ApiTester { .await .unwrap() .map(|res| res.data); + let post_result_index_ids = self + .client + .post_beacon_states_validators(state_id.0, Some(validator_index_ids), None) + .await + .unwrap() + .map(|res| res.data); + let post_result_pubkey_ids = self + .client + .post_beacon_states_validators(state_id.0, Some(validator_pubkey_ids), None) + .await + .unwrap() + .map(|res| res.data); let expected = state_opt.map(|state| { let epoch = state.current_epoch(); @@ -957,6 +977,8 @@ impl ApiTester { assert_eq!(result_index_ids, expected, "{:?}", state_id); assert_eq!(result_pubkey_ids, expected, "{:?}", state_id); + assert_eq!(post_result_index_ids, expected, "{:?}", state_id); + assert_eq!(post_result_pubkey_ids, expected, "{:?}", state_id); } } } @@ -1256,9 
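The tester now issues the new POST variants of the validator-balance and validator queries next to the existing GET ones and requires all of them to agree. A condensed sketch of that symmetry check (the argument shape of the GET call is assumed here; the POST call is as it appears above):

    // Sketch only: `client`, `state_id`, `validator_index_ids` from the test above.
    let result_get = client
        .get_beacon_states_validator_balances(state_id.0, Some(&validator_index_ids))
        .await
        .unwrap()
        .map(|res| res.data);
    let result_post = client
        .post_beacon_states_validator_balances(state_id.0, validator_index_ids)
        .await
        .unwrap()
        .map(|res| res.data);
    // Both transports must return identical balances for identical IDs.
    assert_eq!(result_get, result_post);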
+1278,9 @@ impl ApiTester { } pub async fn test_post_beacon_blocks_valid(mut self) -> Self { - let next_block = &self.next_block; + let next_block = self.next_block.clone(); - self.client.post_beacon_blocks(next_block).await.unwrap(); + self.client.post_beacon_blocks(&next_block).await.unwrap(); assert!( self.network_rx.network_recv.recv().await.is_some(), @@ -1297,7 +1319,11 @@ impl ApiTester { .await .0; - assert!(self.client.post_beacon_blocks(&block).await.is_err()); + assert!(self + .client + .post_beacon_blocks(&PublishBlockRequest::from(block)) + .await + .is_err()); assert!( self.network_rx.network_recv.recv().await.is_some(), @@ -1320,7 +1346,11 @@ impl ApiTester { .await .0; - assert!(self.client.post_beacon_blocks_ssz(&block).await.is_err()); + assert!(self + .client + .post_beacon_blocks_ssz(&PublishBlockRequest::from(block)) + .await + .is_err()); assert!( self.network_rx.network_recv.recv().await.is_some(), @@ -1331,48 +1361,57 @@ impl ApiTester { } pub async fn test_post_beacon_blocks_duplicate(self) -> Self { - let block = self + let block_contents = self .harness .make_block( self.harness.get_current_state(), self.harness.get_current_slot(), ) .await - .0; + .0 + .into(); - assert!(self.client.post_beacon_blocks(&block).await.is_ok()); + assert!(self + .client + .post_beacon_blocks(&block_contents) + .await + .is_ok()); - let blinded_block = block.clone_as_blinded(); + // Blinded deneb block contents is just the blinded block + let blinded_block_contents = block_contents.signed_block().clone_as_blinded(); // Test all the POST methods in sequence, they should all behave the same. let responses = vec![ - self.client.post_beacon_blocks(&block).await.unwrap_err(), self.client - .post_beacon_blocks_v2(&block, None) + .post_beacon_blocks(&block_contents) .await .unwrap_err(), self.client - .post_beacon_blocks_ssz(&block) + .post_beacon_blocks_v2(&block_contents, None) .await .unwrap_err(), self.client - .post_beacon_blocks_v2_ssz(&block, None) + .post_beacon_blocks_ssz(&block_contents) .await .unwrap_err(), self.client - .post_beacon_blinded_blocks(&blinded_block) + .post_beacon_blocks_v2_ssz(&block_contents, None) .await .unwrap_err(), self.client - .post_beacon_blinded_blocks_v2(&blinded_block, None) + .post_beacon_blinded_blocks(&blinded_block_contents) .await .unwrap_err(), self.client - .post_beacon_blinded_blocks_ssz(&blinded_block) + .post_beacon_blinded_blocks_v2(&blinded_block_contents, None) .await .unwrap_err(), self.client - .post_beacon_blinded_blocks_v2_ssz(&blinded_block, None) + .post_beacon_blinded_blocks_ssz(&blinded_block_contents) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_v2_ssz(&blinded_block_contents, None) .await .unwrap_err(), ]; @@ -1548,6 +1587,39 @@ impl ApiTester { self } + pub async fn test_get_blob_sidecars(self, use_indices: bool) -> Self { + let block_id = BlockId(CoreBlockId::Finalized); + let (block_root, _, _) = block_id.root(&self.chain).unwrap(); + let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); + let num_blobs = block.num_expected_blobs(); + let blob_indices = if use_indices { + Some( + (0..num_blobs.saturating_sub(1) as u64) + .into_iter() + .collect::>(), + ) + } else { + None + }; + let result = match self + .client + .get_blobs::(CoreBlockId::Root(block_root), blob_indices.as_deref()) + .await + { + Ok(result) => result.unwrap().data, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + assert_eq!( + result.len(), + blob_indices.map_or(num_blobs, |indices| indices.len()) + ); + 
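`test_post_beacon_blocks_duplicate` now pushes the same block contents through every publish endpoint (JSON and SSZ, v1 and v2, full and blinded) and expects each re-submission to fail identically. The invariant, sketched (async context assumed):

    // First publication succeeds; every replay is rejected as a duplicate.
    assert!(client.post_beacon_blocks(&block_contents).await.is_ok());
    assert!(client.post_beacon_blocks(&block_contents).await.is_err());
    assert!(client.post_beacon_blocks_v2(&block_contents, None).await.is_err());
    assert!(client.post_beacon_blocks_ssz(&block_contents).await.is_err());

    // The blinded endpoints take the blinded view of the same block.
    let blinded = block_contents.signed_block().clone_as_blinded();
    assert!(client.post_beacon_blinded_blocks(&blinded).await.is_err());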
let expected = block.slot(); + assert_eq!(result.get(0).unwrap().slot(), expected); + + self + } + pub async fn test_beacon_blocks_attestations(self) -> Self { for block_id in self.interesting_block_ids() { let result = self @@ -1628,6 +1700,59 @@ impl ApiTester { self } + pub async fn test_get_beacon_light_client_bootstrap(self) -> Self { + let block_id = BlockId(CoreBlockId::Finalized); + let (block_root, _, _) = block_id.root(&self.chain).unwrap(); + let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); + + let result = match self + .client + .get_light_client_bootstrap::(block_root) + .await + { + Ok(result) => result.unwrap().data, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + let expected = block.slot(); + assert_eq!(result.header.beacon.slot, expected); + + self + } + + pub async fn test_get_beacon_light_client_optimistic_update(self) -> Self { + // get_beacon_light_client_optimistic_update returns Ok(None) on 404 NOT FOUND + let result = match self + .client + .get_beacon_light_client_optimistic_update::() + .await + { + Ok(result) => result.map(|res| res.data), + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + let expected = self.chain.latest_seen_optimistic_update.lock().clone(); + assert_eq!(result, expected); + + self + } + + pub async fn test_get_beacon_light_client_finality_update(self) -> Self { + let result = match self + .client + .get_beacon_light_client_finality_update::() + .await + { + Ok(result) => result.map(|res| res.data), + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + let expected = self.chain.latest_seen_finality_update.lock().clone(); + assert_eq!(result, expected); + + self + } + pub async fn test_get_beacon_pool_attestations(self) -> Self { let result = self .client @@ -1798,9 +1923,9 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self .client - .get_config_spec::() + .get_config_spec::() .await - .map(|res| ConfigAndPreset::Capella(res.data)) + .map(|res| ConfigAndPreset::Deneb(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); @@ -2496,14 +2621,21 @@ impl ApiTester { let block = self .client - .get_validator_blocks::>(slot, &randao_reveal, None) + .get_validator_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data; + .data + .deconstruct() + .0; let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_block_contents = + PublishBlockRequest::try_from(Arc::new(signed_block.clone())).unwrap(); - self.client.post_beacon_blocks(&signed_block).await.unwrap(); + self.client + .post_beacon_blocks(&signed_block_contents) + .await + .unwrap(); assert_eq!(self.chain.head_beacon_block().as_ref(), &signed_block); @@ -2553,23 +2685,27 @@ impl ApiTester { let block_bytes = self .client - .get_validator_blocks_ssz::>(slot, &randao_reveal, None) + .get_validator_blocks_ssz::(slot, &randao_reveal, None) .await .unwrap() .expect("block bytes"); - let block = - BeaconBlock::>::from_ssz_bytes(&block_bytes, &self.chain.spec) - .expect("block bytes can be decoded"); + let block_contents = + FullBlockContents::::from_ssz_bytes(&block_bytes, &self.chain.spec) + .expect("block contents bytes can be decoded"); - let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blocks_ssz(&signed_block) + 
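The new `test_get_blob_sidecars` drives `get_blobs` both with and without an index filter: no filter returns every blob for the block, a filter returns exactly the requested subset. Sketch of the filtered case (the turbofish stripped from the text above is presumably `::<E>`):

    // Sketch only: `client`, `block_root`, `num_blobs` from the test above.
    let blob_indices: Option<Vec<u64>> =
        Some((0..num_blobs.saturating_sub(1) as u64).collect());
    let blobs = client
        .get_blobs::<E>(CoreBlockId::Root(block_root), blob_indices.as_deref())
        .await
        .unwrap()
        .unwrap()
        .data;
    // All blobs without a filter, otherwise exactly the requested indices.
    assert_eq!(
        blobs.len(),
        blob_indices.map_or(num_blobs, |indices| indices.len())
    );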
.post_beacon_blocks_ssz(&signed_block_contents) .await .unwrap(); - assert_eq!(self.chain.head_beacon_block().as_ref(), &signed_block); + assert_eq!( + self.chain.head_beacon_block(), + *signed_block_contents.signed_block() + ); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -2577,13 +2713,108 @@ impl ApiTester { self } + pub async fn test_block_production_v3_ssz(self) -> Self { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let (response, metadata) = self + .client + .get_validator_blocks_v3_ssz::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + match response { + ProduceBlockV3Response::Blinded(blinded_block) => { + assert!(metadata.execution_payload_blinded); + assert_eq!( + metadata.consensus_version, + blinded_block.to_ref().fork_name(&self.chain.spec).unwrap() + ); + let signed_blinded_block = + blinded_block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blinded_blocks_ssz(&signed_blinded_block) + .await + .unwrap(); + + let head_block = self.chain.head_beacon_block().clone_as_blinded(); + assert_eq!(head_block, signed_blinded_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + ProduceBlockV3Response::Full(block_contents) => { + assert!(!metadata.execution_payload_blinded); + assert_eq!( + metadata.consensus_version, + block_contents + .block() + .to_ref() + .fork_name(&self.chain.spec) + .unwrap() + ); + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blocks_ssz(&signed_block_contents) + .await + .unwrap(); + + assert_eq!( + self.chain.head_beacon_block(), + *signed_block_contents.signed_block() + ); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + } + } + + self + } + pub async fn test_block_production_no_verify_randao(self) -> Self { for _ in 0..E::slots_per_epoch() { let slot = self.chain.slot().unwrap(); let block = self .client - .get_validator_blocks_modular::>( + .get_validator_blocks_modular::( slot, &Signature::infinity().unwrap().into(), None, @@ -2591,7 +2822,9 @@ impl ApiTester { ) .await .unwrap() - .data; + .data + .deconstruct() + .0; assert_eq!(block.slot(), slot); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -2639,13 +2872,13 @@ impl ApiTester { // Check failure with no `skip_randao_verification` passed. self.client - .get_validator_blocks::>(slot, &bad_randao_reveal, None) + .get_validator_blocks::(slot, &bad_randao_reveal, None) .await .unwrap_err(); // Check failure with `skip_randao_verification` (requires infinity sig). 
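`test_block_production_v3_ssz` exercises the full v3 SSZ round trip: fetch the response plus metadata, branch on blinded versus full, check `metadata.consensus_version` against the block's fork, sign, and publish through the matching SSZ endpoint. The blinded arm, condensed (async context and key material assumed from the test):

    let (response, metadata) = client
        .get_validator_blocks_v3_ssz::<E>(slot, &randao_reveal, None, None)
        .await
        .unwrap();

    if let ProduceBlockV3Response::Blinded(blinded_block) = response {
        // The metadata must agree with the payload variant we received.
        assert!(metadata.execution_payload_blinded);
        let signed = blinded_block.sign(&sk, &fork, genesis_validators_root, &spec);
        // Blinded blocks go back through the blinded SSZ endpoint.
        client.post_beacon_blinded_blocks_ssz(&signed).await.unwrap();
    }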
self.client - .get_validator_blocks_modular::>( + .get_validator_blocks_modular::( slot, &bad_randao_reveal, None, @@ -2660,7 +2893,7 @@ impl ApiTester { self } - pub async fn test_blinded_block_production>(&self) { + pub async fn test_blinded_block_production(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2700,7 +2933,7 @@ impl ApiTester { let block = self .client - .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data; @@ -2712,15 +2945,21 @@ impl ApiTester { .await .unwrap(); - // This converts the generic `Payload` to a concrete type for comparison. - let head_block = SignedBeaconBlock::from(signed_block.clone()); - assert_eq!(head_block, signed_block); + let head_block = self + .client + .get_beacon_blocks(CoreBlockId::Head) + .await + .unwrap() + .unwrap() + .data; + + assert_eq!(head_block.clone_as_blinded(), signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } } - pub async fn test_blinded_block_production_ssz>(&self) { + pub async fn test_blinded_block_production_ssz(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2758,40 +2997,49 @@ impl ApiTester { sk.sign(message).into() }; - let block_bytes = self + let block_contents_bytes = self .client - .get_validator_blinded_blocks_ssz::(slot, &randao_reveal, None) + .get_validator_blinded_blocks_ssz::(slot, &randao_reveal, None) .await .unwrap() .expect("block bytes"); - let block = BeaconBlock::::from_ssz_bytes(&block_bytes, &self.chain.spec) - .expect("block bytes can be decoded"); + let block_contents = + FullBlockContents::::from_ssz_bytes(&block_contents_bytes, &self.chain.spec) + .expect("block contents bytes can be decoded"); - let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blinded_blocks_ssz(&signed_block) + .post_beacon_blinded_blocks_ssz( + &signed_block_contents.signed_block().clone_as_blinded(), + ) .await .unwrap(); - // This converts the generic `Payload` to a concrete type for comparison. 
- let head_block = SignedBeaconBlock::from(signed_block.clone()); - assert_eq!(head_block, signed_block); + let head_block = self + .client + .get_beacon_blocks(CoreBlockId::Head) + .await + .unwrap() + .unwrap() + .data; + + let signed_block = signed_block_contents.signed_block(); + assert_eq!(head_block, **signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } } - pub async fn test_blinded_block_production_no_verify_randao>( - self, - ) -> Self { + pub async fn test_blinded_block_production_no_verify_randao(self) -> Self { for _ in 0..E::slots_per_epoch() { let slot = self.chain.slot().unwrap(); - let block = self + let blinded_block = self .client - .get_validator_blinded_blocks_modular::( + .get_validator_blinded_blocks_modular::( slot, &Signature::infinity().unwrap().into(), None, @@ -2800,18 +3048,14 @@ impl ApiTester { .await .unwrap() .data; - assert_eq!(block.slot(), slot); + assert_eq!(blinded_block.slot(), slot); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } self } - pub async fn test_blinded_block_production_verify_randao_invalid< - Payload: AbstractExecPayload, - >( - self, - ) -> Self { + pub async fn test_blinded_block_production_verify_randao_invalid(self) -> Self { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2851,13 +3095,13 @@ impl ApiTester { // Check failure with full randao verification enabled. self.client - .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) + .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) .await .unwrap_err(); // Check failure with `skip_randao_verification` (requires infinity sig). self.client - .get_validator_blinded_blocks_modular::( + .get_validator_blinded_blocks_modular::( slot, &bad_randao_reveal, None, @@ -3246,7 +3490,7 @@ impl ApiTester { let result = self .client - .post_validator_liveness_epoch(epoch, indices.clone()) + .post_validator_liveness_epoch(epoch, &indices) .await .unwrap() .data; @@ -3261,7 +3505,7 @@ impl ApiTester { let result = self .client - .post_validator_liveness_epoch(epoch, indices.clone()) + .post_validator_liveness_epoch(epoch, &indices) .await .unwrap() .data; @@ -3324,6 +3568,84 @@ impl ApiTester { (proposer_index, randao_reveal) } + pub async fn test_payload_v3_respects_registration(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 11_111_111); + + self + } + + pub async fn test_payload_v3_zero_builder_boost_factor(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(0)) + .await + .unwrap(); + + let payload: FullPayload = match payload_type.data { + 
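`test_payload_v3_respects_registration` checks that the builder echoes the registration data back: the mock registrations derive each validator's fee recipient from its index (`Address::from_low_u64_be(proposer_index)`) and register a gas limit of 11_111_111. A std-only illustration of reconstructing the expected recipient from the proposer index alone:

    /// `Address::from_low_u64_be(i)` places `i` big-endian in the low 8 bytes
    /// of a 20-byte address; this reproduces that layout with plain arrays.
    fn expected_fee_recipient(proposer_index: u64) -> [u8; 20] {
        let mut addr = [0u8; 20];
        addr[12..].copy_from_slice(&proposer_index.to_be_bytes());
        addr
    }

    fn main() {
        let addr = expected_fee_recipient(1);
        assert_eq!(addr[19], 1); // low byte carries the index
        assert!(addr[..12].iter().all(|b| *b == 0));
    }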
ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 16_384); + + self + } + + pub async fn test_payload_v3_max_builder_boost_factor(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(u64::MAX)) + .await + .unwrap(); + + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 11_111_111); + + self + } + pub async fn test_payload_respects_registration(self) -> Self { let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); @@ -3332,7 +3654,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3372,7 +3694,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3396,6 +3718,38 @@ impl ApiTester { self } + pub async fn test_payload_v3_accepts_mutated_gas_limit(self) -> Self { + // Mutate gas limit. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::GasLimit(30_000_000)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 30_000_000); + + self + } + pub async fn test_payload_accepts_changed_fee_recipient(self) -> Self { let test_fee_recipient = "0x4242424242424242424242424242424242424242" .parse::
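The zero and `u64::MAX` boost-factor tests pin down the extremes: a factor of 0 must force the local (full) payload and `u64::MAX` must force the builder (blinded) payload. A std-only sketch of the selection rule, under the assumption (taken from the beacon-API description of `builder_boost_factor`) that the factor acts as a percentage multiplier on the builder bid; the real comparison lives in the beacon node:

    /// Illustrative only: builder bid scaled by a percentage-style factor.
    fn use_builder(builder_wei: u128, local_wei: u128, boost_factor: u64) -> bool {
        builder_wei.saturating_mul(boost_factor as u128) / 100 > local_wei
    }

    fn main() {
        // Factor 0 can never beat the local payload: expect a full block.
        assert!(!use_builder(1_000_000, 1, 0));
        // Factor u64::MAX swamps any local value: expect a blinded block.
        assert!(use_builder(1, 1_000_000, u64::MAX));
    }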
<Address>
() @@ -3414,7 +3768,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3436,6 +3790,40 @@ impl ApiTester { self } + pub async fn test_payload_v3_accepts_changed_fee_recipient(self) -> Self { + let test_fee_recipient = "0x4242424242424242424242424242424242424242" + .parse::
() + .unwrap(); + + // Mutate fee recipient. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::FeeRecipient(test_fee_recipient)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + assert_eq!(payload.fee_recipient(), test_fee_recipient); + + self + } + pub async fn test_payload_rejects_invalid_parent_hash(self) -> Self { let invalid_parent_hash = "0x4242424242424242424242424242424242424242424242424242424242424242" @@ -3462,7 +3850,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3484,6 +3872,48 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_parent_hash(self) -> Self { + let invalid_parent_hash = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate parent hash. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::ParentHash(invalid_parent_hash)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_parent_hash = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_hash(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a blinded payload"), + }; + + assert_eq!(payload.parent_hash(), expected_parent_hash); + + self + } + pub async fn test_payload_rejects_invalid_prev_randao(self) -> Self { let invalid_prev_randao = "0x4242424242424242424242424242424242424242424242424242424242424242" @@ -3504,12 +3934,11 @@ impl ApiTester { .cached_head() .head_random() .unwrap(); - let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3531,6 +3960,46 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_prev_randao(self) -> Self { + let invalid_prev_randao = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate prev randao. 
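The mutation tests above encode the bid-validation policy: cosmetic fields such as fee recipient and gas limit may differ from the local view (the blinded bid is still used), but a parent hash that does not extend the current head invalidates the bid and the node falls back to the local full payload. A std-only sketch of that check with illustrative types:

    struct Bid {
        parent_hash: [u8; 32],
    }

    /// Returns which payload variant the proposer should end up with.
    fn select_payload(bid: &Bid, head_block_hash: [u8; 32]) -> &'static str {
        if bid.parent_hash == head_block_hash {
            "blinded (builder bid)"
        } else {
            "full (local fallback)"
        }
    }

    fn main() {
        let head = [0u8; 32];
        let bad_bid = Bid { parent_hash: [0x42; 32] };
        let good_bid = Bid { parent_hash: head };
        assert_eq!(select_payload(&bad_bid, head), "full (local fallback)");
        assert_eq!(select_payload(&good_bid, head), "blinded (builder bid)");
    }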
+ self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::PrevRandao(invalid_prev_randao)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_prev_randao = self + .chain + .canonical_head + .cached_head() + .head_random() + .unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + assert_eq!(payload.prev_randao(), expected_prev_randao); + + self + } + pub async fn test_payload_rejects_invalid_block_number(self) -> Self { let invalid_block_number = 2; @@ -3555,7 +4024,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3577,6 +4046,46 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_block_number(self) -> Self { + let invalid_block_number = 2; + + // Mutate block number. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::BlockNumber(invalid_block_number)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_block_number = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_number() + + 1; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + assert_eq!(payload.block_number(), expected_block_number); + + self + } + pub async fn test_payload_rejects_invalid_timestamp(self) -> Self { let invalid_timestamp = 2; @@ -3600,7 +4109,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3622,6 +4131,45 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_timestamp(self) -> Self { + let invalid_timestamp = 2; + + // Mutate timestamp. 
+ self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Timestamp(invalid_timestamp)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let min_expected_timestamp = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .timestamp(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a blinded payload"), + }; + + assert!(payload.timestamp() > min_expected_timestamp); + + self + } + pub async fn test_payload_rejects_invalid_signature(self) -> Self { self.mock_builder.as_ref().unwrap().invalid_signatures(); @@ -3632,7 +4180,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3652,6 +4200,28 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_signature(self) -> Self { + self.mock_builder.as_ref().unwrap().invalid_signatures(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_chain_health_skips(self) -> Self { let slot = self.chain.slot().unwrap(); @@ -3669,7 +4239,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3689,6 +4259,35 @@ impl ApiTester { self } + pub async fn test_builder_v3_chain_health_skips(self) -> Self { + let slot = self.chain.slot().unwrap(); + + // Since we are proposing this slot, start the count from the previous slot. + let prev_slot = slot - Slot::new(1); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + let epoch = self.chain.epoch().unwrap(); + + // Inclusive here to make sure we advance one slot past the threshold. + for _ in (prev_slot - head_slot).as_usize()..=self.chain.config.builder_fallback_skips { + self.harness.advance_slot(); + } + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_chain_health_skips_per_epoch(self) -> Self { // Fill an epoch with `builder_fallback_skips_per_epoch` skip slots. 
for i in 0..E::slots_per_epoch() { @@ -3712,7 +4311,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data @@ -3741,7 +4340,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data @@ -3762,6 +4361,61 @@ impl ApiTester { self } + pub async fn test_builder_v3_chain_health_skips_per_epoch(self) -> Self { + // Fill an epoch with `builder_fallback_skips_per_epoch` skip slots. + for i in 0..E::slots_per_epoch() { + if i == 0 || i as usize > self.chain.config.builder_fallback_skips_per_epoch { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + // Without proposing, advance into the next slot, this should make us cross the threshold + // number of skips, causing us to use the fallback. + self.harness.advance_slot(); + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_chain_health_epochs_since_finalization(self) -> Self { let skips = E::slots_per_epoch() * self.chain.config.builder_fallback_epochs_since_finalization as u64; @@ -3790,7 +4444,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data @@ -3829,7 +4483,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data @@ -3850,6 +4504,76 @@ impl ApiTester { self } + pub async fn test_builder_v3_chain_health_epochs_since_finalization(self) -> Self { + let skips = E::slots_per_epoch() + * self.chain.config.builder_fallback_epochs_since_finalization as u64; + + for _ in 0..skips { + self.harness.advance_slot(); + } + + // Fill the next epoch with blocks, should be enough to justify, not finalize. 
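`test_builder_v3_chain_health_skips_per_epoch` fills the epoch with exactly `builder_fallback_skips_per_epoch` skip slots, confirms the builder is still used, then adds one more skip and expects the local fallback. A std-only sketch of the counting rule (names illustrative):

    /// Builder bids are allowed while the number of missed slots in the
    /// current epoch stays at or below the configured threshold.
    fn builder_allowed(slot_filled: &[bool], skips_per_epoch: usize) -> bool {
        let skips = slot_filled.iter().filter(|filled| !**filled).count();
        skips <= skips_per_epoch
    }

    fn main() {
        // Two skips with a threshold of two: builder still allowed...
        assert!(builder_allowed(&[true, false, true, false], 2));
        // ...a third skip crosses the threshold and forces the local payload.
        assert!(!builder_allowed(&[true, false, false, false], 2));
    }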
+ for _ in 0..E::slots_per_epoch() { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this + // scenario starts at an epoch boundary). + for _ in 0..E::slots_per_epoch() + 1 { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + self + } + pub async fn test_builder_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. self.harness.mock_execution_layer.as_ref().map(|el| { @@ -3872,7 +4596,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3896,39 +4620,42 @@ impl ApiTester { self } - pub async fn test_payload_rejects_inadequate_builder_threshold(self) -> Self { - // Mutate value. - self.mock_builder - .as_ref() - .unwrap() - .add_operation(Operation::Value(Uint256::from( - DEFAULT_BUILDER_THRESHOLD_WEI - 1, - ))); + pub async fn test_builder_v3_chain_health_optimistic_head(self) -> Self { + // Make sure the next payload verification will return optimistic before advancing the chain. 
+ self.harness.mock_execution_layer.as_ref().map(|el| { + el.server.all_payloads_syncing(true); + el + }); + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); - let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload: BlindedPayload = self + let (payload_type, _) = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await - .unwrap() - .data - .body() - .execution_payload() - .unwrap() - .into(); + .unwrap(); + + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); - // If this cache is populated, it indicates fallback to the local EE was correctly used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); self } @@ -3948,7 +4675,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -3968,6 +4695,34 @@ impl ApiTester { self } + pub async fn test_builder_payload_v3_chosen_when_more_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + self + } + pub async fn test_local_payload_chosen_when_equally_profitable(self) -> Self { // Mutate value. self.mock_builder @@ -3984,7 +4739,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -4004,6 +4759,34 @@ impl ApiTester { self } + pub async fn test_local_payload_v3_chosen_when_equally_profitable(self) -> Self { + // Mutate value. 
+ self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_local_payload_chosen_when_more_profitable(self) -> Self { // Mutate value. self.mock_builder @@ -4020,7 +4803,7 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -4040,6 +4823,34 @@ impl ApiTester { self } + pub async fn test_local_payload_v3_chosen_when_more_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_works_post_capella(self) -> Self { // Ensure builder payload is chosen self.mock_builder @@ -4050,25 +4861,12 @@ impl ApiTester { ))); let slot = self.chain.slot().unwrap(); - let propose_state = self - .harness - .chain - .state_at_slot(slot, StateSkipConfig::WithoutStateRoots) - .unwrap(); - let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap(); - let withdrawals_root = withdrawals.tree_hash_root(); - // Set withdrawals root for builder - self.mock_builder - .as_ref() - .unwrap() - .add_operation(Operation::WithdrawalsRoot(withdrawals_root)); - let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -4088,6 +4886,33 @@ impl ApiTester { self } + pub async fn test_builder_works_post_deneb(self) -> Self { + // Ensure builder payload is chosen + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + let _block_contents = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => payload, + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + self + } + pub async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self { // Ensure builder payload *would be* chosen self.mock_builder @@ -4108,7 +4933,7 @@ impl ApiTester { let payload: 
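Taken together, the three value tests fix the default selection rule: the builder bid is used only when strictly more profitable than the local payload; an equal or lower bid yields the local (full) payload. Sketched std-only:

    fn choose_payload(builder_wei: u128, local_wei: u128) -> &'static str {
        if builder_wei > local_wei {
            "blinded (builder)"
        } else {
            "full (local)"
        }
    }

    fn main() {
        assert_eq!(choose_payload(101, 100), "blinded (builder)"); // more profitable
        assert_eq!(choose_payload(100, 100), "full (local)"); // ties go local
        assert_eq!(choose_payload(99, 100), "full (local)"); // less profitable
    }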
BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -4128,6 +4953,38 @@ impl ApiTester { self } + pub async fn test_lighthouse_rejects_invalid_withdrawals_root_v3(self) -> Self { + // Ensure builder payload *would be* chosen + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + // Set withdrawals root to something invalid + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .await + .unwrap(); + + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -4367,12 +5224,12 @@ impl ApiTester { // Submit the next block, which is on an epoch boundary, so this will produce a finalized // checkpoint event, head event, and block event - let block_root = self.next_block.canonical_root(); + let block_root = self.next_block.signed_block().canonical_root(); // current_duty_dependent_root = block root because this is the first slot of the epoch let current_duty_dependent_root = self.chain.head_beacon_block_root(); let current_slot = self.chain.slot().unwrap(); - let next_slot = self.next_block.slot(); + let next_slot = self.next_block.signed_block().slot(); let finalization_distance = E::slots_per_epoch() * 2; let expected_block = EventKind::Block(SseBlock { @@ -4384,7 +5241,7 @@ impl ApiTester { let expected_head = EventKind::Head(SseHead { block: block_root, slot: next_slot, - state: self.next_block.state_root(), + state: self.next_block.signed_block().state_root(), current_duty_dependent_root, previous_duty_dependent_root: self .chain @@ -4433,13 +5290,17 @@ impl ApiTester { .unwrap(); let expected_reorg = EventKind::ChainReorg(SseChainReorg { - slot: self.reorg_block.slot(), + slot: self.reorg_block.signed_block().slot(), depth: 1, - old_head_block: self.next_block.canonical_root(), - old_head_state: self.next_block.state_root(), - new_head_block: self.reorg_block.canonical_root(), - new_head_state: self.reorg_block.state_root(), - epoch: self.next_block.slot().epoch(E::slots_per_epoch()), + old_head_block: self.next_block.signed_block().canonical_root(), + old_head_state: self.next_block.signed_block().state_root(), + new_head_block: self.reorg_block.signed_block().canonical_root(), + new_head_state: self.reorg_block.signed_block().state_root(), + epoch: self + .next_block + .signed_block() + .slot() + .epoch(E::slots_per_epoch()), execution_optimistic: false, }); @@ -4503,8 +5364,7 @@ impl ApiTester { assert_eq!(withdrawal_response.finalized, Some(false)); assert_eq!(withdrawal_response.data, expected_withdrawals.to_vec()); } - Err(e) => { - println!("{:?}", e); + Err(_) => { panic!("query failed incorrectly"); } } @@ -4569,8 +5429,8 @@ impl ApiTester { .await .unwrap(); - let block_root = self.next_block.canonical_root(); - let next_slot = self.next_block.slot(); + let block_root = 
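The withdrawals-root tests corrupt the bid's withdrawals root (`Hash256::repeat_byte(0x42)`) and expect the node to discard the bid. A sketch of the implied check, reusing `get_expected_withdrawals` and `tree_hash_root` as they appear earlier in this diff; `bid_withdrawals_root` is a hypothetical name for the root carried in the builder bid:

    // Sketch only, in the harness context above.
    let withdrawals = get_expected_withdrawals(&propose_state, &spec).unwrap();
    let expected_root = withdrawals.tree_hash_root();
    if bid_withdrawals_root != expected_root {
        // Reject the bid and fall back to the local execution payload,
        // which is why the v3 test expects a Full response here.
    }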
self.next_block.signed_block().canonical_root(); + let next_slot = self.next_block.signed_block().slot(); let expected_block = EventKind::Block(SseBlock { block: block_root, @@ -4581,7 +5441,7 @@ impl ApiTester { let expected_head = EventKind::Head(SseHead { block: block_root, slot: next_slot, - state: self.next_block.state_root(), + state: self.next_block.signed_block().state_root(), current_duty_dependent_root: self.chain.genesis_block_root, previous_duty_dependent_root: self.chain.genesis_block_root, epoch_transition: false, @@ -4886,6 +5746,42 @@ async fn node_get() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_light_client_bootstrap() { + let config = ApiTesterConfig { + spec: ForkName::Altair.make_genesis_spec(E::default_spec()), + ..<_>::default() + }; + ApiTester::new_from_config(config) + .await + .test_get_beacon_light_client_bootstrap() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_light_client_optimistic_update() { + let config = ApiTesterConfig { + spec: ForkName::Altair.make_genesis_spec(E::default_spec()), + ..<_>::default() + }; + ApiTester::new_from_config(config) + .await + .test_get_beacon_light_client_optimistic_update() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_light_client_finality_update() { + let config = ApiTesterConfig { + spec: ForkName::Altair.make_genesis_spec(E::default_spec()), + ..<_>::default() + }; + ApiTester::new_from_config(config) + .await + .test_get_beacon_light_client_finality_update() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_early() { ApiTester::new() @@ -4973,18 +5869,29 @@ async fn block_production_ssz_with_skip_slots() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn blinded_block_production_full_payload_premerge() { +async fn block_production_ssz_v3() { + ApiTester::new().await.test_block_production_v3_ssz().await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn block_production_v3_ssz_with_skip_slots() { ApiTester::new() .await - .test_blinded_block_production::>() + .skip_slots(E::slots_per_epoch() * 2) + .test_block_production_v3_ssz() .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_full_payload_premerge() { + ApiTester::new().await.test_blinded_block_production().await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_ssz_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_ssz::>() + .test_blinded_block_production_ssz() .await; } @@ -4993,7 +5900,7 @@ async fn blinded_block_production_with_skip_slots_full_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production::>() + .test_blinded_block_production() .await; } @@ -5002,7 +5909,7 @@ async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production_ssz::>() + .test_blinded_block_production_ssz() .await; } @@ -5010,7 +5917,7 @@ async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() { async fn blinded_block_production_no_verify_randao_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_no_verify_randao::>() + .test_blinded_block_production_no_verify_randao() .await; } @@ -5018,16 +5925,13 @@ 
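Since `next_block` is now a `PublishBlockRequest`, the SSE expectations reach the block through `signed_block()` instead of calling accessors on the block directly. The head-event expectation, condensed (fields beyond those visible above are elided):

    let block = self.next_block.signed_block();
    let expected_head = EventKind::Head(SseHead {
        block: block.canonical_root(),
        slot: block.slot(),
        state: block.state_root(),
        current_duty_dependent_root: self.chain.genesis_block_root,
        previous_duty_dependent_root: self.chain.genesis_block_root,
        epoch_transition: false,
        // (remaining fields as in the surrounding test)
    });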
async fn blinded_block_production_no_verify_randao_full_payload_premerge() { async fn blinded_block_production_verify_randao_invalid_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_verify_randao_invalid::>() + .test_blinded_block_production_verify_randao_invalid() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_blinded_payload_premerge() { - ApiTester::new() - .await - .test_blinded_block_production::>() - .await; + ApiTester::new().await.test_blinded_block_production().await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -5035,7 +5939,7 @@ async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production::>() + .test_blinded_block_production() .await; } @@ -5043,7 +5947,7 @@ async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_no_verify_randao::>() + .test_blinded_block_production_no_verify_randao() .await; } @@ -5051,7 +5955,7 @@ async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { async fn blinded_block_production_verify_randao_invalid_blinded_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_verify_randao_invalid::>() + .test_blinded_block_production_verify_randao_invalid() .await; } @@ -5155,6 +6059,30 @@ async fn post_validator_register_valid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_zero_builder_boost_factor() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_zero_builder_boost_factor() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_max_builder_boost_factor() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_max_builder_boost_factor() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_valid_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_respects_registration() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_gas_limit_mutation() { ApiTester::new_mev_tester() @@ -5163,6 +6091,14 @@ async fn post_validator_register_gas_limit_mutation() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_gas_limit_mutation_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_accepts_mutated_gas_limit() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_fee_recipient_mutation() { ApiTester::new_mev_tester() @@ -5171,6 +6107,14 @@ async fn post_validator_register_fee_recipient_mutation() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_fee_recipient_mutation_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_accepts_changed_fee_recipient() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_parent_hash() { ApiTester::new_mev_tester() @@ -5179,6 +6123,14 @@ async fn get_blinded_block_invalid_parent_hash() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_parent_hash_v3() { + ApiTester::new_mev_tester() + .await 
+ .test_payload_v3_rejects_invalid_parent_hash() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_prev_randao() { ApiTester::new_mev_tester() @@ -5187,6 +6139,14 @@ async fn get_blinded_block_invalid_prev_randao() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_prev_randao_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_prev_randao() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_block_number() { ApiTester::new_mev_tester() @@ -5195,6 +6155,14 @@ async fn get_blinded_block_invalid_block_number() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_block_number_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_block_number() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_timestamp() { ApiTester::new_mev_tester() @@ -5203,6 +6171,14 @@ async fn get_blinded_block_invalid_timestamp() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_timestamp_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_timestamp() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_signature() { ApiTester::new_mev_tester() @@ -5211,6 +6187,14 @@ async fn get_blinded_block_invalid_signature() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_signature_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_signature() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_chain_health_skips() { ApiTester::new_mev_tester() @@ -5219,6 +6203,14 @@ async fn builder_chain_health_skips() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips_v3() { + ApiTester::new_mev_tester() + .await + .test_builder_v3_chain_health_skips() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_chain_health_skips_per_epoch() { ApiTester::new_mev_tester() @@ -5227,6 +6219,14 @@ async fn builder_chain_health_skips_per_epoch() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips_per_epoch_v3() { + ApiTester::new_mev_tester() + .await + .test_builder_v3_chain_health_skips_per_epoch() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_chain_health_epochs_since_finalization() { ApiTester::new_mev_tester() @@ -5235,6 +6235,14 @@ async fn builder_chain_health_epochs_since_finalization() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_epochs_since_finalization_v3() { + ApiTester::new_mev_tester() + .await + .test_builder_v3_chain_health_epochs_since_finalization() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_chain_health_optimistic_head() { ApiTester::new_mev_tester() @@ -5244,16 +6252,16 @@ async fn builder_chain_health_optimistic_head() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn builder_inadequate_builder_threshold() { +async fn builder_chain_health_optimistic_head_v3() { ApiTester::new_mev_tester() .await - 
.test_payload_rejects_inadequate_builder_threshold() + .test_builder_v3_chain_health_optimistic_head() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_payload_chosen_by_profit() { - ApiTester::new_mev_tester_no_builder_threshold() + ApiTester::new_mev_tester_default_payload_value() .await .test_builder_payload_chosen_when_more_profitable() .await @@ -5263,10 +6271,21 @@ async fn builder_payload_chosen_by_profit() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_payload_chosen_by_profit_v3() { + ApiTester::new_mev_tester_default_payload_value() + .await + .test_builder_payload_v3_chosen_when_more_profitable() + .await + .test_local_payload_v3_chosen_when_equally_profitable() + .await + .test_local_payload_v3_chosen_when_more_profitable() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_works_post_capella() { let mut config = ApiTesterConfig { - builder_threshold: Some(0), retain_historic_states: false, spec: E::default_spec(), }; @@ -5284,6 +6303,48 @@ async fn builder_works_post_capella() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_works_post_deneb() { + let mut config = ApiTesterConfig { + retain_historic_states: false, + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_post_validator_register_validator() + .await + .test_builder_works_post_deneb() + .await + .test_lighthouse_rejects_invalid_withdrawals_root_v3() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blob_sidecars() { + let mut config = ApiTesterConfig { + retain_historic_states: false, + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_post_beacon_blocks_valid() + .await + .test_get_blob_sidecars(false) + .await + .test_get_blob_sidecars(true) + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_liveness_epoch() { ApiTester::new() diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 785206b75..e6e06caa8 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -4,8 +4,6 @@ use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; -pub use lighthouse_metrics::*; - pub fn gather_prometheus_metrics( ctx: &Context, ) -> std::result::Result { diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 07674fb6d..46acdeade 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -10,7 +10,6 @@ unsigned-varint = { version = "0.6", features = ["codec"] } ssz_types = { workspace = true } types = { workspace = true } serde = { workspace = true } -serde_derive = "1" ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } tree_hash = { workspace = true } @@ -40,17 +39,17 @@ 
 directory = { workspace = true }
 regex = { workspace = true }
 strum = { workspace = true }
 superstruct = { workspace = true }
-prometheus-client = "0.21.0"
+prometheus-client = "0.22.0"
 unused_port = { workspace = true }
 delay_map = { workspace = true }
 void = "1"
-libp2p-quic= { version = "0.9.2", features=["tokio"]}
-libp2p-mplex = "0.40.0"
+libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p/", rev = "cfa3275ca17e502799ed56e555b6c0611752e369" }
 
 [dependencies.libp2p]
-version = "0.52"
+git = "https://github.com/sigp/rust-libp2p/"
+rev = "cfa3275ca17e502799ed56e555b6c0611752e369"
 default-features = false
-features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"]
+features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic"]
 
 [dev-dependencies]
 slog-term = { workspace = true }
diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs
index c3f6b60b0..169a061d2 100644
--- a/beacon_node/lighthouse_network/src/config.rs
+++ b/beacon_node/lighthouse_network/src/config.rs
@@ -5,17 +5,22 @@ use crate::{Enr, PeerIdSerialized};
 use directory::{
     DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
 };
-use discv5::{Discv5Config, Discv5ConfigBuilder};
 use libp2p::gossipsub;
 use libp2p::Multiaddr;
-use serde_derive::{Deserialize, Serialize};
+use serde::{Deserialize, Serialize};
 use sha2::{Digest, Sha256};
 use std::net::{Ipv4Addr, Ipv6Addr};
+use std::num::NonZeroU16;
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::Duration;
 use types::{ForkContext, ForkName};
 
+pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
+pub const DEFAULT_TCP_PORT: u16 = 9000u16;
+pub const DEFAULT_DISC_PORT: u16 = 9000u16;
+pub const DEFAULT_QUIC_PORT: u16 = 9001u16;
+
 /// The cache time is set to accommodate the circulation time of an attestation.
 ///
 /// The p2p spec declares that we accept attestations within the following range:
@@ -59,22 +64,22 @@ pub struct Config {
     pub enr_address: (Option<Ipv4Addr>, Option<Ipv6Addr>),
 
     /// The udp ipv4 port to broadcast to peers in order to reach back for discovery.
-    pub enr_udp4_port: Option<u16>,
+    pub enr_udp4_port: Option<NonZeroU16>,
 
     /// The quic ipv4 port to broadcast to peers in order to reach back for libp2p services.
-    pub enr_quic4_port: Option<u16>,
+    pub enr_quic4_port: Option<NonZeroU16>,
 
     /// The tcp ipv4 port to broadcast to peers in order to reach back for libp2p services.
-    pub enr_tcp4_port: Option<u16>,
+    pub enr_tcp4_port: Option<NonZeroU16>,
 
     /// The udp ipv6 port to broadcast to peers in order to reach back for discovery.
-    pub enr_udp6_port: Option<u16>,
+    pub enr_udp6_port: Option<NonZeroU16>,
 
     /// The tcp ipv6 port to broadcast to peers in order to reach back for libp2p services.
-    pub enr_tcp6_port: Option<u16>,
+    pub enr_tcp6_port: Option<NonZeroU16>,
 
     /// The quic ipv6 port to broadcast to peers in order to reach back for libp2p services.
-    pub enr_quic6_port: Option<u16>,
+    pub enr_quic6_port: Option<NonZeroU16>,
 
     /// Target number of connected peers.
     pub target_peers: usize,
@@ -85,7 +90,7 @@ pub struct Config {
 
     /// Discv5 configuration parameters.
     #[serde(skip)]
-    pub discv5_config: Discv5Config,
+    pub discv5_config: discv5::Config,
 
     /// List of nodes to initially connect to.
     pub boot_nodes_enr: Vec<Enr>,
@@ -117,7 +122,7 @@ pub struct Config {
     /// Subscribe to all subnets for the duration of the runtime.
     pub subscribe_all_subnets: bool,
 
-    /// Import/aggregate all attestations recieved on subscribed subnets for the duration of the
+    /// Import/aggregate all attestations received on subscribed subnets for the duration of the
     /// runtime.
     pub import_all_attestations: bool,
 
@@ -152,6 +157,10 @@ pub struct Config {
 
     /// Configuration for the inbound rate limiter (requests received by this node).
     pub inbound_rate_limiter_config: Option<InboundRateLimiterConfig>,
+
+    /// Whether to disable logging duplicate gossip messages as WARN. If set to true, duplicate
+    /// errors will be logged at DEBUG level.
+    pub disable_duplicate_warn_logs: bool,
 }
 
 impl Config {
@@ -304,17 +313,17 @@ impl Default for Config {
                 .expect("The total rate limit has been specified"),
         );
         let listen_addresses = ListenAddress::V4(ListenAddr {
-            addr: Ipv4Addr::UNSPECIFIED,
-            disc_port: 9000,
-            quic_port: 9001,
-            tcp_port: 9000,
+            addr: DEFAULT_IPV4_ADDRESS,
+            disc_port: DEFAULT_DISC_PORT,
+            quic_port: DEFAULT_QUIC_PORT,
+            tcp_port: DEFAULT_TCP_PORT,
         });
 
         let discv5_listen_config =
             discv5::ListenConfig::from_ip(Ipv4Addr::UNSPECIFIED.into(), 9000);
 
         // discv5 configuration
-        let discv5_config = Discv5ConfigBuilder::new(discv5_listen_config)
+        let discv5_config = discv5::ConfigBuilder::new(discv5_listen_config)
             .enable_packet_filter()
             .session_cache_capacity(5000)
             .request_timeout(Duration::from_secs(1))
@@ -357,7 +366,7 @@ impl Default for Config {
             disable_discovery: false,
             disable_quic_support: false,
             upnp_enabled: true,
-            network_load: 3,
+            network_load: 4,
             private: false,
             subscribe_all_subnets: false,
             import_all_attestations: false,
@@ -369,6 +378,7 @@ impl Default for Config {
             outbound_rate_limiter_config: None,
             invalid_block_storage: None,
             inbound_rate_limiter_config: None,
+            disable_duplicate_warn_logs: false,
         }
     }
 }
@@ -416,7 +426,7 @@ impl From<u8> for NetworkLoad {
                 mesh_n_high: 10,
                 gossip_lazy: 3,
                 history_gossip: 3,
-                heartbeat_interval: Duration::from_millis(700),
+                heartbeat_interval: Duration::from_millis(1000),
             },
             4 => NetworkLoad {
                 name: "Average",
@@ -426,7 +436,7 @@ impl From<u8> for NetworkLoad {
                 mesh_n_high: 12,
                 gossip_lazy: 3,
                 history_gossip: 3,
-                heartbeat_interval: Duration::from_millis(700),
+                heartbeat_interval: Duration::from_millis(1000),
             },
             // 5 and above
             _ => NetworkLoad {
@@ -437,7 +447,7 @@ impl From<u8> for NetworkLoad {
                 mesh_n_high: 15,
                 gossip_lazy: 5,
                 history_gossip: 6,
-                heartbeat_interval: Duration::from_millis(500),
+                heartbeat_interval: Duration::from_millis(700),
             },
         }
     }
@@ -449,12 +459,6 @@ pub fn gossipsub_config(
     fork_context: Arc<ForkContext>,
     gossipsub_config_params: GossipsubConfigParams,
 ) -> gossipsub::Config {
-    // The function used to generate a gossipsub message id
-    // We use the first 8 bytes of SHA256(topic, data) for content addressing
-    let fast_gossip_message_id = |message: &gossipsub::RawMessage| {
-        let data = [message.topic.as_str().as_bytes(), &message.data].concat();
-        gossipsub::FastMessageId::from(&Sha256::digest(&data)[..8])
-    };
     fn prefix(
         prefix: [u8; 4],
         message: &gossipsub::Message,
@@ -462,7 +466,7 @@ pub fn gossipsub_config(
     ) -> Vec<u8> {
         let topic_bytes = message.topic.as_str().as_bytes();
         match fork_context.current_fork() {
-            ForkName::Altair | ForkName::Merge | ForkName::Capella => {
+            ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Deneb => {
                 let topic_len_bytes = topic_bytes.len().to_le_bytes();
                 let mut vec = Vec::with_capacity(
                     prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(),
                 );
@@ -506,13 +510,13 @@ pub fn gossipsub_config(
         .gossip_lazy(load.gossip_lazy)
.fanout_ttl(Duration::from_secs(60)) .history_length(12) + .flood_publish(false) .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large .history_gossip(load.history_gossip) .validate_messages() // require validation before propagation .validation_mode(gossipsub::ValidationMode::Anonymous) .duplicate_cache_time(DUPLICATE_CACHE_TIME) .message_id_fn(gossip_message_id) - .fast_message_id_fn(fast_gossip_message_id) .allow_self_origin(true) .build() .expect("valid gossipsub configuration") diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 3f46285a8..b0e0a01ee 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -1,12 +1,11 @@ //! Helper functions and an extension trait for Ethereum 2 ENRs. -pub use discv5::enr::{self, CombinedKey, EnrBuilder}; +pub use discv5::enr::CombinedKey; use super::enr_ext::CombinedKeyExt; use super::ENR_FILENAME; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; -use discv5::enr::EnrKey; use libp2p::identity::Keypair; use slog::{debug, warn}; use ssz::{Decode, Encode}; @@ -142,11 +141,13 @@ pub fn build_or_load_enr( Ok(local_enr) } -pub fn create_enr_builder_from_config( +/// Builds a lighthouse ENR given a `NetworkConfig`. +pub fn build_enr( + enr_key: &CombinedKey, config: &NetworkConfig, - enable_libp2p: bool, -) -> EnrBuilder { - let mut builder = EnrBuilder::new("v4"); + enr_fork_id: &EnrForkId, +) -> Result { + let mut builder = discv5::enr::Enr::builder(); let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; if let Some(ip) = maybe_ipv4_address { @@ -158,60 +159,58 @@ pub fn create_enr_builder_from_config( } if let Some(udp4_port) = config.enr_udp4_port { - builder.udp4(udp4_port); + builder.udp4(udp4_port.get()); } if let Some(udp6_port) = config.enr_udp6_port { - builder.udp6(udp6_port); + builder.udp6(udp6_port.get()); } - if enable_libp2p { - // Add QUIC fields to the ENR. - // Since QUIC is used as an alternative transport for the libp2p protocols, - // the related fields should only be added when both QUIC and libp2p are enabled - if !config.disable_quic_support { - // If we are listening on ipv4, add the quic ipv4 port. - if let Some(quic4_port) = config - .enr_quic4_port - .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.quic_port)) - { - builder.add_value(QUIC_ENR_KEY, &quic4_port); - } - - // If we are listening on ipv6, add the quic ipv6 port. - if let Some(quic6_port) = config - .enr_quic6_port - .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.quic_port)) - { - builder.add_value(QUIC6_ENR_KEY, &quic6_port); - } + // Add QUIC fields to the ENR. + // Since QUIC is used as an alternative transport for the libp2p protocols, + // the related fields should only be added when both QUIC and libp2p are enabled + if !config.disable_quic_support { + // If we are listening on ipv4, add the quic ipv4 port. + if let Some(quic4_port) = config.enr_quic4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC_ENR_KEY, &quic4_port.get()); } - // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. 
- let tcp4_port = config - .enr_tcp4_port - .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port)); - if let Some(tcp4_port) = tcp4_port { - builder.tcp4(tcp4_port); - } - - let tcp6_port = config - .enr_tcp6_port - .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port)); - if let Some(tcp6_port) = tcp6_port { - builder.tcp6(tcp6_port); + // If we are listening on ipv6, add the quic ipv6 port. + if let Some(quic6_port) = config.enr_quic6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC6_ENR_KEY, &quic6_port.get()); } } - builder -} -/// Builds a lighthouse ENR given a `NetworkConfig`. -pub fn build_enr( - enr_key: &CombinedKey, - config: &NetworkConfig, - enr_fork_id: &EnrForkId, -) -> Result { - let mut builder = create_enr_builder_from_config(config, true); + // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. + let tcp4_port = config.enr_tcp4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.tcp_port.try_into().ok()) + }); + if let Some(tcp4_port) = tcp4_port { + builder.tcp4(tcp4_port.get()); + } + + let tcp6_port = config.enr_tcp6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.tcp_port.try_into().ok()) + }); + if let Some(tcp6_port) = tcp6_port { + builder.tcp6(tcp6_port.get()); + } // set the `eth2` field on our ENR builder.add_value(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes()); diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 2efaa76ac..bae723560 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -366,9 +366,7 @@ mod tests { let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); - let enr = discv5::enr::EnrBuilder::new("v4") - .build(&secret_key) - .unwrap(); + let enr = discv5::enr::Enr::builder().build(&secret_key).unwrap(); let node_id = peer_id_to_node_id(&peer_id).unwrap(); assert_eq!(enr.node_id(), node_id); @@ -387,9 +385,7 @@ mod tests { let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); - let enr = discv5::enr::EnrBuilder::new("v4") - .build(&secret_key) - .unwrap(); + let enr = discv5::enr::Enr::builder().build(&secret_key).unwrap(); let node_id = peer_id_to_node_id(&peer_id).unwrap(); assert_eq!(enr.node_id(), node_id); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 4d8807336..829124e12 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -10,30 +10,29 @@ pub mod enr_ext; use crate::service::TARGET_SUBNET_PEERS; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use crate::{metrics, ClearDialError}; -use discv5::{enr::NodeId, Discv5, Discv5Event}; -pub use enr::{ - build_enr, create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr, CombinedKey, - Eth2Enr, -}; +use discv5::{enr::NodeId, Discv5}; +pub use enr::{build_enr, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2Enr}; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; pub use libp2p::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; 
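
Usage sketch for the reworked `build_enr` above (not part of the patch; the generic parameter, the `Result<Enr, String>` return type, and discv5's `CombinedKey::generate_secp256k1` are assumptions based on the surrounding hunks):

    use discv5::enr::CombinedKey;

    fn make_local_enr<T: EthSpec>(
        config: &NetworkConfig,
        fork_id: &EnrForkId,
    ) -> Result<Enr, String> {
        // ENR ports left unset in the config fall back to the listening ports, and
        // every ENR port is now a NonZeroU16, so a zero port can no longer be encoded.
        let enr_key = CombinedKey::generate_secp256k1();
        build_enr::<T>(&enr_key, config, fork_id)
    }
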
 use futures::stream::FuturesUnordered;
+use libp2p::multiaddr::Protocol;
 use libp2p::swarm::behaviour::{DialFailure, FromSwarm};
 use libp2p::swarm::THandlerInEvent;
 pub use libp2p::{
-    core::{ConnectedPoint, Multiaddr},
+    core::{transport::ListenerId, ConnectedPoint, Multiaddr},
     identity::PeerId,
     swarm::{
         dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler,
-        PollParameters, SubstreamProtocol, ToSwarm,
+        SubstreamProtocol, ToSwarm,
     },
 };
 use lru::LruCache;
 use slog::{crit, debug, error, info, trace, warn};
 use ssz::Encode;
+use std::num::NonZeroUsize;
 use std::{
     collections::{HashMap, VecDeque},
     net::{IpAddr, SocketAddr},
@@ -48,6 +47,7 @@ use types::{EnrForkId, EthSpec};
 
 mod subnet_predicate;
 pub use subnet_predicate::subnet_predicate;
+use types::non_zero_usize::new_non_zero_usize;
 
 /// Local ENR storage filename.
 pub const ENR_FILENAME: &str = "enr.dat";
@@ -69,6 +69,8 @@ const MAX_SUBNETS_IN_QUERY: usize = 3;
 pub const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16;
 /// The threshold for updating `min_ttl` on a connected peer.
 const DURATION_DIFFERENCE: Duration = Duration::from_millis(1);
+/// The capacity of the Discovery ENR cache.
+const ENR_CACHE_CAPACITY: NonZeroUsize = new_non_zero_usize(50);
 
 /// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl`
 /// of the peer if it is specified.
@@ -77,6 +79,19 @@ pub struct DiscoveredPeers {
     pub peers: HashMap<PeerId, Option<Instant>>,
 }
 
+/// Specifies which port numbers should be modified after the start of the discovery service.
+#[derive(Debug)]
+pub struct UpdatePorts {
+    /// TCP port associated with IPv4 address (if present)
+    pub tcp4: bool,
+    /// TCP port associated with IPv6 address (if present)
+    pub tcp6: bool,
+    /// QUIC port associated with IPv4 address (if present)
+    pub quic4: bool,
+    /// QUIC port associated with IPv6 address (if present)
+    pub quic6: bool,
+}
+
 #[derive(Clone, PartialEq)]
 struct SubnetQuery {
     subnet: Subnet,
@@ -129,15 +144,10 @@ enum EventStream {
    /// Awaiting an event stream to be generated. This is required due to the poll nature of
    /// `Discovery`
    Awaiting(
-        Pin<
-            Box<
-                dyn Future<Output = Result<mpsc::Receiver<Discv5Event>, discv5::Discv5Error>>
-                    + Send,
-            >,
-        >,
+        Pin<Box<dyn Future<Output = Result<mpsc::Receiver<discv5::Event>, discv5::Error>> + Send>>,
    ),
    /// The future has completed.
-    Present(mpsc::Receiver<Discv5Event>),
+    Present(mpsc::Receiver<discv5::Event>),
    // The future has failed or discv5 has been disabled. There are no events from discv5.
    InActive,
 }
@@ -177,12 +187,8 @@ pub struct Discovery<TSpec: EthSpec> {
     /// always false.
     pub started: bool,
 
-    /// This keeps track of whether an external UDP port change should also indicate an internal
-    /// TCP port change. As we cannot detect our external TCP port, we assume that the external UDP
-    /// port is also our external TCP port. This assumption only holds if the user has not
-    /// explicitly set their ENR TCP port via the CLI config. The first indicates tcp4 and the
-    /// second indicates tcp6.
-    update_tcp_port: (bool, bool),
+    /// Specifies whether various port numbers should be updated after the discovery service has been started.
+    update_ports: UpdatePorts,
 
     /// Logger for the discovery behaviour.
log: slog::Logger, @@ -300,13 +306,15 @@ impl Discovery { } } - let update_tcp_port = ( - config.enr_tcp4_port.is_none(), - config.enr_tcp6_port.is_none(), - ); + let update_ports = UpdatePorts { + tcp4: config.enr_tcp4_port.is_none(), + tcp6: config.enr_tcp6_port.is_none(), + quic4: config.enr_quic4_port.is_none(), + quic6: config.enr_quic6_port.is_none(), + }; Ok(Self { - cached_enrs: LruCache::new(50), + cached_enrs: LruCache::new(ENR_CACHE_CAPACITY), network_globals, find_peer_active: false, queued_queries: VecDeque::with_capacity(10), @@ -314,7 +322,7 @@ impl Discovery { discv5, event_stream, started: !config.disable_discovery, - update_tcp_port, + update_ports, log, enr_dir, }) @@ -555,8 +563,6 @@ impl Discovery { if let Ok(node_id) = peer_id_to_node_id(peer_id) { // If we could convert this peer id, remove it from the DHT and ban it from discovery. self.discv5.ban_node(&node_id, None); - // Remove the node from the routing table. - self.discv5.remove_node(&node_id); } for ip_address in ip_addresses { @@ -945,11 +951,7 @@ impl NetworkBehaviour for Discovery { } // Main execution loop to drive the behaviour - fn poll( - &mut self, - cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { if !self.started { return Poll::Pending; } @@ -986,7 +988,7 @@ impl NetworkBehaviour for Discovery { match event { // We filter out unwanted discv5 events here and only propagate useful results to // the peer manager. - Discv5Event::Discovered(_enr) => { + discv5::Event::Discovered(_enr) => { // Peers that get discovered during a query but are not contactable or // don't match a predicate can end up here. For debugging purposes we // log these to see if we are unnecessarily dropping discovered peers @@ -999,15 +1001,15 @@ impl NetworkBehaviour for Discovery { } */ } - Discv5Event::SocketUpdated(socket_addr) => { + discv5::Event::SocketUpdated(socket_addr) => { info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port()); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); metrics::check_nat(); // Discv5 will have updated our local ENR. We save the updated version // to disk. - if (self.update_tcp_port.0 && socket_addr.is_ipv4()) - || (self.update_tcp_port.1 && socket_addr.is_ipv6()) + if (self.update_ports.tcp4 && socket_addr.is_ipv4()) + || (self.update_ports.tcp6 && socket_addr.is_ipv6()) { // Update the TCP port in the ENR self.discv5.update_local_enr_socket(socket_addr, true); @@ -1020,10 +1022,10 @@ impl NetworkBehaviour for Discovery { // NOTE: We assume libp2p itself can keep track of IP changes and we do // not inform it about IP changes found via discovery. } - Discv5Event::EnrAdded { .. } - | Discv5Event::TalkRequest(_) - | Discv5Event::NodeInserted { .. } - | Discv5Event::SessionEstablished { .. } => {} // Ignore all other discv5 server events + discv5::Event::EnrAdded { .. } + | discv5::Event::TalkRequest(_) + | discv5::Event::NodeInserted { .. } + | discv5::Event::SessionEstablished { .. } => {} // Ignore all other discv5 server events } } } @@ -1031,23 +1033,80 @@ impl NetworkBehaviour for Discovery { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::DialFailure(DialFailure { peer_id, error, .. 
}) => { self.on_dial_failure(peer_id, error) } - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => { + FromSwarm::NewListenAddr(ev) => { + let addr = ev.addr; + let listener_id = ev.listener_id; + + trace!(self.log, "Received NewListenAddr event from swarm"; "listener_id" => ?listener_id, "addr" => ?addr); + + let mut addr_iter = addr.iter(); + + let attempt_enr_update = match addr_iter.next() { + Some(Protocol::Ip4(_)) => match (addr_iter.next(), addr_iter.next()) { + (Some(Protocol::Tcp(port)), None) => { + if !self.update_ports.tcp4 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_tcp_port(port) + } + (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { + if !self.update_ports.quic4 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_quic_port(port) + } + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + return; + } + }, + Some(Protocol::Ip6(_)) => match (addr_iter.next(), addr_iter.next()) { + (Some(Protocol::Tcp(port)), None) => { + if !self.update_ports.tcp6 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_tcp_port(port) + } + (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { + if !self.update_ports.quic6 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_quic_port(port) + } + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + return; + } + }, + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (no IP)"; "addr" => ?addr); + return; + } + }; + + let local_enr: Enr = self.discv5.local_enr(); + + match attempt_enr_update { + Ok(_) => { + info!(self.log, "Updated local ENR"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()) + } + Err(e) => warn!(self.log, "Failed to update ENR"; "error" => ?e), + } + } + _ => { // Ignore events not relevant to discovery } } @@ -1077,7 +1136,6 @@ impl Discovery { mod tests { use super::*; use crate::rpc::methods::{MetaData, MetaDataV2}; - use enr::EnrBuilder; use libp2p::identity::secp256k1; use slog::{o, Drain}; use types::{BitVector, MinimalEthSpec, SubnetId}; @@ -1160,7 +1218,7 @@ mod tests { } fn make_enr(subnet_ids: Vec) -> Enr { - let mut builder = EnrBuilder::new("v4"); + let mut builder = Enr::builder(); let keypair = secp256k1::Keypair::generate(); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 7467fb7f0..ea1ab07e3 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -115,7 +115,6 @@ pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; pub use libp2p; -pub use libp2p::bandwidth::BandwidthSinks; pub 
use libp2p::gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash};
 pub use libp2p::{core::ConnectedPoint, PeerId, Swarm};
 pub use libp2p::{multiaddr, Multiaddr};
diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
index 3c9b29238..4316c0d07 100644
--- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs
+++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
@@ -326,8 +326,10 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             // considered a priority. We have pre-allocated some extra priority slots for these
             // peers as specified by PRIORITY_PEER_EXCESS. Therefore we dial these peers, even
             // if we are already at our max_peer limit.
-            if min_ttl.is_some() && connected_or_dialing + to_dial_peers < self.max_priority_peers()
-                || connected_or_dialing + to_dial_peers < self.max_peers()
+            if !self.peers_to_dial.contains(&enr)
+                && ((min_ttl.is_some()
+                    && connected_or_dialing + to_dial_peers < self.max_priority_peers())
+                    || connected_or_dialing + to_dial_peers < self.max_peers())
             {
                 // This should be updated with the peer dialing. In fact created once the peer is
                 // dialed
@@ -337,9 +339,11 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                         .write()
                         .update_min_ttl(&enr.peer_id(), min_ttl);
                 }
-                debug!(self.log, "Dialing discovered peer"; "peer_id" => %enr.peer_id());
-                self.dial_peer(enr);
-                to_dial_peers += 1;
+                let peer_id = enr.peer_id();
+                if self.dial_peer(enr) {
+                    debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id);
+                    to_dial_peers += 1;
+                }
             }
         }
 
@@ -401,7 +405,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
     /* Notifications from the Swarm */
 
     /// A peer is being dialed.
-    pub fn dial_peer(&mut self, peer: Enr) {
+    /// Returns true if this peer will be dialed.
+    pub fn dial_peer(&mut self, peer: Enr) -> bool {
         if self
             .network_globals
             .peers
@@ -409,13 +414,16 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             .should_dial(&peer.peer_id())
         {
             self.peers_to_dial.push(peer);
+            true
+        } else {
+            false
         }
     }
 
     /// Reports if a peer is banned or not.
     ///
     /// This is used to determine if we should accept incoming connections.
-    pub fn ban_status(&self, peer_id: &PeerId) -> BanResult {
+    pub fn ban_status(&self, peer_id: &PeerId) -> Option<BanResult> {
         self.network_globals.peers.read().ban_status(peer_id)
     }
 
@@ -517,6 +525,11 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             RPCError::ErrorResponse(code, _) => match code {
                 RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError,
                 RPCResponseErrorCode::ResourceUnavailable => {
+                    // Don't ban on this because we want to retry with a block by root request.
+                    if matches!(protocol, Protocol::BlobsByRoot) {
+                        return;
+                    }
+
                     // NOTE: This error only makes sense for the `BlocksByRange` and `BlocksByRoot`
                     // protocols.
// @@ -545,11 +558,14 @@ impl PeerManager { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => PeerAction::LowToleranceError, + Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, }, + RPCResponseErrorCode::BlobsNotFoundForBlock => PeerAction::LowToleranceError, }, RPCError::SSZDecodeError(_) => PeerAction::Fatal, RPCError::UnsupportedProtocol => { @@ -561,6 +577,8 @@ impl PeerManager { Protocol::Ping => PeerAction::Fatal, Protocol::BlocksByRange => return, Protocol::BlocksByRoot => return, + Protocol::BlobsByRange => return, + Protocol::BlobsByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::MetaData => PeerAction::Fatal, @@ -577,6 +595,8 @@ impl PeerManager { Protocol::Ping => PeerAction::LowToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::BlobsByRange => PeerAction::MidToleranceError, + Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::Goodbye => return, Protocol::MetaData => return, @@ -803,7 +823,7 @@ impl PeerManager { ) -> bool { { let mut peerdb = self.network_globals.peers.write(); - if !matches!(peerdb.ban_status(peer_id), BanResult::NotBanned) { + if peerdb.ban_status(peer_id).is_some() { // don't connect if the peer is banned error!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id); } @@ -904,7 +924,7 @@ impl PeerManager { { self.max_outbound_dialing_peers() .saturating_sub(dialing_peers) - - peer_count + .saturating_sub(peer_count) } else { 0 }; @@ -1043,7 +1063,7 @@ impl PeerManager { Subnet::Attestation(_) => { subnet_to_peer .entry(subnet) - .or_insert_with(Vec::new) + .or_default() .push((*peer_id, info.clone())); } Subnet::SyncCommittee(id) => { diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index fedb876bb..da205d169 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -1,5 +1,6 @@ //! Implementation of [`NetworkBehaviour`] for the [`PeerManager`]. 
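
With `ban_status` now returning `Option<BanResult>` (see the peer-manager hunks above) and `BanResult` implementing `Display` and `Error`, a ban reason can be passed straight to libp2p's connection gating as the denial cause. A hypothetical helper, sketched under those assumptions:

    use libp2p::swarm::ConnectionDenied;

    fn deny_if_banned<TSpec: EthSpec>(
        peer_manager: &PeerManager<TSpec>,
        peer_id: &PeerId,
    ) -> Result<(), ConnectionDenied> {
        match peer_manager.ban_status(peer_id) {
            // The ban reason becomes the error reported for the denied connection.
            Some(reason) => Err(ConnectionDenied::new(reason)),
            None => Ok(()),
        }
    }
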
+use std::net::IpAddr; use std::task::{Context, Poll}; use futures::StreamExt; @@ -8,17 +9,17 @@ use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; -use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; -use slog::{debug, error}; +use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, ToSwarm}; +use slog::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; +use crate::peer_manager::peerdb::BanResult; use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; -use super::peerdb::BanResult; -use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; +use super::{ConnectingType, PeerManager, PeerManagerEvent}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = ConnectionHandler; @@ -35,11 +36,7 @@ impl NetworkBehaviour for PeerManager { // no events from the dummy handler } - fn poll( - &mut self, - cx: &mut Context<'_>, - _params: &mut impl PollParameters, - ) -> Poll> { + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { // perform the heartbeat when necessary while self.heartbeat.poll_tick(cx).is_ready() { self.heartbeat(); @@ -120,7 +117,7 @@ impl NetworkBehaviour for PeerManager { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, @@ -154,41 +151,73 @@ impl NetworkBehaviour for PeerManager { // TODO: we likely want to check this against our assumed external tcp // address } - FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) => { + _ => { + // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release + // notes more than compiler feedback // The rest of the events we ignore since they are handled in their associated // `SwarmEvent` } } } + fn handle_pending_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _local_addr: &libp2p::Multiaddr, + remote_addr: &libp2p::Multiaddr, + ) -> Result<(), ConnectionDenied> { + // get the IP address to verify it's not banned. + let ip = match remote_addr.iter().next() { + Some(libp2p::multiaddr::Protocol::Ip6(ip)) => IpAddr::V6(ip), + Some(libp2p::multiaddr::Protocol::Ip4(ip)) => IpAddr::V4(ip), + _ => { + return Err(ConnectionDenied::new(format!( + "Connection to peer rejected: invalid multiaddr: {remote_addr}" + ))) + } + }; + + if self.network_globals.peers.read().is_ip_banned(&ip) { + return Err(ConnectionDenied::new(format!( + "Connection to peer rejected: peer {ip} is banned" + ))); + } + + Ok(()) + } + fn handle_established_inbound_connection( &mut self, _connection_id: ConnectionId, - _peer: PeerId, + peer_id: PeerId, _local_addr: &libp2p::Multiaddr, - _remote_addr: &libp2p::Multiaddr, - ) -> Result, libp2p::swarm::ConnectionDenied> { - // TODO: we might want to check if we accept this peer or not in the future. 
+ remote_addr: &libp2p::Multiaddr, + ) -> Result, ConnectionDenied> { + trace!(self.log, "Inbound connection"; "peer_id" => %peer_id, "multiaddr" => %remote_addr); + // We already checked if the peer was banned on `handle_pending_inbound_connection`. + if let Some(BanResult::BadScore) = self.ban_status(&peer_id) { + return Err(ConnectionDenied::new( + "Connection to peer rejected: peer has a bad score", + )); + } Ok(ConnectionHandler) } fn handle_established_outbound_connection( &mut self, _connection_id: ConnectionId, - _peer: PeerId, - _addr: &libp2p::Multiaddr, + peer_id: PeerId, + addr: &libp2p::Multiaddr, _role_override: libp2p::core::Endpoint, ) -> Result, libp2p::swarm::ConnectionDenied> { - // TODO: we might want to check if we accept this peer or not in the future. - Ok(ConnectionHandler) + trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); + match self.ban_status(&peer_id) { + Some(cause) => { + error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); + Err(ConnectionDenied::new(cause)) + } + None => Ok(ConnectionHandler), + } } } @@ -215,10 +244,7 @@ impl PeerManager { // increment prometheus metrics if self.metrics_enabled { - let remote_addr = match endpoint { - ConnectedPoint::Dialer { address, .. } => address, - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - }; + let remote_addr = endpoint.get_remote_address(); match remote_addr.iter().find(|proto| { matches!( proto, @@ -241,28 +267,6 @@ impl PeerManager { metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); } - // Check to make sure the peer is not supposed to be banned - match self.ban_status(&peer_id) { - // TODO: directly emit the ban event? - BanResult::BadScore => { - // This is a faulty state - error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); - // Disconnect the peer. - self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); - // Re-ban the peer to prevent repeated errors. - self.events.push(PeerManagerEvent::Banned(peer_id, vec![])); - return; - } - BanResult::BannedIp(ip_addr) => { - // A good peer has connected to us via a banned IP address. We ban the peer and - // prevent future connections. - debug!(self.log, "Peer connected via banned IP. Banning"; "peer_id" => %peer_id, "banned_ip" => %ip_addr); - self.goodbye_peer(&peer_id, GoodbyeReason::BannedIP, ReportSource::PeerManager); - return; - } - BanResult::NotBanned => {} - } - // Count dialing peers in the limit if the peer dialed us. let count_dialing = endpoint.is_listener(); // Check the connection limits @@ -326,11 +330,7 @@ impl PeerManager { // reference so that peer manager can track this peer. self.inject_disconnect(&peer_id); - let remote_addr = match endpoint { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - ConnectedPoint::Dialer { address, .. 
} => address, - }; - + let remote_addr = endpoint.get_remote_address(); // Update the prometheus metrics if self.metrics_enabled { match remote_addr.iter().find(|proto| { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 4a1efe8f2..a6bf3ffec 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -3,10 +3,13 @@ use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use rand::seq::SliceRandom; use score::{PeerAction, ReportSource, Score, ScoreState}; use slog::{crit, debug, error, trace, warn}; -use std::cmp::Ordering; -use std::collections::{HashMap, HashSet}; use std::net::IpAddr; use std::time::Instant; +use std::{cmp::Ordering, fmt::Display}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Formatter, +}; use sync_status::SyncStatus; use types::EthSpec; @@ -136,26 +139,18 @@ impl PeerDB { } } - /// Returns the current [`BanResult`] of the peer. This doesn't check the connection state, rather the + /// Returns the current [`BanResult`] of the peer if banned. This doesn't check the connection state, rather the /// underlying score of the peer. A peer may be banned but still in the connected state /// temporarily. /// /// This is used to determine if we should accept incoming connections or not. - pub fn ban_status(&self, peer_id: &PeerId) -> BanResult { - if let Some(peer) = self.peers.get(peer_id) { - match peer.score_state() { - ScoreState::Banned => BanResult::BadScore, - _ => { - if let Some(ip) = self.ip_is_banned(peer) { - BanResult::BannedIp(ip) - } else { - BanResult::NotBanned - } - } - } - } else { - BanResult::NotBanned - } + pub fn ban_status(&self, peer_id: &PeerId) -> Option { + self.peers + .get(peer_id) + .and_then(|peer| match peer.score_state() { + ScoreState::Banned => Some(BanResult::BadScore), + _ => self.ip_is_banned(peer).map(BanResult::BannedIp), + }) } /// Checks if the peer's known addresses are currently banned. @@ -1183,23 +1178,25 @@ pub enum BanOperation { } /// When checking if a peer is banned, it can be banned for multiple reasons. +#[derive(Copy, Clone, Debug)] pub enum BanResult { /// The peer's score is too low causing it to be banned. BadScore, /// The peer should be banned because it is connecting from a banned IP address. BannedIp(IpAddr), - /// The peer is not banned. - NotBanned, } -// Helper function for unit tests -#[cfg(test)] -impl BanResult { - pub fn is_banned(&self) -> bool { - !matches!(self, BanResult::NotBanned) +impl Display for BanResult { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BanResult::BadScore => write!(f, "Peer has a bad score"), + BanResult::BannedIp(addr) => write!(f, "Peer address: {} is banned", addr), + } } } +impl std::error::Error for BanResult {} + #[derive(Default)] pub struct BannedPeersCount { /// The number of banned peers in the database. 
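
Since `BanResult` loses its `NotBanned` variant and gains `Display`/`Error` impls in the hunk above, a ban reason now formats directly into logs and error messages. A small illustrative test, not part of the patch (the expected strings are taken from the `Display` impl above):

    #[test]
    fn ban_result_display() {
        let by_ip = BanResult::BannedIp("192.0.2.1".parse().unwrap());
        assert_eq!(by_ip.to_string(), "Peer address: 192.0.2.1 is banned");
        assert_eq!(BanResult::BadScore.to_string(), "Peer has a bad score");
    }
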
@@ -1492,7 +1489,7 @@ mod tests { assert!(the_best.is_some()); // Consistency check let best_peers = pdb.best_peers_by_status(PeerInfo::is_connected); - assert_eq!(the_best.unwrap(), best_peers.get(0).unwrap().0); + assert_eq!(the_best.unwrap(), best_peers.first().unwrap().0); } #[test] @@ -1852,11 +1849,11 @@ mod tests { } //check that ip1 and ip2 are banned but ip3-5 not - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(!pdb.ban_status(&p3).is_banned()); - assert!(!pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_none()); + assert!(pdb.ban_status(&p4).is_none()); + assert!(pdb.ban_status(&p5).is_none()); //ban also the last peer in peers let _ = pdb.report_peer( @@ -1868,11 +1865,11 @@ mod tests { pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); //check that ip1-ip4 are banned but ip5 not - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(pdb.ban_status(&p3).is_banned()); - assert!(pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_some()); + assert!(pdb.ban_status(&p4).is_some()); + assert!(pdb.ban_status(&p5).is_none()); //peers[0] gets unbanned reset_score(&mut pdb, &peers[0]); @@ -1880,11 +1877,11 @@ mod tests { let _ = pdb.shrink_to_fit(); //nothing changed - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(pdb.ban_status(&p3).is_banned()); - assert!(pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_some()); + assert!(pdb.ban_status(&p4).is_some()); + assert!(pdb.ban_status(&p5).is_none()); //peers[1] gets unbanned reset_score(&mut pdb, &peers[1]); @@ -1892,11 +1889,11 @@ mod tests { let _ = pdb.shrink_to_fit(); //all ips are unbanned - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); - assert!(!pdb.ban_status(&p3).is_banned()); - assert!(!pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); + assert!(pdb.ban_status(&p3).is_none()); + assert!(pdb.ban_status(&p4).is_none()); + assert!(pdb.ban_status(&p5).is_none()); } #[test] @@ -1921,8 +1918,8 @@ mod tests { } // check ip is banned - assert!(pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_none()); // unban a peer reset_score(&mut pdb, &peers[0]); @@ -1930,8 +1927,8 @@ mod tests { let _ = pdb.shrink_to_fit(); // check not banned anymore - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); // unban all peers for p in &peers { @@ -1950,8 +1947,8 @@ mod tests { } // both IP's are now banned - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); // unban all peers for p in &peers { @@ -1967,16 +1964,16 @@ mod tests { } // nothing is banned - 
assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); // reban last peer let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&peers[0]); //Ip's are banned again - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); } #[test] diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index bafa355d6..877d72581 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -330,13 +330,15 @@ impl Eq for Score {} impl PartialOrd for Score { fn partial_cmp(&self, other: &Score) -> Option { - self.score().partial_cmp(&other.score()) + Some(self.cmp(other)) } } impl Ord for Score { fn cmp(&self, other: &Score) -> std::cmp::Ordering { - self.partial_cmp(other).unwrap_or(std::cmp::Ordering::Equal) + self.score() + .partial_cmp(&other.score()) + .unwrap_or(std::cmp::Ordering::Equal) } } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 943d4a3bc..4085ac17b 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -194,16 +194,19 @@ mod tests { let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); + let deneb_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -313,7 +316,7 @@ mod tests { )); // Request limits - let limit = protocol_id.rpc_request_limits(); + let limit = protocol_id.rpc_request_limits(&fork_context.spec); let mut max = encode_len(limit.max + 1); let mut codec = SSZSnappyOutboundCodec::::new( protocol_id.clone(), diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index f1d94da7e..7a7f2969f 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -15,10 +15,11 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; -use types::light_client_bootstrap::LightClientBootstrap; +use types::ChainSpec; use types::{ - EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockMerge, + BlobSidecar, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, + RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockCapella, 
SignedBeaconBlockDeneb, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -71,6 +72,8 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), + RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => @@ -132,14 +135,13 @@ impl Decoder for SSZSnappyInboundCodec { if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 { return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))); } - let length = match handle_length(&mut self.inner, &mut self.len, src)? { - Some(len) => len, - None => return Ok(None), + let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { + return Ok(None); }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of // packet size for ssz container corresponding to `self.protocol`. - let ssz_limits = self.protocol.rpc_request_limits(); + let ssz_limits = self.protocol.rpc_request_limits(&self.fork_context.spec); if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { return Err(RPCError::InvalidData(format!( "RPC request length for protocol {:?} is out of bounds, length {}", @@ -160,7 +162,11 @@ impl Decoder for SSZSnappyInboundCodec { let n = reader.get_ref().get_ref().position(); self.len = None; let _read_bytes = src.split_to(n as usize); - handle_rpc_request(self.protocol.versioned_protocol, &decoded_buffer) + handle_rpc_request( + self.protocol.versioned_protocol, + &decoded_buffer, + &self.fork_context.spec, + ) } Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), } @@ -222,6 +228,8 @@ impl Encoder> for SSZSnappyOutboundCodec< BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(), BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(), }, + OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), + OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode }; @@ -272,9 +280,8 @@ impl Decoder for SSZSnappyOutboundCodec { return Ok(None); } } - let length = match handle_length(&mut self.inner, &mut self.len, src)? { - Some(len) => len, - None => return Ok(None), + let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { + return Ok(None); }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -284,8 +291,8 @@ impl Decoder for SSZSnappyOutboundCodec { .rpc_response_limits::(&self.fork_context); if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { return Err(RPCError::InvalidData(format!( - "RPC response length is out of bounds, length {}", - length + "RPC response length is out of bounds, length {}, max {}, min {}", + length, ssz_limits.max, ssz_limits.min ))); } // Calculate worst case compression length for given uncompressed length @@ -319,9 +326,8 @@ impl OutboundCodec> for SSZSnappyOutbound &mut self, src: &mut BytesMut, ) -> Result, RPCError> { - let length = match handle_length(&mut self.inner, &mut self.len, src)? { - Some(len) => len, - None => return Ok(None), + let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? 
else { + return Ok(None); }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -376,7 +382,7 @@ fn handle_error( Ok(None) } } - _ => Err(err).map_err(RPCError::from), + _ => Err(RPCError::from(err)), } } @@ -396,22 +402,24 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! + SignedBeaconBlock::Deneb { .. } => { + fork_context.to_context_bytes(ForkName::Deneb) + } SignedBeaconBlock::Capella { .. } => { - // Capella context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Capella) } SignedBeaconBlock::Merge { .. } => { - // Merge context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Merge) } SignedBeaconBlock::Altair { .. } => { - // Altair context being `None` implies that "altair never happened". - // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case. fork_context.to_context_bytes(ForkName::Altair) } SignedBeaconBlock::Base { .. } => Some(fork_context.genesis_context_bytes()), }; } + if let RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) = rpc_variant { + return fork_context.to_context_bytes(ForkName::Deneb); + } } } None @@ -448,6 +456,7 @@ fn handle_length( fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], + spec: &ChainSpec, ) -> Result>, RPCError> { match versioned_protocol { SupportedProtocol::StatusV1 => Ok(Some(InboundRequest::Status( @@ -464,14 +473,31 @@ fn handle_rpc_request( ))), SupportedProtocol::BlocksByRootV2 => Ok(Some(InboundRequest::BlocksByRoot( BlocksByRootRequest::V2(BlocksByRootRequestV2 { - block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, + block_roots: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_blocks as usize, + )?, }), ))), SupportedProtocol::BlocksByRootV1 => Ok(Some(InboundRequest::BlocksByRoot( BlocksByRootRequest::V1(BlocksByRootRequestV1 { - block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, + block_roots: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_blocks as usize, + )?, }), ))), + SupportedProtocol::BlobsByRangeV1 => Ok(Some(InboundRequest::BlobsByRange( + BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), + SupportedProtocol::BlobsByRootV1 => { + Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest { + blob_ids: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_blob_sidecars as usize, + )?, + }))) + } SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -526,6 +552,38 @@ fn handle_rpc_response( SupportedProtocol::BlocksByRootV1 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), + SupportedProtocol::BlobsByRangeV1 => match fork_name { + Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRange(Arc::new( + BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))), + Some(_) => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for blobs by range".to_string(), + )), + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, + SupportedProtocol::BlobsByRootV1 => match fork_name 
{ + Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRoot(Arc::new( + BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))), + Some(_) => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for blobs by root".to_string(), + )), + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -555,6 +613,9 @@ fn handle_rpc_response( decoded_buffer, )?), )))), + Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), + )))), None => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, format!( @@ -578,6 +639,9 @@ fn handle_rpc_response( decoded_buffer, )?), )))), + Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), + )))), None => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, format!( @@ -598,9 +662,13 @@ fn context_bytes_to_fork_name( .from_context_bytes(context_bytes) .cloned() .ok_or_else(|| { + let encoded = hex::encode(context_bytes); RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - "Context bytes does not correspond to a valid fork".to_string(), + format!( + "Context bytes {} do not correspond to a valid fork", + encoded + ), ) }) } @@ -615,8 +683,9 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock, - Epoch, ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, + blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, + BeaconBlockMerge, ChainSpec, EmptyBlock, Epoch, ForkContext, FullPayload, Hash256, + Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; @@ -630,16 +699,19 @@ mod tests { let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); + let deneb_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -657,6 +729,10 @@ mod tests { SignedBeaconBlock::from_block(full_block, Signature::empty()) } + fn empty_blob_sidecar() -> Arc> { + Arc::new(BlobSidecar::empty()) + } + /// Merge block with length < max_rpc_size. 
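
Each fork-dependent decode arm above is keyed on the fork name recovered from the stream's context bytes (via `from_context_bytes`), and blob responses are rejected outright for any pre-Deneb fork. A reduced sketch of that dispatch, with a raw byte payload standing in for the SSZ container:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Fork {
    Base,
    Altair,
    Merge,
    Capella,
    Deneb,
}

// Minimal sketch: recover the fork from the 4-byte context prefix, then
// only accept blob payloads once the fork is Deneb. The real decoder calls
// BlobSidecar::from_ssz_bytes where this simply returns the raw payload.
fn decode_blob_chunk(
    context_to_fork: &HashMap<[u8; 4], Fork>,
    context: [u8; 4],
    payload: &[u8],
) -> Result<Vec<u8>, String> {
    match context_to_fork.get(&context) {
        Some(Fork::Deneb) => Ok(payload.to_vec()),
        Some(other) => Err(format!("invalid fork {:?} for blobs by root", other)),
        None => Err(format!(
            "context bytes {:02x?} do not correspond to a valid fork",
            context
        )),
    }
}
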
fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockMerge<_, FullPayload> = @@ -705,12 +781,29 @@ mod tests { OldBlocksByRangeRequest::new(0, 10, 1) } - fn bbroot_request_v1() -> BlocksByRootRequest { - BlocksByRootRequest::new_v1(vec![Hash256::zero()].into()) + fn blbrange_request() -> BlobsByRangeRequest { + BlobsByRangeRequest { + start_slot: 0, + count: 10, + } } - fn bbroot_request_v2() -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![Hash256::zero()].into()) + fn bbroot_request_v1(spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new_v1(vec![Hash256::zero()], spec) + } + + fn bbroot_request_v2(spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![Hash256::zero()], spec) + } + + fn blbroot_request(spec: &ChainSpec) -> BlobsByRootRequest { + BlobsByRootRequest::new( + vec![BlobIdentifier { + block_root: Hash256::zero(), + index: 0, + }], + spec, + ) } fn ping_message() -> Ping { @@ -846,6 +939,12 @@ mod tests { OutboundRequest::BlocksByRoot(bbroot) => { assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) } + OutboundRequest::BlobsByRange(blbrange) => { + assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange)) + } + OutboundRequest::BlobsByRoot(bbroot) => { + assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot)) + } OutboundRequest::Ping(ping) => { assert_eq!(decoded, InboundRequest::Ping(ping)) } @@ -952,6 +1051,26 @@ mod tests { ), Ok(Some(RPCResponse::MetaData(metadata()))), ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRangeV1, + RPCCodedResponse::Success(RPCResponse::BlobsByRange(empty_blob_sidecar())), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::BlobsByRange(empty_blob_sidecar()))), + ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRootV1, + RPCCodedResponse::Success(RPCResponse::BlobsByRoot(empty_blob_sidecar())), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))), + ); } // Test RPCResponse encoding/decoding for V1 messages @@ -1288,20 +1407,22 @@ mod tests { #[test] fn test_encode_then_decode_request() { + let chain_spec = Spec::default_spec(); + let requests: &[OutboundRequest] = &[ OutboundRequest::Ping(ping_message()), OutboundRequest::Status(status_message()), OutboundRequest::Goodbye(GoodbyeReason::Fault), OutboundRequest::BlocksByRange(bbrange_request_v1()), OutboundRequest::BlocksByRange(bbrange_request_v2()), - OutboundRequest::BlocksByRoot(bbroot_request_v1()), - OutboundRequest::BlocksByRoot(bbroot_request_v2()), + OutboundRequest::BlocksByRoot(bbroot_request_v1(&chain_spec)), + OutboundRequest::BlocksByRoot(bbroot_request_v2(&chain_spec)), OutboundRequest::MetaData(MetadataRequest::new_v1()), + OutboundRequest::BlobsByRange(blbrange_request()), + OutboundRequest::BlobsByRoot(blbroot_request(&chain_spec)), OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; - let chain_spec = Spec::default_spec(); - for req in requests.iter() { for fork_name in ForkName::list_all() { encode_then_decode_request(req.clone(), fork_name, &chain_spec); diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index a0f3acaf7..989514919 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -4,9 +4,9 @@ use std::{ time::Duration, }; -use super::{methods, rate_limiter::Quota, Protocol}; +use super::{rate_limiter::Quota, 
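
The `spec`-taking constructors above replace compile-time typenum bounds with limits read from the runtime `ChainSpec`. A toy version of the idea behind `RuntimeVariableList` (the capping behaviour of `from_vec` is an assumption here, not a guarantee of the real type):

/// Toy stand-in for a list whose maximum length is fixed at construction
/// time instead of in the type system.
#[derive(Clone, Debug, PartialEq)]
pub struct RuntimeBoundedList<T> {
    items: Vec<T>,
    max_len: usize,
}

impl<T> RuntimeBoundedList<T> {
    /// Assumed to mirror `RuntimeVariableList::from_vec`: cap the input
    /// at `max_len` rather than erroring.
    pub fn from_vec(mut items: Vec<T>, max_len: usize) -> Self {
        items.truncate(max_len);
        Self { items, max_len }
    }

    pub fn len(&self) -> usize {
        self.items.len()
    }

    pub fn max_len(&self) -> usize {
        self.max_len
    }
}
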
Protocol}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; /// Auxiliary struct to aid on configuration parsing. /// @@ -89,6 +89,8 @@ pub struct RateLimiterConfig { pub(super) goodbye_quota: Quota, pub(super) blocks_by_range_quota: Quota, pub(super) blocks_by_root_quota: Quota, + pub(super) blobs_by_range_quota: Quota, + pub(super) blobs_by_root_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, } @@ -97,9 +99,10 @@ impl RateLimiterConfig { pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); - pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = - Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(1024, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(768, 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); } @@ -112,6 +115,8 @@ impl Default for RateLimiterConfig { goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, + blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, } } @@ -136,6 +141,8 @@ impl Debug for RateLimiterConfig { .field("goodbye", fmt_q!(&self.goodbye_quota)) .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota)) + .field("blobs_by_root", fmt_q!(&self.blobs_by_root_quota)) .finish() } } @@ -154,6 +161,8 @@ impl FromStr for RateLimiterConfig { let mut goodbye_quota = None; let mut blocks_by_range_quota = None; let mut blocks_by_root_quota = None; + let mut blobs_by_range_quota = None; + let mut blobs_by_root_quota = None; let mut light_client_bootstrap_quota = None; for proto_def in s.split(';') { @@ -164,6 +173,8 @@ impl FromStr for RateLimiterConfig { Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota), Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota), Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), + Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota), + Protocol::BlobsByRoot => blobs_by_root_quota = blobs_by_root_quota.or(quota), Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -180,6 +191,9 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), blocks_by_root_quota: blocks_by_root_quota .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + blobs_by_range_quota: blobs_by_range_quota + .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), + blobs_by_root_quota: blobs_by_root_quota.unwrap_or(Self::DEFAULT_BLOBS_BY_ROOT_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), }) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 36a5abc08..03f4761ff 100644 --- 
a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -1,7 +1,7 @@ #![allow(clippy::type_complexity)] #![allow(clippy::cognitive_complexity)] -use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, ResponseTermination}; +use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode}; use super::outbound::OutboundRequestContainer; use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend, ReqId}; @@ -12,8 +12,7 @@ use futures::prelude::*; use futures::{Sink, SinkExt}; use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, StreamUpgradeError, - SubstreamProtocol, + FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; use libp2p::swarm::Stream; use slog::{crit, debug, trace, warn}; @@ -42,10 +41,21 @@ const MAX_INBOUND_SUBSTREAMS: usize = 32; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct SubstreamId(usize); +impl SubstreamId { + pub fn new(id: usize) -> Self { + Self(id) + } +} + type InboundSubstream = InboundFramed; /// Events the handler emits to the behaviour. -pub type HandlerEvent = Result, HandlerErr>; +#[derive(Debug)] +pub enum HandlerEvent { + Ok(RPCReceived), + Err(HandlerErr), + Close(RPCError), +} /// An error encountered by the handler. #[derive(Debug)] @@ -243,11 +253,12 @@ where } // We now drive to completion communications already dialed/established while let Some((id, req)) = self.dial_queue.pop() { - self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto: req.versioned_protocol().protocol(), - id, - })); + self.events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto: req.versioned_protocol().protocol(), + id, + })); } // Queue our goodbye message. @@ -267,11 +278,13 @@ where HandlerState::Active => { self.dial_queue.push((id, req)); } - _ => self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto: req.versioned_protocol().protocol(), - id, - })), + _ => self + .events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto: req.versioned_protocol().protocol(), + id, + })), } } @@ -280,9 +293,7 @@ where // wrong state a response will fail silently. fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse) { // check if the stream matching the response still exists - let inbound_info = if let Some(info) = self.inbound_substreams.get_mut(&inbound_id) { - info - } else { + let Some(inbound_info) = self.inbound_substreams.get_mut(&inbound_id) else { if !matches!(response, RPCCodedResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses trace!(self.log, "Inbound stream has expired. 
Response not sent";
@@ -290,10 +301,9 @@ where
            }
            return;
        };
-
        // If the response we are sending is an error, report back for handling
        if let RPCCodedResponse::Error(ref code, ref reason) = response {
-            self.events_out.push(Err(HandlerErr::Inbound {
+            self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound {
                error: RPCError::ErrorResponse(*code, reason.to_string()),
                proto: inbound_info.protocol,
                id: inbound_id,
@@ -317,7 +327,6 @@ where
{
    type FromBehaviour = RPCSend<Id, TSpec>;
    type ToBehaviour = HandlerEvent<Id, TSpec>;
-    type Error = RPCError;
    type InboundProtocol = RPCProtocol<TSpec>;
    type OutboundProtocol = OutboundRequestContainer<TSpec>;
    type OutboundOpenInfo = (Id, OutboundRequest<TSpec>); // Keep track of the id and the request
@@ -339,28 +348,23 @@ where
        }
    }

-    fn connection_keep_alive(&self) -> KeepAlive {
+    fn connection_keep_alive(&self) -> bool {
        // Check that we don't have outbound items pending for dialing, nor dialing, nor
        // established. Also check that there are no established inbound substreams.
        // Errors and events need to be reported back, so check those too.
-        let should_shutdown = match self.state {
+        match self.state {
            HandlerState::ShuttingDown(_) => {
-                self.dial_queue.is_empty()
-                    && self.outbound_substreams.is_empty()
-                    && self.inbound_substreams.is_empty()
-                    && self.events_out.is_empty()
-                    && self.dial_negotiated == 0
+                !self.dial_queue.is_empty()
+                    || !self.outbound_substreams.is_empty()
+                    || !self.inbound_substreams.is_empty()
+                    || !self.events_out.is_empty()
+                    || self.dial_negotiated != 0
            }
            HandlerState::Deactivated => {
                // Regardless of events, the timeout has expired. Force the disconnect.
-                true
+                false
            }
-            _ => false,
-        };
-        if should_shutdown {
-            KeepAlive::No
-        } else {
-            KeepAlive::Yes
+            _ => true,
        }
    }
@@ -368,12 +372,7 @@ where
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<
-        ConnectionHandlerEvent<
-            Self::OutboundProtocol,
-            Self::OutboundOpenInfo,
-            Self::ToBehaviour,
-            Self::Error,
-        >,
+        ConnectionHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour>,
    > {
        if let Some(waker) = &self.waker {
            if waker.will_wake(cx.waker()) {
@@ -397,7 +396,9 @@ where
            Poll::Ready(_) => {
                self.state = HandlerState::Deactivated;
                debug!(self.log, "Handler deactivated");
-                return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::Disconnected));
+                return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
+                    HandlerEvent::Close(RPCError::Disconnected),
+                ));
            }
            Poll::Pending => {}
        };
@@ -411,7 +412,7 @@ where
            if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) {
                // the delay has been removed
                info.delay_key = None;
-                self.events_out.push(Err(HandlerErr::Inbound {
+                self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound {
                    error: RPCError::StreamTimeout,
                    proto: info.protocol,
                    id: *inbound_id.get_ref(),
@@ -429,9 +430,11 @@ where
                Poll::Ready(Some(Err(e))) => {
                    warn!(self.log, "Inbound substream poll failed"; "error" => ?e);
                    // drops the peer if we cannot read the delay queue
-                    return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::InternalError(
-                        "Could not poll inbound stream timer",
-                    )));
+                    return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
+                        HandlerEvent::Close(RPCError::InternalError(
+                            "Could not poll inbound stream timer",
+                        )),
+                    ));
                }
                Poll::Pending | Poll::Ready(None) => break,
            }
@@ -450,18 +453,20 @@ where
                        error: RPCError::StreamTimeout,
                    };
                    // notify the user
-                    return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err(
-                        outbound_err,
-                    )));
+                    return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
+                        HandlerEvent::Err(outbound_err),
+                    ));
                } else {
                    crit!(self.log, "timed out substream not in the 
books"; "stream_id" => outbound_id.get_ref()); } } Poll::Ready(Some(Err(e))) => { warn!(self.log, "Outbound substream poll failed"; "error" => ?e); - return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::InternalError( - "Could not poll outbound stream timer", - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Close(RPCError::InternalError( + "Could not poll outbound stream timer", + )), + )); } Poll::Pending | Poll::Ready(None) => break, } @@ -513,7 +518,7 @@ where // If there was an error in shutting down the substream report the // error if let Err(error) = res { - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error, proto: info.protocol, id: *id, @@ -525,7 +530,7 @@ where if info.pending_items.back().map(|l| l.close_after()) == Some(false) { // if the request was still active, report back to cancel it - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error: RPCError::Disconnected, proto: info.protocol, id: *id, @@ -593,6 +598,9 @@ where if matches!(info.protocol, Protocol::BlocksByRange) { debug!(self.log, "BlocksByRange Response sent"; "duration" => Instant::now().duration_since(info.request_start_time).as_secs()); } + if matches!(info.protocol, Protocol::BlobsByRange) { + debug!(self.log, "BlobsByRange Response sent"; "duration" => Instant::now().duration_since(info.request_start_time).as_secs()); + } // There is nothing more to process on this substream as it has // been closed. Move on to the next one. @@ -607,7 +615,7 @@ where self.inbound_substreams_delay.remove(delay_key); } // Report the error that occurred during the send process - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error, proto: info.protocol, id: *id, @@ -616,6 +624,9 @@ where if matches!(info.protocol, Protocol::BlocksByRange) { debug!(self.log, "BlocksByRange Response failed"; "duration" => info.request_start_time.elapsed().as_secs()); } + if matches!(info.protocol, Protocol::BlobsByRange) { + debug!(self.log, "BlobsByRange Response failed"; "duration" => info.request_start_time.elapsed().as_secs()); + } break; } // The sending future has not completed. Leave the state as busy and @@ -657,11 +668,12 @@ where } if deactivated => { // the handler is deactivated. 
Close the stream entry.get_mut().state = OutboundSubstreamState::Closing(substream); - self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto: entry.get().proto, - id: entry.get().req_id, - })) + self.events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto: entry.get().proto, + id: entry.get().req_id, + })) } OutboundSubstreamState::RequestPendingResponse { mut substream, @@ -702,14 +714,18 @@ where let received = match response { RPCCodedResponse::StreamTermination(t) => { - Ok(RPCReceived::EndOfStream(id, t)) + HandlerEvent::Ok(RPCReceived::EndOfStream(id, t)) + } + RPCCodedResponse::Success(resp) => { + HandlerEvent::Ok(RPCReceived::Response(id, resp)) + } + RPCCodedResponse::Error(ref code, ref r) => { + HandlerEvent::Err(HandlerErr::Outbound { + id, + proto, + error: RPCError::ErrorResponse(*code, r.to_string()), + }) } - RPCCodedResponse::Success(resp) => Ok(RPCReceived::Response(id, resp)), - RPCCodedResponse::Error(ref code, ref r) => Err(HandlerErr::Outbound { - id, - proto, - error: RPCError::ErrorResponse(*code, r.to_string()), - }), }; return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(received)); @@ -727,9 +743,12 @@ where // notify the application error if request.expected_responses() > 1 { // return an end of stream result - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( - RPCReceived::EndOfStream(request_id, request.stream_termination()), - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Ok(RPCReceived::EndOfStream( + request_id, + request.stream_termination(), + )), + )); } // else we return an error, stream should not have closed early. @@ -738,9 +757,9 @@ where proto: request.versioned_protocol().protocol(), error: RPCError::IncompleteStream, }; - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( - outbound_err, - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Err(outbound_err), + )); } Poll::Pending => { entry.get_mut().state = @@ -756,9 +775,9 @@ where error: e, }; entry.remove_entry(); - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( - outbound_err, - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Err(outbound_err), + )); } }, OutboundSubstreamState::Closing(mut substream) => { @@ -777,16 +796,14 @@ where // continue sending responses beyond what we would expect. Here // we simply terminate the stream and report a stream // termination to the application - let termination = match protocol { - Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange), - Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot), - _ => None, // all other protocols are do not have multiple responses and we do not inform the user, we simply drop the stream. 
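
Each chunk read from an outbound substream now maps onto exactly one `HandlerEvent` shape instead of a nested `Result`. The classification, reduced to a skeleton with placeholder payload types:

/// Placeholder payload types; the real code carries SSZ containers.
enum CodedResponse {
    Success(Vec<u8>),
    Error(u8, String),
    StreamTermination,
}

enum Event {
    Response(Vec<u8>),
    EndOfStream,
    OutboundError { code: u8, reason: String },
}

/// One decoded chunk becomes exactly one event for the behaviour.
fn classify(response: CodedResponse) -> Event {
    match response {
        CodedResponse::Success(payload) => Event::Response(payload),
        CodedResponse::StreamTermination => Event::EndOfStream,
        CodedResponse::Error(code, reason) => Event::OutboundError { code, reason },
    }
}
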
- }; - if let Some(termination) = termination { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( - RPCReceived::EndOfStream(request_id, termination), - ))); + if let Some(termination) = protocol.terminator() { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Ok(RPCReceived::EndOfStream( + request_id, + termination, + )), + )); } } Poll::Pending => { @@ -827,7 +844,9 @@ where && self.events_out.is_empty() && self.dial_negotiated == 0 { - return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::Disconnected)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Close(RPCError::Disconnected), + )); } } @@ -855,24 +874,9 @@ where ConnectionEvent::DialUpgradeError(DialUpgradeError { info, error }) => { self.on_dial_upgrade_error(info, error) } - ConnectionEvent::ListenUpgradeError(libp2p::swarm::handler::ListenUpgradeError { - info: _, - error: _, /* RPCError */ - }) => { - // This is going to be removed in the next libp2p release. I think its fine to do - // nothing. - } - ConnectionEvent::LocalProtocolsChange(_) => { - // This shouldn't effect this handler, we will still negotiate streams if we support - // the protocol as usual. - } - ConnectionEvent::RemoteProtocolsChange(_) => { - // This shouldn't effect this handler, we will still negotiate streams if we support - // the protocol as usual. - } - ConnectionEvent::AddressChange(_) => { - // We dont care about these changes as they have no bearing on our RPC internal - // logic. + _ => { + // NOTE: ConnectionEvent is a non exhaustive enum so updates should be based on + // release notes more than compiler feedback } } } @@ -915,7 +919,7 @@ where }, ); } else { - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { id: self.current_inbound_substream_id, proto: req.versioned_protocol().protocol(), error: RPCError::HandlerRejected, @@ -929,7 +933,7 @@ where self.shutdown(None); } - self.events_out.push(Ok(RPCReceived::Request( + self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( self.current_inbound_substream_id, req, ))); @@ -949,11 +953,12 @@ where // accept outbound connections only if the handler is not deactivated if matches!(self.state, HandlerState::Deactivated) { - self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto, - id, - })); + self.events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto, + id, + })); } // add the stream to substreams if we expect a response, otherwise drop the stream. 
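
The wildcard arm above trades the compiler's exhaustiveness check for forward compatibility with libp2p's non-exhaustive event enums. The pattern in isolation (event names illustrative):

/// Illustrative upstream event enum that may grow new variants over time.
enum ConnEvent {
    FullyNegotiatedInbound,
    FullyNegotiatedOutbound,
    DialUpgradeError,
    AddressChange,
    LocalProtocolsChange,
}

fn on_connection_event(event: ConnEvent) {
    match event {
        ConnEvent::FullyNegotiatedInbound => { /* register the inbound substream */ }
        ConnEvent::FullyNegotiatedOutbound => { /* register the outbound substream */ }
        ConnEvent::DialUpgradeError => { /* report the failed request */ }
        _ => {
            // Deliberately ignored: new upstream variants land here silently,
            // so they must be tracked via release notes rather than compile errors.
        }
    }
}
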
@@ -1026,11 +1031,12 @@ where self.dial_negotiated -= 1; self.outbound_io_error_retries = 0; - self.events_out.push(Err(HandlerErr::Outbound { - error, - proto: req.versioned_protocol().protocol(), - id, - })); + self.events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error, + proto: req.versioned_protocol().protocol(), + id, + })); } } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index af0ba2510..04ec6bac4 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -5,23 +5,18 @@ use regex::bytes::Regex; use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{ - typenum::{U1024, U256}, - VariableList, -}; +use ssz_types::{typenum::U256, VariableList}; use std::marker::PhantomData; use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; +use types::blob_sidecar::BlobIdentifier; use types::{ - light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + blob_sidecar::BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, LightClientBootstrap, + RuntimeVariableList, SignedBeaconBlock, Slot, }; -/// Maximum number of blocks in a single request. -pub type MaxRequestBlocks = U1024; -pub const MAX_REQUEST_BLOCKS: u64 = 1024; - /// Maximum length of error message. pub type MaxErrorLen = U256; pub const MAX_ERROR_LEN: u64 = 256; @@ -278,6 +273,22 @@ impl BlocksByRangeRequest { } } +/// Request a number of beacon blobs from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct BlobsByRangeRequest { + /// The starting slot to request blobs. + pub start_slot: u64, + + /// The number of slots from the start slot. + pub count: u64, +} + +impl BlobsByRangeRequest { + pub fn max_blobs_requested(&self) -> u64 { + self.count.saturating_mul(E::max_blobs_per_block() as u64) + } +} + /// Request a number of beacon block roots from a peer. #[superstruct( variants(V1, V2), @@ -323,19 +334,38 @@ impl OldBlocksByRangeRequest { #[derive(Clone, Debug, PartialEq)] pub struct BlocksByRootRequest { /// The list of beacon block bodies being requested. - pub block_roots: VariableList, + pub block_roots: RuntimeVariableList, } impl BlocksByRootRequest { - pub fn new(block_roots: VariableList) -> Self { + pub fn new(block_roots: Vec, spec: &ChainSpec) -> Self { + let block_roots = + RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize); Self::V2(BlocksByRootRequestV2 { block_roots }) } - pub fn new_v1(block_roots: VariableList) -> Self { + pub fn new_v1(block_roots: Vec, spec: &ChainSpec) -> Self { + let block_roots = + RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize); Self::V1(BlocksByRootRequestV1 { block_roots }) } } +/// Request a number of beacon blocks and blobs from a peer. +#[derive(Clone, Debug, PartialEq)] +pub struct BlobsByRootRequest { + /// The list of beacon block roots being requested. + pub blob_ids: RuntimeVariableList, +} + +impl BlobsByRootRequest { + pub fn new(blob_ids: Vec, spec: &ChainSpec) -> Self { + let blob_ids = + RuntimeVariableList::from_vec(blob_ids, spec.max_request_blob_sidecars as usize); + Self { blob_ids } + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -351,9 +381,15 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_ROOT request. 
BlocksByRoot(Arc>), + /// A response to a get BLOBS_BY_RANGE request + BlobsByRange(Arc>), + /// A response to a get LIGHTCLIENT_BOOTSTRAP request. LightClientBootstrap(LightClientBootstrap), + /// A response to a get BLOBS_BY_ROOT request. + BlobsByRoot(Arc>), + /// A PONG response to a PING request. Pong(Ping), @@ -369,6 +405,12 @@ pub enum ResponseTermination { /// Blocks by root stream termination. BlocksByRoot, + + /// Blobs by range stream termination. + BlobsByRange, + + /// Blobs by root stream termination. + BlobsByRoot, } /// The structured response containing a result/code indicating success or failure @@ -395,6 +437,7 @@ pub struct LightClientBootstrapRequest { #[strum(serialize_all = "snake_case")] pub enum RPCResponseErrorCode { RateLimited, + BlobsNotFoundForBlock, InvalidRequest, ServerError, /// Error spec'd to indicate that a peer does not have blocks on a requested range. @@ -424,6 +467,7 @@ impl RPCCodedResponse { 2 => RPCResponseErrorCode::ServerError, 3 => RPCResponseErrorCode::ResourceUnavailable, 139 => RPCResponseErrorCode::RateLimited, + 140 => RPCResponseErrorCode::BlobsNotFoundForBlock, _ => RPCResponseErrorCode::Unknown, }; RPCCodedResponse::Error(code, err) @@ -436,6 +480,8 @@ impl RPCCodedResponse { RPCResponse::Status(_) => false, RPCResponse::BlocksByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, + RPCResponse::BlobsByRange(_) => true, + RPCResponse::BlobsByRoot(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, RPCResponse::LightClientBootstrap(_) => false, @@ -460,6 +506,7 @@ impl RPCResponseErrorCode { RPCResponseErrorCode::ResourceUnavailable => 3, RPCResponseErrorCode::Unknown => 255, RPCResponseErrorCode::RateLimited => 139, + RPCResponseErrorCode::BlobsNotFoundForBlock => 140, } } } @@ -471,6 +518,8 @@ impl RPCResponse { RPCResponse::Status(_) => Protocol::Status, RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, + RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, + RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -486,6 +535,7 @@ impl std::fmt::Display for RPCResponseErrorCode { RPCResponseErrorCode::ServerError => "Server error occurred", RPCResponseErrorCode::Unknown => "Unknown error occurred", RPCResponseErrorCode::RateLimited => "Rate limited", + RPCResponseErrorCode::BlobsNotFoundForBlock => "No blobs for the given root", }; f.write_str(repr) } @@ -507,10 +557,20 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } + RPCResponse::BlobsByRange(blob) => { + write!(f, "BlobsByRange: Blob slot: {}", blob.slot()) + } + RPCResponse::BlobsByRoot(sidecar) => { + write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) + } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), RPCResponse::LightClientBootstrap(bootstrap) => { - write!(f, "LightClientBootstrap Slot: {}", bootstrap.header.slot) + write!( + f, + "LightClientBootstrap Slot: {}", + bootstrap.header.beacon.slot + ) } } } @@ -565,6 +625,26 @@ impl std::fmt::Display for OldBlocksByRangeRequest { } } +impl std::fmt::Display for BlobsByRootRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: 
BlobsByRoot: Number of Requested Roots: {}", + self.blob_ids.len() + ) + } +} + +impl std::fmt::Display for BlobsByRangeRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: BlobsByRange: Start Slot: {}, Count: {}", + self.start_slot, self.count + ) + } +} + impl slog::KV for StatusMessage { fn serialize( &self, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 14f77e4ba..3606438fb 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -5,9 +5,9 @@ //! syncing. use futures::future::FutureExt; -use handler::{HandlerEvent, RPCHandler}; +use handler::RPCHandler; use libp2p::swarm::{ - handler::ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters, + handler::ConnectionHandler, CloseConnection, ConnectionId, NetworkBehaviour, NotifyHandler, ToSwarm, }; use libp2p::swarm::{FromSwarm, SubstreamProtocol, THandlerInEvent}; @@ -20,14 +20,14 @@ use std::task::{Context, Poll}; use std::time::Duration; use types::{EthSpec, ForkContext}; -pub(crate) use handler::HandlerErr; +pub(crate) use handler::{HandlerErr, HandlerEvent}; pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; pub(crate) use protocol::InboundRequest; pub use handler::SubstreamId; pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, - MaxRequestBlocks, RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, + RPCResponseErrorCode, ResponseTermination, StatusMessage, }; pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; @@ -282,25 +282,9 @@ where Ok(handler) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionClosed(_) - | FromSwarm::ConnectionEstablished(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => { - // Rpc Behaviour does not act on these swarm events. We use a comprehensive match - // statement to ensure future events are dealt with appropriately. 
- } - } + fn on_swarm_event(&mut self, _event: FromSwarm) { + // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release notes more + // than compiler feedback } fn on_connection_handler_event( @@ -309,74 +293,82 @@ where conn_id: ConnectionId, event: ::ToBehaviour, ) { - if let Ok(RPCReceived::Request(ref id, ref req)) = event { - if let Some(limiter) = self.limiter.as_mut() { - // check if the request is conformant to the quota - match limiter.allows(&peer_id, req) { - Ok(()) => { - // send the event to the user - self.events.push(ToSwarm::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })) - } - Err(RateLimitedErr::TooLarge) => { - // we set the batch sizes, so this is a coding/config err for most protocols - let protocol = req.versioned_protocol().protocol(); - if matches!(protocol, Protocol::BlocksByRange) { - debug!(self.log, "Blocks by range request will never be processed"; "request" => %req); - } else { - crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); + match event { + HandlerEvent::Ok(RPCReceived::Request(ref id, ref req)) => { + if let Some(limiter) = self.limiter.as_mut() { + // check if the request is conformant to the quota + match limiter.allows(&peer_id, req) { + Ok(()) => { + // send the event to the user + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })) } - // send an error code to the peer. - // the handler upon receiving the error code will send it back to the behaviour - self.send_response( - peer_id, - (conn_id, *id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, - "Rate limited. Request too large".into(), - ), - ); - } - Err(RateLimitedErr::TooSoon(wait_time)) => { - debug!(self.log, "Request exceeds the rate limit"; + Err(RateLimitedErr::TooLarge) => { + // we set the batch sizes, so this is a coding/config err for most protocols + let protocol = req.versioned_protocol().protocol(); + if matches!(protocol, Protocol::BlocksByRange) + || matches!(protocol, Protocol::BlobsByRange) + { + debug!(self.log, "By range request will never be processed"; "request" => %req, "protocol" => %protocol); + } else { + crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); + } + // send an error code to the peer. + // the handler upon receiving the error code will send it back to the behaviour + self.send_response( + peer_id, + (conn_id, *id), + RPCCodedResponse::Error( + RPCResponseErrorCode::RateLimited, + "Rate limited. Request too large".into(), + ), + ); + } + Err(RateLimitedErr::TooSoon(wait_time)) => { + debug!(self.log, "Request exceeds the rate limit"; "request" => %req, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); - // send an error code to the peer. - // the handler upon receiving the error code will send it back to the behaviour - self.send_response( - peer_id, - (conn_id, *id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, - format!("Wait {:?}", wait_time).into(), - ), - ); + // send an error code to the peer. 
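
The limiter distinguishes a request whose cost can never fit its quota (`TooLarge`, a coding or configuration error for most protocols) from one that merely arrived too soon (`TooSoon`). A self-contained token-bucket sketch with the same two outcomes, all names hypothetical:

use std::time::Duration;

pub enum RateLimitedErr {
    TooLarge,
    TooSoon(Duration),
}

/// Sketch: `max_tokens` replenish every `replenish_all_every`; a request
/// of `cost` tokens either can never be served, or must wait for refill.
pub struct SimpleLimiter {
    max_tokens: u64,
    available: u64,
    replenish_all_every: Duration,
}

impl SimpleLimiter {
    pub fn allows(&mut self, cost: u64) -> Result<(), RateLimitedErr> {
        if cost > self.max_tokens {
            // No amount of waiting makes this request admissible.
            return Err(RateLimitedErr::TooLarge);
        }
        if cost > self.available {
            // Wait time proportional to the missing tokens.
            let missing = cost - self.available;
            let per_token = self.replenish_all_every / (self.max_tokens.max(1) as u32);
            return Err(RateLimitedErr::TooSoon(per_token * missing as u32));
        }
        self.available -= cost;
        Ok(())
    }
}
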
+ // the handler upon receiving the error code will send it back to the behaviour + self.send_response( + peer_id, + (conn_id, *id), + RPCCodedResponse::Error( + RPCResponseErrorCode::RateLimited, + format!("Wait {:?}", wait_time).into(), + ), + ); + } } + } else { + // No rate limiting, send the event to the user + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })) } - } else { - // No rate limiting, send the event to the user + } + HandlerEvent::Close(_) => { + // Handle the close event here. + self.events.push(ToSwarm::CloseConnection { + peer_id, + connection: CloseConnection::All, + }); + } + _ => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, event, - })) + })); } - } else { - self.events.push(ToSwarm::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })); } } - fn poll( - &mut self, - cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { // let the rate limiter prune. if let Some(limiter) = self.limiter.as_mut() { let _ = limiter.poll_unpin(cx); @@ -407,25 +399,38 @@ where serializer: &mut dyn slog::Serializer, ) -> slog::Result { serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?; - let (msg_kind, protocol) = match &self.event { - Ok(received) => match received { - RPCReceived::Request(_, req) => ("request", req.versioned_protocol().protocol()), - RPCReceived::Response(_, res) => ("response", res.protocol()), - RPCReceived::EndOfStream(_, end) => ( - "end_of_stream", - match end { - ResponseTermination::BlocksByRange => Protocol::BlocksByRange, - ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, - }, - ), - }, - Err(error) => match &error { - HandlerErr::Inbound { proto, .. } => ("inbound_err", *proto), - HandlerErr::Outbound { proto, .. } => ("outbound_err", *proto), - }, + match &self.event { + HandlerEvent::Ok(received) => { + let (msg_kind, protocol) = match received { + RPCReceived::Request(_, req) => { + ("request", req.versioned_protocol().protocol()) + } + RPCReceived::Response(_, res) => ("response", res.protocol()), + RPCReceived::EndOfStream(_, end) => ( + "end_of_stream", + match end { + ResponseTermination::BlocksByRange => Protocol::BlocksByRange, + ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, + ResponseTermination::BlobsByRange => Protocol::BlobsByRange, + ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, + }, + ), + }; + serializer.emit_str("msg_kind", msg_kind)?; + serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; + } + HandlerEvent::Err(error) => { + let (msg_kind, protocol) = match &error { + HandlerErr::Inbound { proto, .. } => ("inbound_err", *proto), + HandlerErr::Outbound { proto, .. 
} => ("outbound_err", *proto), + }; + serializer.emit_str("msg_kind", msg_kind)?; + serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; + } + HandlerEvent::Close(err) => { + serializer.emit_arguments("handler_close", &format_args!("{}", err))?; + } }; - serializer.emit_str("msg_kind", msg_kind)?; - serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; slog::Result::Ok(()) } diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index d12f36686..713e9e0ec 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -35,6 +35,8 @@ pub enum OutboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + BlobsByRange(BlobsByRangeRequest), + BlobsByRoot(BlobsByRootRequest), Ping(Ping), MetaData(MetadataRequest), } @@ -70,6 +72,14 @@ impl OutboundRequest { ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), ], + OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRangeV1, + Encoding::SSZSnappy, + )], + OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRootV1, + Encoding::SSZSnappy, + )], OutboundRequest::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -89,6 +99,8 @@ impl OutboundRequest { OutboundRequest::Goodbye(_) => 0, OutboundRequest::BlocksByRange(req) => *req.count(), OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, + OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), + OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, } @@ -107,6 +119,8 @@ impl OutboundRequest { BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, }, + OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, + OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, OutboundRequest::Ping(_) => SupportedProtocol::PingV1, OutboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -123,6 +137,8 @@ impl OutboundRequest { // variants that have `multiple_responses()` can have values. 
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, + OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -178,6 +194,8 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), + OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index f2a39470b..9c174b8e4 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -2,7 +2,6 @@ use super::methods::*; use crate::rpc::{ codec::{base::BaseInboundCodec, ssz_snappy::SSZSnappyInboundCodec, InboundCodec}, methods::{MaxErrorLen, ResponseTermination, MAX_ERROR_LEN}, - MaxRequestBlocks, MAX_REQUEST_BLOCKS, }; use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; @@ -22,7 +21,7 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, - EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, + BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, ForkName, MainnetEthSpec, Signature, SignedBeaconBlock, }; @@ -83,18 +82,12 @@ lazy_static! { + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = - VariableList::::from(Vec::::new()) - .as_ssz_bytes() - .len(); - pub static ref BLOCKS_BY_ROOT_REQUEST_MAX: usize = - VariableList::::from(vec![ - Hash256::zero(); - MAX_REQUEST_BLOCKS - as usize - ]) - .as_ssz_bytes() - .len(); + pub static ref SIGNED_BEACON_BLOCK_DENEB_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + + types::ExecutionPayload::::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb) + + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` + + (::ssz_fixed_len() * ::max_blobs_per_block()) + + ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field. 
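
The Deneb maximum above is assembled additively: a block skeleton, a worst-case payload, and one SSZ length offset per trailing variable-size field. The same arithmetic in isolation, with illustrative sizes passed in rather than taken from the real types:

/// One SSZ length offset is appended for every variable-size field.
const BYTES_PER_LENGTH_OFFSET: usize = 4;

/// Upper bound for a Deneb block, assembled from its parts. All inputs
/// are illustrative; the real values come from the `types` crate.
fn max_deneb_block_size(
    block_without_payload_max: usize,
    payload_max: usize,
    commitment_fixed_len: usize,
    max_blobs_per_block: usize,
) -> usize {
    block_without_payload_max
        + payload_max
        + BYTES_PER_LENGTH_OFFSET // offset for the `ExecutionPayload` field
        + commitment_fixed_len * max_blobs_per_block
        + BYTES_PER_LENGTH_OFFSET // offset for the commitments list
}
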
+
    pub static ref ERROR_TYPE_MIN: usize =
        VariableList::<u8, MaxErrorLen>::from(Vec::<u8>::new())
            .as_ssz_bytes()
            .len();
@@ -121,6 +114,7 @@ pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize
        ForkName::Altair | ForkName::Base => max_chunk_size / 10,
        ForkName::Merge => max_chunk_size,
        ForkName::Capella => max_chunk_size,
+        ForkName::Deneb => max_chunk_size,
    }
}

@@ -145,6 +139,10 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits {
            *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
            *SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks
        ),
+        ForkName::Deneb => RpcLimits::new(
+            *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
+            *SIGNED_BEACON_BLOCK_DENEB_MAX, // EIP 4844 block is larger than all prior fork blocks
+        ),
    }
}

@@ -162,6 +160,12 @@ pub enum Protocol {
    /// The `BlocksByRoot` protocol name.
    #[strum(serialize = "beacon_blocks_by_root")]
    BlocksByRoot,
+    /// The `BlobsByRange` protocol name.
+    #[strum(serialize = "blob_sidecars_by_range")]
+    BlobsByRange,
+    /// The `BlobsByRoot` protocol name.
+    #[strum(serialize = "blob_sidecars_by_root")]
+    BlobsByRoot,
    /// The `Ping` protocol name.
    Ping,
    /// The `MetaData` protocol name.
@@ -172,6 +176,22 @@ pub enum Protocol {
    LightClientBootstrap,
}

+impl Protocol {
+    pub(crate) fn terminator(self) -> Option<ResponseTermination> {
+        match self {
+            Protocol::Status => None,
+            Protocol::Goodbye => None,
+            Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
+            Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
+            Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange),
+            Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot),
+            Protocol::Ping => None,
+            Protocol::MetaData => None,
+            Protocol::LightClientBootstrap => None,
+        }
+    }
+}
+
/// RPC Encodings supported.
#[derive(Debug, Clone, PartialEq, Eq)] pub enum Encoding { @@ -187,6 +207,8 @@ pub enum SupportedProtocol { BlocksByRangeV2, BlocksByRootV1, BlocksByRootV2, + BlobsByRangeV1, + BlobsByRootV1, PingV1, MetaDataV1, MetaDataV2, @@ -202,6 +224,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRangeV2 => "2", SupportedProtocol::BlocksByRootV1 => "1", SupportedProtocol::BlocksByRootV2 => "2", + SupportedProtocol::BlobsByRangeV1 => "1", + SupportedProtocol::BlobsByRootV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -217,6 +241,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRangeV2 => Protocol::BlocksByRange, SupportedProtocol::BlocksByRootV1 => Protocol::BlocksByRoot, SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot, + SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange, + SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -224,8 +250,8 @@ impl SupportedProtocol { } } - fn currently_supported() -> Vec { - vec![ + fn currently_supported(fork_context: &ForkContext) -> Vec { + let mut supported = vec![ ProtocolId::new(Self::StatusV1, Encoding::SSZSnappy), ProtocolId::new(Self::GoodbyeV1, Encoding::SSZSnappy), // V2 variants have higher preference then V1 @@ -236,7 +262,14 @@ impl SupportedProtocol { ProtocolId::new(Self::PingV1, Encoding::SSZSnappy), ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy), ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy), - ] + ]; + if fork_context.fork_exists(ForkName::Deneb) { + supported.extend_from_slice(&[ + ProtocolId::new(SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy), + ]); + } + supported } } @@ -264,7 +297,7 @@ impl UpgradeInfo for RPCProtocol { /// The list of supported RPC protocols for Lighthouse. fn protocol_info(&self) -> Self::InfoIter { - let mut supported_protocols = SupportedProtocol::currently_supported(); + let mut supported_protocols = SupportedProtocol::currently_supported(&self.fork_context); if self.enable_light_client_server { supported_protocols.push(ProtocolId::new( SupportedProtocol::LightClientBootstrapV1, @@ -315,7 +348,7 @@ impl AsRef for ProtocolId { impl ProtocolId { /// Returns min and max size for messages of given protocol id requests. 
- pub fn rpc_request_limits(&self) -> RpcLimits { + pub fn rpc_request_limits(&self, spec: &ChainSpec) -> RpcLimits { match self.versioned_protocol.protocol() { Protocol::Status => RpcLimits::new( ::ssz_fixed_len(), @@ -330,9 +363,12 @@ impl ProtocolId { ::ssz_fixed_len(), ::ssz_fixed_len(), ), - Protocol::BlocksByRoot => { - RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) - } + Protocol::BlocksByRoot => RpcLimits::new(0, spec.max_blocks_by_root_request), + Protocol::BlobsByRange => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), + Protocol::BlobsByRoot => RpcLimits::new(0, spec.max_blobs_by_root_request), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -355,6 +391,8 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), + Protocol::BlobsByRange => rpc_blob_limits::(), + Protocol::BlobsByRoot => rpc_blob_limits::(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -376,6 +414,8 @@ impl ProtocolId { match self.versioned_protocol { SupportedProtocol::BlocksByRangeV2 | SupportedProtocol::BlocksByRootV2 + | SupportedProtocol::BlobsByRangeV1 + | SupportedProtocol::BlobsByRootV1 | SupportedProtocol::LightClientBootstrapV1 => true, SupportedProtocol::StatusV1 | SupportedProtocol::BlocksByRootV1 @@ -407,6 +447,13 @@ impl ProtocolId { } } +pub fn rpc_blob_limits() -> RpcLimits { + RpcLimits::new( + BlobSidecar::::empty().as_ssz_bytes().len(), + BlobSidecar::::max_size(), + ) +} + /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol @@ -478,6 +525,8 @@ pub enum InboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + BlobsByRange(BlobsByRangeRequest), + BlobsByRoot(BlobsByRootRequest), LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(MetadataRequest), @@ -494,6 +543,8 @@ impl InboundRequest { InboundRequest::Goodbye(_) => 0, InboundRequest::BlocksByRange(req) => *req.count(), InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, + InboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), + InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, InboundRequest::LightClientBootstrap(_) => 1, @@ -513,6 +564,8 @@ impl InboundRequest { BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, }, + InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, + InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, InboundRequest::Ping(_) => SupportedProtocol::PingV1, InboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -530,6 +583,8 @@ impl InboundRequest { // variants that have `multiple_responses()` can have values. 
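
`expected_responses` turns a request into the number of chunks the handler should wait for, and a blob range can yield up to the per-block blob maximum for every requested slot. A sketch of the counting rule (the constant stands in for `E::max_blobs_per_block()`):

/// Stand-in for E::max_blobs_per_block(); illustrative only.
const MAX_BLOBS_PER_BLOCK: u64 = 6;

enum Req {
    Status,
    Goodbye,
    BlocksByRange { count: u64 },
    BlocksByRoot { roots: usize },
    BlobsByRange { count: u64 },
    BlobsByRoot { ids: usize },
    Ping,
    MetaData,
}

fn expected_responses(req: &Req) -> u64 {
    match req {
        Req::Status | Req::Ping | Req::MetaData => 1,
        Req::Goodbye => 0,
        Req::BlocksByRange { count } => *count,
        Req::BlocksByRoot { roots } => *roots as u64,
        // Each requested slot may carry up to MAX_BLOBS_PER_BLOCK sidecars.
        Req::BlobsByRange { count } => count.saturating_mul(MAX_BLOBS_PER_BLOCK),
        Req::BlobsByRoot { ids } => *ids as u64,
    }
}
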
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, + InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, InboundRequest::Status(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), @@ -636,6 +691,8 @@ impl std::fmt::Display for InboundRequest { InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), + InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), InboundRequest::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index e1634d711..0b57374e8 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -2,7 +2,7 @@ use super::config::RateLimiterConfig; use crate::rpc::Protocol; use fnv::FnvHashMap; use libp2p::PeerId; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::convert::TryInto; use std::future::Future; use std::hash::Hash; @@ -94,6 +94,10 @@ pub struct RPCRateLimiter { bbrange_rl: Limiter, /// BlocksByRoot rate limiter. bbroots_rl: Limiter, + /// BlobsByRange rate limiter. + blbrange_rl: Limiter, + /// BlobsByRoot rate limiter. + blbroot_rl: Limiter, /// LightClientBootstrap rate limiter. lcbootstrap_rl: Limiter, } @@ -122,6 +126,10 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, + /// Quota for the BlobsByRange protocol. + blbrange_quota: Option, + /// Quota for the BlobsByRoot protocol. + blbroot_quota: Option, /// Quota for the LightClientBootstrap protocol. 
lcbootstrap_quota: Option, } @@ -137,6 +145,8 @@ impl RPCRateLimiterBuilder { Protocol::Goodbye => self.goodbye_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, + Protocol::BlobsByRange => self.blbrange_quota = q, + Protocol::BlobsByRoot => self.blbroot_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, } self @@ -158,6 +168,14 @@ impl RPCRateLimiterBuilder { .lcbootstrap_quota .ok_or("LightClientBootstrap quota not specified")?; + let blbrange_quota = self + .blbrange_quota + .ok_or("BlobsByRange quota not specified")?; + + let blbroots_quota = self + .blbroot_quota + .ok_or("BlobsByRoot quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -165,6 +183,8 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; + let blbrange_rl = Limiter::from_quota(blbrange_quota)?; + let blbroot_rl = Limiter::from_quota(blbroots_quota)?; let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quota)?; // check for peers to prune every 30 seconds, starting in 30 seconds @@ -179,6 +199,8 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, + blbrange_rl, + blbroot_rl, lcbootstrap_rl, init_time: Instant::now(), }) @@ -219,6 +241,8 @@ impl RPCRateLimiter { goodbye_quota, blocks_by_range_quota, blocks_by_root_quota, + blobs_by_range_quota, + blobs_by_root_quota, light_client_bootstrap_quota, } = config; @@ -229,6 +253,8 @@ impl RPCRateLimiter { .set_quota(Protocol::Goodbye, goodbye_quota) .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) + .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .build() } @@ -255,6 +281,8 @@ impl RPCRateLimiter { Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, + Protocol::BlobsByRange => &mut self.blbrange_rl, + Protocol::BlobsByRoot => &mut self.blbroot_rl, Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl, }; check(limiter) @@ -268,6 +296,8 @@ impl RPCRateLimiter { self.goodbye_rl.prune(time_since_start); self.bbrange_rl.prune(time_since_start); self.bbroots_rl.prune(time_since_start); + self.blbrange_rl.prune(time_since_start); + self.blbroot_rl.prune(time_since_start); } } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 187c0ab1b..96c9d2833 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,9 +1,9 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; -use types::light_client_bootstrap::LightClientBootstrap; -use types::{EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, EthSpec, LightClientBootstrap, SignedBeaconBlock}; +use crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use crate::rpc::{ methods::{ BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, @@ -34,10 +34,14 @@ pub enum Request { Status(StatusMessage), /// A blocks by range request. BlocksByRange(BlocksByRangeRequest), + /// A blobs by range request.
+ BlobsByRange(BlobsByRangeRequest), /// A request blocks root request. BlocksByRoot(BlocksByRootRequest), // light client bootstrap request LightClientBootstrap(LightClientBootstrapRequest), + /// A request blobs root request. + BlobsByRoot(BlobsByRootRequest), } impl std::convert::From for OutboundRequest { @@ -63,6 +67,8 @@ impl std::convert::From for OutboundRequest { Request::LightClientBootstrap(_) => { unreachable!("Lighthouse never makes an outbound light client request") } + Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), + Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r), Request::Status(s) => OutboundRequest::Status(s), } } @@ -80,8 +86,12 @@ pub enum Response { Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. BlocksByRange(Option>>), + /// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch. + BlobsByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), + /// A response to a get BLOBS_BY_ROOT request. + BlobsByRoot(Option>>), /// A response to a LightClientUpdate request. LightClientBootstrap(LightClientBootstrap), } @@ -97,6 +107,14 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, + Response::BlobsByRoot(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot), + }, + Response::BlobsByRange(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), + }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), Response::LightClientBootstrap(b) => { RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 6c52a07c1..8dd750429 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -20,8 +20,6 @@ where AppReqId: ReqId, TSpec: EthSpec, { - /// Peers banned. - pub banned_peers: libp2p::allow_block_list::Behaviour, /// Keep track of active and pending connections to enforce hard limits. pub connection_limits: libp2p::connection_limits::Behaviour, /// The routing pub-sub mechanism for eth2. diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 2865d5b3f..5dc0d29ff 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -20,6 +20,8 @@ pub struct GossipCache { topic_msgs: HashMap, Key>>, /// Timeout for blocks. beacon_block: Option, + /// Timeout for blobs. + blob_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. @@ -47,6 +49,8 @@ pub struct GossipCacheBuilder { default_timeout: Option, /// Timeout for blocks. beacon_block: Option, + /// Timeout for blob sidecars. + blob_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. 
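The `From<Response>` conversion above encodes the multi-chunk RPC convention: every item of a BlobsByRange or BlobsByRoot stream travels as a `Success` chunk, and the final `None` becomes a `StreamTermination` frame. A reduced sketch of that mapping with stand-in types:

```rust
/// Stand-ins for the RPCCodedResponse / ResponseTermination pair in the diff.
enum CodedResponse<T> {
    Success(T),
    StreamTermination,
}

/// `Some(chunk)` carries data; `None` closes the stream.
fn encode_chunk<T>(chunk: Option<T>) -> CodedResponse<T> {
    match chunk {
        Some(item) => CodedResponse::Success(item),
        None => CodedResponse::StreamTermination,
    }
}
```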
@@ -147,6 +151,7 @@ impl GossipCacheBuilder { let GossipCacheBuilder { default_timeout, beacon_block, + blob_sidecar, aggregates, attestation, voluntary_exit, @@ -162,6 +167,7 @@ impl GossipCacheBuilder { expirations: DelayQueue::default(), topic_msgs: HashMap::default(), beacon_block: beacon_block.or(default_timeout), + blob_sidecar: blob_sidecar.or(default_timeout), aggregates: aggregates.or(default_timeout), attestation: attestation.or(default_timeout), voluntary_exit: voluntary_exit.or(default_timeout), @@ -187,6 +193,7 @@ impl GossipCache { pub fn insert(&mut self, topic: GossipTopic, data: Vec) { let expire_timeout = match topic.kind() { GossipKind::BeaconBlock => self.beacon_block, + GossipKind::BlobSidecar(_) => self.blob_sidecar, GossipKind::BeaconAggregateAndProof => self.aggregates, GossipKind::Attestation(_) => self.attestation, GossipKind::VoluntaryExit => self.voluntary_exit, @@ -198,9 +205,8 @@ impl GossipCache { GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, }; - let expire_timeout = match expire_timeout { - Some(expire_timeout) => expire_timeout, - None => return, + let Some(expire_timeout) = expire_timeout else { + return; }; match self .topic_msgs diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index b058fc0ff..47c2c9e56 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -96,6 +96,9 @@ impl PeerScoreSettings { ip_colocation_factor_threshold: 8.0, // Allow up to 8 nodes per IP behaviour_penalty_threshold: 6.0, behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10), + slow_peer_decay: 0.1, + slow_peer_weight: -10.0, + slow_peer_threshold: 0.0, ..Default::default() }; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e4e11f29c..2b20c76cf 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -15,7 +15,8 @@ use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; use crate::types::{ fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, - SnappyTransform, Subnet, SubnetDiscovery, + SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, + CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; use crate::EnrExt; use crate::Eth2Enr; @@ -23,14 +24,14 @@ use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; -use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::{ self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, + TopicScoreParams, }; -use libp2p::identify; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; -use libp2p::swarm::{Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::swarm::{Swarm, SwarmEvent}; use libp2p::PeerId; +use libp2p::{identify, SwarmBuilder}; use slog::{crit, debug, info, o, trace, warn}; use std::path::PathBuf; use std::pin::Pin; @@ -69,6 +70,8 @@ pub enum NetworkEvent { id: AppReqId, /// The peer to which this 
request was sent. peer_id: PeerId, + /// The error of the failed request. + error: RPCError, }, RequestReceived { /// The peer that sent the request. @@ -122,10 +125,10 @@ pub struct Network { /// The interval for updating gossipsub scores update_gossipsub_scores: tokio::time::Interval, gossip_cache: GossipCache, - /// The bandwidth logger for the underlying libp2p transport. - pub bandwidth: Arc, /// This node's PeerId. pub local_peer_id: PeerId, + /// Flag to disable warning logs for duplicate gossip messages and log at DEBUG level instead. + pub disable_duplicate_warn_logs: bool, /// Logger for behaviour actions. log: slog::Logger, } @@ -134,10 +137,11 @@ pub struct Network { impl Network { pub async fn new( executor: task_executor::TaskExecutor, - ctx: ServiceContext<'_>, + mut ctx: ServiceContext<'_>, log: &slog::Logger, ) -> error::Result<(Self, Arc>)> { let log = log.new(o!("service"=> "libp2p")); + let mut config = ctx.config.clone(); trace!(log, "Libp2p Service starting"); // initialise the node's ID @@ -219,15 +223,27 @@ impl Network { // Set up a scoring update interval let update_gossipsub_scores = tokio::time::interval(params.decay_interval); + let max_topics = ctx.chain_spec.attestation_subnet_count as usize + + SYNC_COMMITTEE_SUBNET_COUNT as usize + + ctx.chain_spec.blob_sidecar_subnet_count as usize + + BASE_CORE_TOPICS.len() + + ALTAIR_CORE_TOPICS.len() + + CAPELLA_CORE_TOPICS.len() + + DENEB_CORE_TOPICS.len() + + LIGHT_CLIENT_GOSSIP_TOPICS.len(); + let possible_fork_digests = ctx.fork_context.all_fork_digests(); let filter = gossipsub::MaxCountSubscriptionFilter { filter: utils::create_whitelist_filter( possible_fork_digests, ctx.chain_spec.attestation_subnet_count, SYNC_COMMITTEE_SUBNET_COUNT, + ctx.chain_spec.blob_sidecar_subnet_count, ), - max_subscribed_topics: 200, - max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 + // during a fork we subscribe to both the old and new topics + max_subscribed_topics: max_topics * 4, + // 162 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics) * 2 + max_subscriptions_per_request: max_topics * 2, }; let gossipsub_config_params = GossipsubConfigParams { @@ -240,10 +256,13 @@ impl Network { gossipsub_config_params, ); - // If metrics are enabled for gossipsub build the configuration - let gossipsub_metrics = ctx - .gossipsub_registry - .map(|registry| (registry, Default::default())); + // If metrics are enabled for libp2p build the configuration + let gossipsub_metrics = ctx.libp2p_registry.as_mut().map(|registry| { + ( + registry.sub_registry_with_prefix("gossipsub"), + Default::default(), + ) + }); let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( @@ -337,11 +356,8 @@ impl Network { libp2p::connection_limits::Behaviour::new(limits) }; - let banned_peers = libp2p::allow_block_list::Behaviour::default(); - let behaviour = { Behaviour { - banned_peers, gossipsub, eth2_rpc, discovery, @@ -351,34 +367,53 @@ impl Network { } }; - let (swarm, bandwidth) = { - // Set up the transport - tcp/ws with noise and mplex - let (transport, bandwidth) = - build_transport(local_keypair.clone(), !config.disable_quic_support) - .map_err(|e| format!("Failed to build transport: {:?}", e))?; + // Set up the transport - tcp/quic with noise and mplex + let transport = build_transport(local_keypair.clone(), !config.disable_quic_support) + .map_err(|e| format!("Failed 
to build transport: {:?}", e))?; - // use the executor for libp2p - struct Executor(task_executor::TaskExecutor); - impl libp2p::swarm::Executor for Executor { - fn exec(&self, f: Pin + Send>>) { - self.0.spawn(f, "libp2p"); - } + // use the executor for libp2p + struct Executor(task_executor::TaskExecutor); + impl libp2p::swarm::Executor for Executor { + fn exec(&self, f: Pin + Send>>) { + self.0.spawn(f, "libp2p"); } + } - // sets up the libp2p connection limits + // sets up the libp2p swarm. - ( - SwarmBuilder::with_executor( - transport, - behaviour, - local_peer_id, - Executor(executor), - ) - .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .per_connection_event_buffer_size(4) - .build(), - bandwidth, - ) + let swarm = { + let builder = SwarmBuilder::with_existing_identity(local_keypair) + .with_tokio() + .with_other_transport(|_key| transport) + .expect("infallible"); + + // NOTE: adding bandwidth metrics changes the generics of the swarm, so types diverge + if let Some(libp2p_registry) = ctx.libp2p_registry { + builder + .with_bandwidth_metrics(libp2p_registry) + .with_behaviour(|_| behaviour) + .expect("infallible") + .with_swarm_config(|_| { + libp2p::swarm::Config::with_executor(Executor(executor)) + .with_notify_handler_buffer_size( + std::num::NonZeroUsize::new(7).expect("Not zero"), + ) + .with_per_connection_event_buffer_size(4) + }) + .build() + } else { + builder + .with_behaviour(|_| behaviour) + .expect("infallible") + .with_swarm_config(|_| { + libp2p::swarm::Config::with_executor(Executor(executor)) + .with_notify_handler_buffer_size( + std::num::NonZeroUsize::new(7).expect("Not zero"), + ) + .with_per_connection_event_buffer_size(4) + }) + .build() + } }; let mut network = Network { @@ -390,8 +425,8 @@ impl Network { score_settings, update_gossipsub_scores, gossip_cache, - bandwidth, local_peer_id, + disable_duplicate_warn_logs: config.disable_duplicate_warn_logs, log, }; @@ -603,7 +638,7 @@ impl Network { } // Subscribe to core topics for the new fork - for kind in fork_core_topics(&new_fork) { + for kind in fork_core_topics::(&new_fork, &self.fork_context.spec) { let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest); self.subscribe(topic); } @@ -621,6 +656,38 @@ impl Network { } } + /// Remove topic weight from all topics that don't have the given fork digest. + pub fn remove_topic_weight_except(&mut self, except: [u8; 4]) { + let new_param = TopicScoreParams { + topic_weight: 0.0, + ..Default::default() + }; + let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); + for topic in subscriptions + .iter() + .filter(|topic| topic.fork_digest != except) + { + let libp2p_topic: Topic = topic.clone().into(); + match self + .gossipsub_mut() + .set_topic_params(libp2p_topic, new_param.clone()) + { + Ok(_) => debug!(self.log, "Removed topic weight"; "topic" => %topic), + Err(e) => { + warn!(self.log, "Failed to remove topic weight"; "topic" => %topic, "error" => e) + } + } + } + } + + /// Returns the scoring parameters for a topic if set. + pub fn get_topic_params(&self, topic: GossipTopic) -> Option<&TopicScoreParams> { + self.swarm + .behaviour() + .gossipsub + .get_topic_params(&topic.into()) + } + /// Subscribes to a gossipsub topic. /// /// Returns `true` if the subscription was successful and `false` otherwise.
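The `max_topics` sum replacing the old hard-coded 200/150 caps is plain arithmetic over subnet counts and core-topic list lengths; the in-line "162 in theory" comment corresponds to (64 attestation + 4 sync committee + 6 blob subnets + 7 core topics) * 2. A worked version of that arithmetic, with assumed mainnet values (the real code reads them from the `ChainSpec` and the `*_CORE_TOPICS` constants):

```rust
fn main() {
    // Assumed mainnet values, for illustration only.
    let attestation_subnets = 64u64;
    let sync_committee_subnets = 4u64;
    let blob_subnets = 6u64;
    let core_topics = 7u64; // per the "162 in theory" note above

    let max_topics = attestation_subnets + sync_committee_subnets + blob_subnets + core_topics;

    // Doubling leaves room for subscribing to both the old and new fork
    // digests across a fork boundary: (64 + 4 + 6 + 7) * 2 = 162.
    println!("max_subscriptions_per_request = {}", max_topics * 2);
    // The subscribed-topics cap gets twice that headroom again.
    println!("max_subscribed_topics = {}", max_topics * 4);
}
```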
@@ -678,7 +745,21 @@ impl Network { .gossipsub_mut() .publish(Topic::from(topic.clone()), message_data.clone()) { - slog::warn!(self.log, "Could not publish message"; "error" => ?e); + if self.disable_duplicate_warn_logs && matches!(e, PublishError::Duplicate) { + debug!( + self.log, + "Could not publish message"; + "error" => ?e, + "kind" => %topic.kind(), + ); + } else { + warn!( + self.log, + "Could not publish message"; + "error" => ?e, + "kind" => %topic.kind(), + ); + }; // add to metrics match topic.kind() { @@ -804,7 +885,7 @@ impl Network { } /// Inform the peer that their request produced an error. - pub fn send_error_reponse( + pub fn send_error_response( &mut self, peer_id: PeerId, id: PeerRequestId, @@ -1048,6 +1129,12 @@ impl Network { Request::BlocksByRoot { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } + Request::BlobsByRange { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]) + } + Request::BlobsByRoot { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]) + } } NetworkEvent::RequestReceived { peer_id, @@ -1074,9 +1161,11 @@ impl Network { // Remove the ENR from the cache to prevent continual re-dialing on disconnects for enr in peers_to_dial { - debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %enr.peer_id()); self.discovery_mut().remove_cached_enr(&enr.peer_id()); - self.peer_manager_mut().dial_peer(enr); + let peer_id = enr.peer_id(); + if self.peer_manager_mut().dial_peer(enr) { + debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); + } } } @@ -1177,6 +1266,32 @@ impl Network { "does_not_support_gossipsub", ); } + gossipsub::Event::SlowPeer { + peer_id, + failed_messages, + } => { + debug!(self.log, "Slow gossipsub peer"; "peer_id" => %peer_id, "publish" => failed_messages.publish, "forward" => failed_messages.forward, "priority" => failed_messages.priority, "non_priority" => failed_messages.non_priority); + // Punish the peer if it cannot handle priority messages + if failed_messages.total_timeout() > 10 { + debug!(self.log, "Slow gossipsub peer penalized for priority failure"; "peer_id" => %peer_id); + self.peer_manager_mut().report_peer( + &peer_id, + PeerAction::HighToleranceError, + ReportSource::Gossipsub, + None, + "publish_timeout_penalty", + ); + } else if failed_messages.total_queue_full() > 10 { + debug!(self.log, "Slow gossipsub peer penalized for send queue full"; "peer_id" => %peer_id); + self.peer_manager_mut().report_peer( + &peer_id, + PeerAction::HighToleranceError, + ReportSource::Gossipsub, + None, + "queue_full_penalty", + ); + } + } } None } @@ -1200,7 +1315,7 @@ impl Network { let handler_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated match event.event { - Err(handler_err) => { + HandlerEvent::Err(handler_err) => { match handler_err { HandlerErr::Inbound { id: _, @@ -1226,16 +1341,16 @@ impl Network { &error, ConnectionDirection::Outgoing, ); - // inform failures of requests comming outside the behaviour + // inform failures of requests coming outside the behaviour if let RequestId::Application(id) = id { - Some(NetworkEvent::RPCFailed { peer_id, id }) + Some(NetworkEvent::RPCFailed { peer_id, id, error }) } else { None } } } } - Ok(RPCReceived::Request(id, request)) => { + HandlerEvent::Ok(RPCReceived::Request(id, request)) => { let peer_request_id = (handler_id, id); match request { /* Behaviour managed protocols: Ping and Metadata */ @@ -1311,6 
+1426,19 @@ impl Network { ); Some(event) } + InboundRequest::BlobsByRange(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::BlobsByRange(req), + ); + Some(event) + } + InboundRequest::BlobsByRoot(req) => { + let event = + self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req)); + Some(event) + } InboundRequest::LightClientBootstrap(req) => { let event = self.build_request( peer_request_id, @@ -1321,7 +1449,7 @@ impl Network { } } } - Ok(RPCReceived::Response(id, resp)) => { + HandlerEvent::Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ RPCResponse::Pong(ping) => { @@ -1343,22 +1471,34 @@ impl Network { RPCResponse::BlocksByRange(resp) => { self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } + RPCResponse::BlobsByRange(resp) => { + self.build_response(id, peer_id, Response::BlobsByRange(Some(resp))) + } RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } + RPCResponse::BlobsByRoot(resp) => { + self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) + } // Should never be reached RPCResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) } } } - Ok(RPCReceived::EndOfStream(id, termination)) => { + HandlerEvent::Ok(RPCReceived::EndOfStream(id, termination)) => { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), + ResponseTermination::BlobsByRange => Response::BlobsByRange(None), + ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), }; self.build_response(id, peer_id, response) } + HandlerEvent::Close(_) => { + // NOTE: This is handled in the RPC behaviour. + None + } } } @@ -1402,15 +1542,10 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.swarm.behaviour_mut().banned_peers.block_peer(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); None } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.swarm - .behaviour_mut() - .banned_peers - .unblock_peer(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); None } @@ -1459,7 +1594,6 @@ impl Network { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { // Handle sub-behaviour events. - BehaviourEvent::BannedPeers(void) => void::unreachable(void), BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), // Inform the peer manager about discovered peers. @@ -1535,7 +1669,14 @@ impl Network { SwarmEvent::ListenerClosed { addresses, reason, .. } => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + match reason { + Ok(_) => { + debug!(self.log, "Listener gracefully closed"; "addresses" => ?addresses) + } + Err(reason) => { + crit!(self.log, "Listener abruptly closed"; "addresses" => ?addresses, "reason" => ?reason) + } + }; if Swarm::listeners(&self.swarm).count() == 0 { Some(NetworkEvent::ZeroListeners) } else { @@ -1551,7 +1692,11 @@ impl Network { None } } - SwarmEvent::Dialing { .. 
} => None, + _ => { + // NOTE: SwarmEvent is a non exhaustive enum so updates should be based on + // release notes more than compiler feedback + None + } }; if let Some(ev) = maybe_event { diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index b02a47fef..34dec1ca6 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -5,12 +5,11 @@ use crate::types::{ }; use crate::{GossipTopic, NetworkConfig}; use futures::future::Either; -use libp2p::bandwidth::BandwidthSinks; use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; use libp2p::gossipsub; use libp2p::identity::{secp256k1, Keypair}; -use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt}; -use libp2p_quic; +use libp2p::quic; +use libp2p::{core, noise, yamux, PeerId, Transport}; use prometheus_client::registry::Registry; use slog::{debug, warn}; use ssz::Decode; @@ -34,7 +33,7 @@ pub struct Context<'a> { pub enr_fork_id: EnrForkId, pub fork_context: Arc, pub chain_spec: &'a ChainSpec, - pub gossipsub_registry: Option<&'a mut Registry>, + pub libp2p_registry: Option<&'a mut Registry>, } type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; @@ -44,16 +43,14 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; pub fn build_transport( local_private_key: Keypair, quic_support: bool, -) -> std::io::Result<(BoxedTransport, Arc)> { +) -> std::io::Result { // mplex config let mut mplex_config = libp2p_mplex::MplexConfig::new(); mplex_config.set_max_buffer_size(256); mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); // yamux config - let mut yamux_config = yamux::Config::default(); - yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); - + let yamux_config = yamux::Config::default(); // Creates the TCP transport layer let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true)) .upgrade(core::upgrade::Version::V1) @@ -63,25 +60,26 @@ pub fn build_transport( mplex_config, )) .timeout(Duration::from_secs(10)); - - let (transport, bandwidth) = if quic_support { + let transport = if quic_support { // Enables Quic // The default quic configuration suits us for now. - let quic_config = libp2p_quic::Config::new(&local_private_key); - tcp.or_transport(libp2p_quic::tokio::Transport::new(quic_config)) + let quic_config = quic::Config::new(&local_private_key); + let quic = quic::tokio::Transport::new(quic_config); + let transport = tcp + .or_transport(quic) .map(|either_output, _| match either_output { Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .with_bandwidth_logging() + }); + transport.boxed() } else { - tcp.with_bandwidth_logging() + tcp.boxed() }; - // // Enables DNS over the transport. - let transport = libp2p::dns::TokioDnsConfig::system(transport)?.boxed(); + // Enables DNS over the transport. + let transport = libp2p::dns::tokio::Transport::system(transport)?.boxed(); - Ok((transport, bandwidth)) + Ok(transport) } // Useful helper functions for debugging. Currently not used in the client. 
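The `SlowPeer` branch above turns gossipsub backpressure statistics into mild peer penalties: priority-message timeouts and full send queues each trip a threshold before any report is made. A standalone sketch of that triage rule; the >10 thresholds mirror the diff, while the counter struct and its field names are simplified assumptions:

```rust
/// Condensed stand-in for gossipsub's per-peer failed-message counters.
struct FailedMessages {
    timeout: u64,    // priority messages that timed out before sending
    queue_full: u64, // messages dropped because the send queue was full
}

enum SlowPeerPenalty {
    None,
    PublishTimeout,
    QueueFull,
}

fn classify(failed: &FailedMessages) -> SlowPeerPenalty {
    if failed.timeout > 10 {
        SlowPeerPenalty::PublishTimeout
    } else if failed.queue_full > 10 {
        SlowPeerPenalty::QueueFull
    } else {
        // Slow but tolerable: just log at debug level, no report.
        SlowPeerPenalty::None
    }
}
```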
@@ -233,6 +231,7 @@ pub(crate) fn create_whitelist_filter( possible_fork_digests: Vec<[u8; 4]>, attestation_subnet_count: u64, sync_committee_subnet_count: u64, + blob_sidecar_subnet_count: u64, ) -> gossipsub::WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { @@ -258,6 +257,9 @@ pub(crate) fn create_whitelist_filter( for id in 0..sync_committee_subnet_count { add(SyncCommitteeMessage(SyncSubnetId::new(id))); } + for id in 0..blob_sidecar_subnet_count { + add(BlobSidecar(id)); + } } gossipsub::WhitelistSubscriptionFilter(possible_hashes) } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index b2b605e8a..84a581d56 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -118,7 +118,7 @@ impl NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); - let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); + let enr = discv5::enr::Enr::builder().build(&enr_key).unwrap(); NetworkGlobals::new( enr, MetaData::V2(MetaDataV2 { diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index e7457f25d..af9e9ef45 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -18,5 +18,6 @@ pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; pub use topics::{ core_topics_to_subscribe, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, - GossipTopic, LIGHT_CLIENT_GOSSIP_TOPICS, + GossipTopic, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, + LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 06732ac99..60fe37482 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -9,17 +9,20 @@ use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockMerge, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobSidecar, EthSpec, ForkContext, ForkName, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockMerge, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. BeaconBlock(Arc>), + /// Gossipsub message providing notification of a [`BlobSidecar`] along with the subnet id where it was received. 
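`create_whitelist_filter` now also whitelists one `blob_sidecar_{id}` topic per subnet and per fork digest. A hedged sketch of the topic-name enumeration, assuming the standard `/eth2/{fork_digest}/{topic}/ssz_snappy` gossip naming; the real code hashes these strings into a `WhitelistSubscriptionFilter` rather than keeping them as plain strings:

```rust
use std::collections::HashSet;

fn blob_sidecar_topics(fork_digest: [u8; 4], blob_sidecar_subnet_count: u64) -> HashSet<String> {
    // Hex-encode the fork digest into the topic path.
    let digest: String = fork_digest.iter().map(|b| format!("{b:02x}")).collect();
    (0..blob_sidecar_subnet_count)
        .map(|id| format!("/eth2/{digest}/blob_sidecar_{id}/ssz_snappy"))
        .collect()
}

fn main() {
    let topics = blob_sidecar_topics([0xde, 0xad, 0xbe, 0xef], 6);
    assert!(topics.contains("/eth2/deadbeef/blob_sidecar_0/ssz_snappy"));
}
```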
+ BlobSidecar(Box<(u64, Arc>)>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -113,6 +116,9 @@ impl PubsubMessage { pub fn kind(&self) -> GossipKind { match self { PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock, + PubsubMessage::BlobSidecar(blob_sidecar_data) => { + GossipKind::BlobSidecar(blob_sidecar_data.0) + } PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -183,6 +189,10 @@ impl PubsubMessage { SignedBeaconBlockCapella::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Deneb) => SignedBeaconBlock::::Deneb( + SignedBeaconBlockDeneb::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", @@ -192,6 +202,30 @@ impl PubsubMessage { }; Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } + GossipKind::BlobSidecar(blob_index) => { + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(ForkName::Deneb) => { + let blob_sidecar = Arc::new( + BlobSidecar::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ); + Ok(PubsubMessage::BlobSidecar(Box::new(( + *blob_index, + blob_sidecar, + )))) + } + Some( + ForkName::Base + | ForkName::Altair + | ForkName::Merge + | ForkName::Capella, + ) + | None => Err(format!( + "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )), + } + } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?; @@ -260,6 +294,7 @@ impl PubsubMessage { // messages for us. 
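Decoding above is gated on the topic's 4-byte fork digest: a `blob_sidecar_*` message only parses when the digest resolves to Deneb, and every earlier fork (or an unknown digest) is rejected outright. A stand-in sketch of that dispatch; the real code calls `BlobSidecar::from_ssz_bytes` where this returns the raw bytes:

```rust
#[derive(Clone, Copy, Debug)]
enum ForkName {
    Base,
    Altair,
    Merge,
    Capella,
    Deneb,
}

/// Stand-in for the fork-gated SSZ decode step.
fn decode_blob_sidecar(fork: Option<ForkName>, data: &[u8]) -> Result<Vec<u8>, String> {
    match fork {
        Some(ForkName::Deneb) => Ok(data.to_vec()),
        Some(other) => Err(format!("blob_sidecar topic invalid for fork {other:?}")),
        None => Err("unknown gossipsub fork digest".into()),
    }
}
```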
match &self { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), + PubsubMessage::BlobSidecar(data) => data.1.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -283,6 +318,12 @@ impl std::fmt::Display for PubsubMessage { block.slot(), block.message().proposer_index() ), + PubsubMessage::BlobSidecar(data) => write!( + f, + "BlobSidecar: slot: {}, blob index: {}", + data.1.slot(), + data.1.index, + ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 0e4aefbb5..b77490517 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,7 +1,7 @@ use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use strum::AsRefStr; -use types::{ForkName, SubnetId, SyncSubnetId}; +use types::{ChainSpec, EthSpec, ForkName, SubnetId, SyncSubnetId}; use crate::Subnet; @@ -13,6 +13,7 @@ pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; +pub const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; @@ -39,22 +40,37 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ GossipKind::LightClientOptimisticUpdate, ]; +pub const DENEB_CORE_TOPICS: [GossipKind; 0] = []; + /// Returns the core topics associated with each fork that are new to the previous fork -pub fn fork_core_topics(fork_name: &ForkName) -> Vec { +pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> Vec { match fork_name { ForkName::Base => BASE_CORE_TOPICS.to_vec(), ForkName::Altair => ALTAIR_CORE_TOPICS.to_vec(), ForkName::Merge => vec![], ForkName::Capella => CAPELLA_CORE_TOPICS.to_vec(), + ForkName::Deneb => { + // All of deneb blob topics are core topics + let mut deneb_blob_topics = Vec::new(); + for i in 0..spec.blob_sidecar_subnet_count { + deneb_blob_topics.push(GossipKind::BlobSidecar(i)); + } + let mut deneb_topics = DENEB_CORE_TOPICS.to_vec(); + deneb_topics.append(&mut deneb_blob_topics); + deneb_topics + } } } /// Returns all the topics that we need to subscribe to for a given fork /// including topics from older forks and new topics for the current fork. -pub fn core_topics_to_subscribe(mut current_fork: ForkName) -> Vec { - let mut topics = fork_core_topics(¤t_fork); +pub fn core_topics_to_subscribe( + mut current_fork: ForkName, + spec: &ChainSpec, +) -> Vec { + let mut topics = fork_core_topics::(¤t_fork, spec); while let Some(previous_fork) = current_fork.previous_fork() { - let previous_fork_topics = fork_core_topics(&previous_fork); + let previous_fork_topics = fork_core_topics::(&previous_fork, spec); topics.extend(previous_fork_topics); current_fork = previous_fork; } @@ -82,6 +98,8 @@ pub enum GossipKind { BeaconBlock, /// Topic for publishing aggregate attestations and proofs. 
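`core_topics_to_subscribe` above walks backwards through `previous_fork()`, appending each fork's new core topics, so a Deneb node still carries the Capella, Altair and phase0 topics. A self-contained sketch of that accumulation with a hard-coded fork chain and illustrative topic names:

```rust
#[derive(Clone, Copy)]
enum Fork { Base, Altair, Merge, Capella, Deneb }

impl Fork {
    fn previous(self) -> Option<Fork> {
        match self {
            Fork::Base => None,
            Fork::Altair => Some(Fork::Base),
            Fork::Merge => Some(Fork::Altair),
            Fork::Capella => Some(Fork::Merge),
            Fork::Deneb => Some(Fork::Capella),
        }
    }

    /// Topics introduced by this fork alone (names are illustrative).
    fn new_topics(self, blob_subnets: u64) -> Vec<String> {
        match self {
            Fork::Base => vec!["beacon_block".into()],
            Fork::Altair => vec!["sync_committee_contribution_and_proof".into()],
            Fork::Merge => vec![],
            Fork::Capella => vec!["bls_to_execution_change".into()],
            Fork::Deneb => (0..blob_subnets).map(|i| format!("blob_sidecar_{i}")).collect(),
        }
    }
}

/// Current fork's topics plus everything inherited from older forks.
fn core_topics_to_subscribe(mut fork: Fork, blob_subnets: u64) -> Vec<String> {
    let mut topics = fork.new_topics(blob_subnets);
    while let Some(prev) = fork.previous() {
        topics.extend(prev.new_topics(blob_subnets));
        fork = prev;
    }
    topics
}
```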
BeaconAggregateAndProof, + /// Topic for publishing BlobSidecars. + BlobSidecar(u64), /// Topic for publishing raw attestations on a particular subnet. #[strum(serialize = "beacon_attestation")] Attestation(SubnetId), @@ -111,6 +129,9 @@ impl std::fmt::Display for GossipKind { GossipKind::SyncCommitteeMessage(subnet_id) => { write!(f, "sync_committee_{}", **subnet_id) } + GossipKind::BlobSidecar(blob_index) => { + write!(f, "{}{}", BLOB_SIDECAR_PREFIX, blob_index) + } x => f.write_str(x.as_ref()), } } @@ -178,11 +199,8 @@ impl GossipTopic { BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, - topic => match committee_topic_index(topic) { - Some(subnet) => match subnet { - Subnet::Attestation(s) => GossipKind::Attestation(s), - Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), - }, + topic => match subnet_topic_index(topic) { + Some(kind) => kind, None => return Err(format!("Unknown topic: {}", topic)), }, }; @@ -236,6 +254,9 @@ impl std::fmt::Display for GossipTopic { GossipKind::SyncCommitteeMessage(index) => { format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) } + GossipKind::BlobSidecar(blob_index) => { + format!("{}{}", BLOB_SIDECAR_PREFIX, blob_index) + } GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), @@ -267,28 +288,26 @@ pub fn subnet_from_topic_hash(topic_hash: &TopicHash) -> Option { GossipTopic::decode(topic_hash.as_str()).ok()?.subnet_id() } -// Determines if a string is an attestation or sync committee topic. -fn committee_topic_index(topic: &str) -> Option { - if topic.starts_with(BEACON_ATTESTATION_PREFIX) { - return Some(Subnet::Attestation(SubnetId::new( - topic - .trim_start_matches(BEACON_ATTESTATION_PREFIX) - .parse::() - .ok()?, +// Determines if the topic name is of an indexed topic. 
+fn subnet_topic_index(topic: &str) -> Option { + if let Some(index) = topic.strip_prefix(BEACON_ATTESTATION_PREFIX) { + return Some(GossipKind::Attestation(SubnetId::new( + index.parse::().ok()?, ))); - } else if topic.starts_with(SYNC_COMMITTEE_PREFIX_TOPIC) { - return Some(Subnet::SyncCommittee(SyncSubnetId::new( - topic - .trim_start_matches(SYNC_COMMITTEE_PREFIX_TOPIC) - .parse::() - .ok()?, + } else if let Some(index) = topic.strip_prefix(SYNC_COMMITTEE_PREFIX_TOPIC) { + return Some(GossipKind::SyncCommitteeMessage(SyncSubnetId::new( + index.parse::().ok()?, ))); + } else if let Some(index) = topic.strip_prefix(BLOB_SIDECAR_PREFIX) { + return Some(GossipKind::BlobSidecar(index.parse::().ok()?)); } None } #[cfg(test)] mod tests { + use types::MainnetEthSpec; + use super::GossipKind::*; use super::*; @@ -417,12 +436,19 @@ mod tests { #[test] fn test_core_topics_to_subscribe() { + type E = MainnetEthSpec; + let spec = E::default_spec(); let mut all_topics = Vec::new(); + let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb, &spec); + all_topics.append(&mut deneb_core_topics); all_topics.extend(CAPELLA_CORE_TOPICS); all_topics.extend(ALTAIR_CORE_TOPICS); all_topics.extend(BASE_CORE_TOPICS); let latest_fork = *ForkName::list_all().last().unwrap(); - assert_eq!(core_topics_to_subscribe(latest_fork), all_topics); + assert_eq!( + core_topics_to_subscribe::(latest_fork, &spec), + all_topics + ); } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 7b437fe7a..9585dcf5a 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -25,16 +25,19 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); + let deneb_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -110,7 +113,7 @@ pub async fn build_libp2p_instance( enr_fork_id: EnrForkId::default(), fork_context: Arc::new(fork_context(fork_name)), chain_spec: spec, - gossipsub_registry: None, + libp2p_registry: None, }; Libp2pInstance( LibP2PService::new(executor, libp2p_context, &log) diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 795afd06b..643c1231a 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -13,9 +13,9 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock, - Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, - Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobSidecar, ChainSpec, + EmptyBlock, Epoch, EthSpec, ForkContext, 
ForkName, Hash256, MinimalEthSpec, Signature, + SignedBeaconBlock, Slot, }; type E = MinimalEthSpec; @@ -278,6 +278,118 @@ fn test_tcp_blocks_by_range_chunked_rpc() { }) } +// Tests a streamed BlobsByRange RPC Message +#[test] +#[allow(clippy::single_match)] +fn test_blobs_by_range_chunked_rpc() { + // set up the logging. The level and enabled logging or not + let log_level = Level::Debug; + let enable_logging = false; + + let slot_count = 32; + let messages_to_send = 34; + + let log = common::build_log(log_level, enable_logging); + + let rt = Arc::new(Runtime::new().unwrap()); + + rt.block_on(async { + // get sender/receiver + let spec = E::default_spec(); + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Deneb, + &spec, + Protocol::Tcp, + ) + .await; + + // BlobsByRange Request + let rpc_request = Request::BlobsByRange(BlobsByRangeRequest { + start_slot: 0, + count: slot_count, + }); + + // BlobsByRange Response + let blob = BlobSidecar::::empty(); + + let rpc_response = Response::BlobsByRange(Some(Arc::new(blob))); + + // keep count of the number of messages received + let mut messages_received = 0; + let request_id = messages_to_send as usize; + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + // Send the BlobsByRange request + debug!(log, "Sending RPC"); + sender.send_request(peer_id, request_id, rpc_request.clone()); + } + NetworkEvent::ResponseReceived { + peer_id: _, + id: _, + response, + } => { + warn!(log, "Sender received a response"); + match response { + Response::BlobsByRange(Some(_)) => { + assert_eq!(response, rpc_response.clone()); + messages_received += 1; + warn!(log, "Chunk received"); + } + Response::BlobsByRange(None) => { + // should be exactly `messages_to_send` messages before terminating + assert_eq!(messages_received, messages_to_send); + // end the test + return; + } + _ => panic!("Invalid RPC received"), + } + } + _ => {} // Ignore other behaviour events + } + } + }; + + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + id, + request, + } => { + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); + for _ in 0..messages_to_send { + // Send `messages_to_send` identical blob responses; + // blobs have a single fork variant, unlike the block tests. + receiver.send_response(peer_id, id, rpc_response.clone()); + } + // send the stream termination + receiver.send_response(peer_id, id, Response::BlobsByRange(None)); + } + } + _ => {} // Ignore other events + } + } + }; + + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + // Tests rejection of blocks over `MAX_RPC_SIZE`. #[test] #[allow(clippy::single_match)] @@ -603,7 +715,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { } // Tests a streamed, chunked BlocksByRoot RPC Message -// The size of the reponse is a full `BeaconBlock` +// The size of the response is a full `BeaconBlock` // which is greater than the Snappy frame size. Hence, this test // serves to test the snappy framing format as well.
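The `subnet_topic_index` rewrite above leans on `strip_prefix`, which both tests the prefix and yields the index substring in one step. A minimal runnable version of the blob-topic case:

```rust
/// Parse "blob_sidecar_{index}" into its subnet index, if well-formed.
fn parse_blob_topic(topic: &str) -> Option<u64> {
    topic.strip_prefix("blob_sidecar_")?.parse::<u64>().ok()
}

fn main() {
    assert_eq!(parse_blob_topic("blob_sidecar_3"), Some(3));
    assert_eq!(parse_blob_topic("beacon_attestation_3"), None);
    assert_eq!(parse_blob_topic("blob_sidecar_x"), None);
}
```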
#[test] @@ -631,15 +743,17 @@ fn test_tcp_blocks_by_root_chunked_rpc() { .await; // BlocksByRoot Request - let rpc_request = - Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![ + let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + vec![ Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), - ]))); + ], + &spec, + )); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); @@ -764,8 +878,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { .await; // BlocksByRoot Request - let rpc_request = - Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![ + let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + vec![ Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), @@ -776,7 +890,9 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), - ]))); + ], + &spec, + )); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 0204fc245..bbd2af217 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -11,6 +11,7 @@ matches = "0.1.8" exit-future = { workspace = true } slog-term = { workspace = true } slog-async = { workspace = true } +eth2 = { workspace = true } [dependencies] beacon_chain = { workspace = true } @@ -39,6 +40,7 @@ itertools = { workspace = true } num_cpus = { workspace = true } lru_cache = { workspace = true } if-addrs = "0.6.4" +lru = { workspace = true } strum = { workspace = true } tokio-util = { workspace = true } derivative = { workspace = true } @@ -53,3 +55,5 @@ environment = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill disable-backfill = [] +fork_from_env = ["beacon_chain/fork_from_env"] +portable = ["beacon_chain/portable"] \ No newline at end of file diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 014482486..0509ed1ea 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -7,8 +7,8 @@ use beacon_chain::{ use fnv::FnvHashMap; pub use lighthouse_metrics::*; use lighthouse_network::{ - peer_manager::peerdb::client::ClientKind, types::GossipKind, BandwidthSinks, GossipTopic, - Gossipsub, NetworkGlobals, + peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, + NetworkGlobals, }; use std::sync::Arc; use strum::IntoEnumIterator; @@ -67,6 +67,10 @@ lazy_static! { "beacon_processor_gossip_block_early_seconds", "Whenever a gossip block is received early this metrics is set to how early that block was." ); + pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_gossip_blob_verified_total", + "Total number of gossip blob verified for propagation." + ); // Gossip Exits. pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_exit_verified_total", @@ -103,6 +107,10 @@ lazy_static! { "beacon_processor_bls_to_execution_change_imported_total", "Total number of address changes imported to the op pool." ); +} + +// Need to split up this `lazy_static!` due to recursion limits. +lazy_static! 
{ // Rpc blocks. pub static ref BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: Result = try_create_int_counter( "beacon_processor_rpc_block_imported_total", @@ -215,22 +223,6 @@ lazy_static! { lazy_static! { - /* - * Bandwidth metrics - */ - pub static ref INBOUND_LIBP2P_BYTES: Result = - try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p"); - - pub static ref OUTBOUND_LIBP2P_BYTES: Result = try_create_int_gauge( - "libp2p_outbound_bytes", - "The outbound bandwidth over libp2p" - ); - pub static ref TOTAL_LIBP2P_BANDWIDTH: Result = try_create_int_gauge( - "libp2p_total_bandwidth", - "The total inbound/outbound bandwidth over libp2p" - ); - - /* * Sync related metrics */ @@ -282,6 +274,44 @@ lazy_static! { "Count of times when a gossip block arrived from the network later than the attestation deadline.", ); + /* + * Blob Delay Metrics + */ + pub static ref BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result = try_create_histogram_with_buckets( + "beacon_blob_gossip_propagation_verification_delay_time", + "Duration between when the blob is received over gossip and when it is verified for propagation.", + // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] + decimal_buckets(-3,-1) + ); + pub static ref BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( + "beacon_blob_gossip_slot_start_delay_time", + "Duration between when the blob is received over gossip and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + // NOTE: Previous values, which we may want to switch back to. + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + //decimal_buckets(-1,2) + ); + pub static ref BEACON_BLOB_RPC_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( + "beacon_blob_rpc_slot_start_delay_time", + "Duration between when a blob is received over rpc and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + // NOTE: Previous values, which we may want to switch back to. + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + //decimal_buckets(-1,2) + + ); + pub static ref BEACON_BLOB_LAST_DELAY: Result = try_create_int_gauge( + "beacon_blob_last_delay", + "Keeps track of the last blob's delay from the start of the slot" + ); + + pub static ref BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( + "beacon_blob_gossip_arrived_late_total", + "Count of times when a gossip blob arrived from the network later than the attestation deadline.", + ); + /* * Light client update reprocessing queue metrics. */ @@ -291,15 +321,6 @@ lazy_static! 
{ ); } -pub fn update_bandwidth_metrics(bandwidth: Arc) { - set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); - set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); - set_gauge( - &TOTAL_LIBP2P_BANDWIDTH, - (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, - ); -} - pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) { inc_counter_vec(&GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index ac7479db0..9d9b196e9 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,17 +4,19 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; - +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use beacon_chain::block_verification_types::AsBlock; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, + data_availability_checker::AvailabilityCheckErrorCategory, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, - validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, - NotifyExecutionLayer, + validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, + AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, + GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; @@ -29,10 +31,11 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobSidecar, EthSpec, Hash256, IndexedAttestation, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, + SyncSubnetId, }; use beacon_processor::{ @@ -596,6 +599,203 @@ impl NetworkBeaconProcessor { } } + #[allow(clippy::too_many_arguments)] + pub async fn process_gossip_blob( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + _peer_client: Client, + blob_index: u64, + blob_sidecar: Arc>, + seen_duration: Duration, + ) { + let slot = blob_sidecar.slot(); + let root = blob_sidecar.block_root(); + let index = blob_sidecar.index; + let commitment = blob_sidecar.kzg_commitment; + let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock); + // Log metrics to track delay from other nodes on the network. 
+ metrics::observe_duration(&metrics::BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME, delay); + metrics::set_gauge(&metrics::BEACON_BLOB_LAST_DELAY, delay.as_millis() as i64); + match self + .chain + .verify_blob_sidecar_for_gossip(blob_sidecar, blob_index) + { + Ok(gossip_verified_blob) => { + metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL); + + if delay >= self.chain.slot_clock.unagg_attestation_production_delay() { + metrics::inc_counter(&metrics::BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL); + debug!( + self.log, + "Gossip blob arrived late"; + "block_root" => ?gossip_verified_blob.block_root(), + "proposer_index" => gossip_verified_blob.block_proposer_index(), + "slot" => gossip_verified_blob.slot(), + "delay" => ?delay, + "commitment" => %gossip_verified_blob.kzg_commitment(), + ); + } + + debug!( + self.log, + "Successfully verified gossip blob"; + "slot" => %slot, + "root" => %root, + "index" => %index, + "commitment" => %gossip_verified_blob.kzg_commitment(), + ); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + // Log metrics to keep track of propagation delay times. + if let Some(duration) = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .and_then(|now| now.checked_sub(seen_duration)) + { + metrics::observe_duration( + &metrics::BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME, + duration, + ); + } + self.process_gossip_verified_blob(peer_id, gossip_verified_blob, seen_duration) + .await + } + Err(err) => { + match err { + GossipBlobError::BlobParentUnknown(blob) => { + debug!( + self.log, + "Unknown parent hash for blob"; + "action" => "requesting parent", + "block_root" => %blob.block_root(), + "parent_root" => %blob.block_parent_root(), + "commitment" => %commitment, + ); + self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob)); + } + GossipBlobError::KzgNotInitialized + | GossipBlobError::PubkeyCacheTimeout + | GossipBlobError::BeaconChainError(_) => { + crit!( + self.log, + "Internal error when verifying blob sidecar"; + "error" => ?err, + ) + } + GossipBlobError::ProposalSignatureInvalid + | GossipBlobError::UnknownValidator(_) + | GossipBlobError::ProposerIndexMismatch { .. } + | GossipBlobError::BlobIsNotLaterThanParent { .. } + | GossipBlobError::InvalidSubnet { .. } + | GossipBlobError::InvalidInclusionProof + | GossipBlobError::KzgError(_) + | GossipBlobError::InclusionProof(_) + | GossipBlobError::NotFinalizedDescendant { .. } => { + warn!( + self.log, + "Could not verify blob sidecar for gossip. Rejecting the blob sidecar"; + "error" => ?err, + "slot" => %slot, + "root" => %root, + "index" => %index, + "commitment" => %commitment, + ); + // Prevent recurring behaviour by penalizing the peer slightly. + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_blob_low", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipBlobError::FutureSlot { .. } + | GossipBlobError::RepeatBlob { .. } + | GossipBlobError::PastFinalizedSlot { .. } => { + warn!( + self.log, + "Could not verify blob sidecar for gossip. Ignoring the blob sidecar"; + "error" => ?err, + "slot" => %slot, + "root" => %root, + "index" => %index, + "commitment" => %commitment, + ); + // Prevent recurring behaviour by penalizing the peer slightly. 
+ self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_blob_high", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } + } + } + } + + pub async fn process_gossip_verified_blob( + self: &Arc, + peer_id: PeerId, + verified_blob: GossipVerifiedBlob, + // This value is not used presently, but it might come in handy for debugging. + _seen_duration: Duration, + ) { + let block_root = verified_blob.block_root(); + let blob_slot = verified_blob.slot(); + let blob_index = verified_blob.id().index; + + match self.chain.process_gossip_blob(verified_blob).await { + Ok(AvailabilityProcessingStatus::Imported(block_root)) => { + // Note: Reusing block imported metric here + metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); + info!( + self.log, + "Gossipsub blob processed, imported fully available block"; + "block_root" => %block_root + ); + self.chain.recompute_head_at_current_slot().await; + } + Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { + trace!( + self.log, + "Processed blob, waiting for other components"; + "slot" => %slot, + "blob_index" => %blob_index, + "block_root" => %block_root, + ); + } + Err(err) => { + debug!( + self.log, + "Invalid gossip blob"; + "outcome" => ?err, + "block root" => ?block_root, + "block slot" => blob_slot, + "blob index" => blob_index, + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_blob_ssz", + ); + trace!( + self.log, + "Invalid gossip blob ssz"; + ); + } + } + } + /// Process the beacon block received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. @@ -751,7 +951,7 @@ impl NetworkBeaconProcessor { "Unknown parent for gossip block"; "root" => ?block_root ); - self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root)); + self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)); return None; } Err(e @ BlockError::BeaconChainError(_)) => { @@ -763,9 +963,17 @@ impl NetworkBeaconProcessor { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } + Err(BlockError::BlockIsAlreadyKnown) => { + debug!( + self.log, + "Gossip block is already known"; + "block_root" => %block_root, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) - | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); @@ -809,6 +1017,15 @@ impl NetworkBeaconProcessor { ); return None; } + // Note: This error variant cannot be reached when doing gossip validation + // as we do not do availability checks here. + Err(e @ BlockError::AvailabilityCheck(_)) => { + crit!(self.log, "Internal block gossip validation error. Availability check during + gossip validation"; + "error" => %e + ); + return None; + } }; metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL); @@ -916,21 +1133,16 @@ impl NetworkBeaconProcessor { // This value is not used presently, but it might come in handy for debugging. 
_seen_duration: Duration, ) { - let block: Arc<_> = verified_block.block.clone(); + let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; let result = self .chain - .process_block( - block_root, - verified_block, - NotifyExecutionLayer::Yes, - || Ok(()), - ) + .process_block_with_early_caching(block_root, verified_block, NotifyExecutionLayer::Yes) .await; match &result { - Ok(block_root) => { + Ok(AvailabilityProcessingStatus::Imported(block_root)) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); if reprocess_tx @@ -957,7 +1169,15 @@ impl NetworkBeaconProcessor { self.chain.recompute_head_at_current_slot().await; } - Err(BlockError::ParentUnknown { .. }) => { + Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { + trace!( + self.log, + "Processed block, waiting for other components"; + "slot" => slot, + "block_root" => %block_root, + ); + } + Err(BlockError::ParentUnknown(block)) => { // Inform the sync manager to find parents for this block // This should not occur. It should be checked by `should_forward_block` error!( @@ -965,7 +1185,7 @@ impl NetworkBeaconProcessor { "Block with unknown parent attempted to be processed"; "peer_id" => %peer_id ); - self.send_sync_message(SyncMessage::UnknownBlock( + self.send_sync_message(SyncMessage::UnknownParentBlock( peer_id, block.clone(), block_root, @@ -978,6 +1198,31 @@ impl NetworkBeaconProcessor { "error" => %e ); } + Err(BlockError::AvailabilityCheck(err)) => { + match err.category() { + AvailabilityCheckErrorCategory::Internal => { + warn!( + self.log, + "Internal availability check error"; + "error" => ?err, + ); + } + AvailabilityCheckErrorCategory::Malicious => { + // Note: we cannot penalize the peer that sent us the block + // over gossip here because these errors imply either an issue + // with: + // 1. Blobs we have received over non-gossip sources + // (from potentially other peers) + // 2. The proposer being malicious and sending inconsistent + // blocks and blobs. + warn!( + self.log, + "Received invalid blob or malicious proposer"; + "error" => ?err + ); + } + } + } other => { debug!( self.log, @@ -1809,7 +2054,10 @@ impl NetworkBeaconProcessor { // We don't know the block, get the sync manager to handle the block lookup, and // send the attestation to be scheduled for re-processing. 
self.sync_tx - .send(SyncMessage::UnknownBlockHash(peer_id, *beacon_block_root)) + .send(SyncMessage::UnknownBlockHashFromAttestation( + peer_id, + *beacon_block_root, + )) .unwrap_or_else(|_| { warn!( self.log, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index f8c4e37ff..67fc2fabb 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -2,6 +2,7 @@ use crate::{ service::NetworkMessage, sync::{manager::BlockProcessType, SyncMessage}, }; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, test_utils::BeaconChainHarness, BeaconChain, }; @@ -12,6 +13,7 @@ use beacon_processor::{ WorkEvent as BeaconWorkEvent, }; use environment::null_logger; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -28,6 +30,7 @@ use tokio::sync::mpsc::{self, error::TrySendError}; use types::*; pub use sync_methods::ChainSegmentProcessId; +use types::blob_sidecar::FixedBlobSidecarList; pub type Error = TrySendError>; @@ -196,6 +199,36 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some blob sidecar. + pub fn send_gossip_blob_sidecar( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blob_index: u64, + blob_sidecar: Arc>, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_gossip_blob( + message_id, + peer_id, + peer_client, + blob_index, + blob_sidecar, + seen_timestamp, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipBlobSidecar(Box::pin(process_fn)), + }) + } + /// Create a new `Work` event for some sync committee signature. pub fn send_gossip_sync_signature( self: &Arc, @@ -376,7 +409,7 @@ impl NetworkBeaconProcessor { pub fn send_rpc_beacon_block( self: &Arc, block_root: Hash256, - block: Arc>, + block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Result<(), Error> { @@ -392,11 +425,36 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some blobs, where the result from computation (if any) is + /// sent to the other side of `result_tx`. + pub fn send_rpc_blobs( + self: &Arc, + block_root: Hash256, + blobs: FixedBlobSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + let blob_count = blobs.iter().filter(|b| b.is_some()).count(); + if blob_count == 0 { + return Ok(()); + } + let process_fn = self.clone().generate_rpc_blobs_process_fn( + block_root, + blobs, + seen_timestamp, + process_type, + ); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcBlobs { process_fn }, + }) + } + /// Create a new work event to import `blocks` as a beacon chain segment. pub fn send_chain_segment( self: &Arc, process_id: ChainSegmentProcessId, - blocks: Vec>>, + blocks: Vec>, ) -> Result<(), Error> { let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. }); let processor = self.clone(); @@ -496,6 +554,40 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `BlobsByRangeRequest`s from the RPC network. 
+ pub fn send_blobs_by_range_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_blobs_by_range_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::BlobsByRangeRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `BlobsByRootRequest`s from the RPC network. + pub fn send_blobs_by_roots_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_blobs_by_root_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::BlobsByRootsRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. pub fn send_lightclient_bootstrap_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 19b0a60a4..a731dea7c 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -5,15 +5,18 @@ use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use beacon_processor::SendOnDrop; use itertools::process_results; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; use slot_clock::SlotClock; +use std::collections::{hash_map::Entry, HashMap}; use std::sync::Arc; use task_executor::TaskExecutor; use tokio_stream::StreamExt; -use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; +use types::blob_sidecar::BlobIdentifier; +use types::{Epoch, EthSpec, ForkName, Hash256, Slot}; impl NetworkBeaconProcessor { /* Auxiliary functions */ @@ -118,7 +121,10 @@ impl NetworkBeaconProcessor { }; self.send_sync_message(SyncMessage::AddPeer(peer_id, info)); } - Err(e) => error!(self.log, "Could not process status message"; "error" => ?e), + Err(e) => error!(self.log, "Could not process status message"; + "peer" => %peer_id, + "error" => ?e + ), } } @@ -207,6 +213,86 @@ impl NetworkBeaconProcessor { "load_blocks_by_root_blocks", ) } + /// Handle a `BlobsByRoot` request from the peer. + pub fn handle_blobs_by_root_request( + self: Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRootRequest, + ) { + let Some(requested_root) = request.blob_ids.as_slice().first().map(|id| id.block_root) + else { + // No blob ids requested. + return; + }; + let requested_indices = request + .blob_ids + .as_slice() + .iter() + .map(|id| id.index) + .collect::>(); + let mut send_blob_count = 0; + let send_response = true; + + let mut blob_list_results = HashMap::new(); + for id in request.blob_ids.as_slice() { + // First attempt to get the blobs from the RPC cache. 
+ if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(id) { + self.send_response(peer_id, Response::BlobsByRoot(Some(blob)), request_id); + send_blob_count += 1; + } else { + let BlobIdentifier { + block_root: root, + index, + } = id; + + let blob_list_result = match blob_list_results.entry(root) { + Entry::Vacant(entry) => { + entry.insert(self.chain.get_blobs_checking_early_attester_cache(root)) + } + Entry::Occupied(entry) => entry.into_mut(), + }; + + match blob_list_result.as_ref() { + Ok(blobs_sidecar_list) => { + 'inner: for blob_sidecar in blobs_sidecar_list.iter() { + if blob_sidecar.index == *index { + self.send_response( + peer_id, + Response::BlobsByRoot(Some(blob_sidecar.clone())), + request_id, + ); + send_blob_count += 1; + break 'inner; + } + } + } + Err(e) => { + debug!( + self.log, + "Error fetching blob for peer"; + "peer" => %peer_id, + "request_root" => ?root, + "error" => ?e, + ); + } + } + } + } + debug!( + self.log, + "Received BlobsByRoot Request"; + "peer" => %peer_id, + "request_root" => %requested_root, + "request_indices" => ?requested_indices, + "returned" => send_blob_count + ); + + // send stream termination + if send_response { + self.send_response(peer_id, Response::BlobsByRoot(None), request_id); + } + } /// Handle a `BlocksByRoot` request from the peer. pub fn handle_light_client_bootstrap( @@ -216,69 +302,32 @@ impl NetworkBeaconProcessor { request: LightClientBootstrapRequest, ) { let block_root = request.root; - let state_root = match self.chain.get_blinded_block(&block_root) { - Ok(signed_block) => match signed_block { - Some(signed_block) => signed_block.state_root(), - None => { - self.send_error_response( - peer_id, - RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), - request_id, - ); - return; - } - }, - Err(_) => { + match self.chain.get_light_client_bootstrap(&block_root) { + Ok(Some((bootstrap, _))) => self.send_response( + peer_id, + Response::LightClientBootstrap(bootstrap), + request_id, + ), + Ok(None) => self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not available".into(), + request_id, + ), + Err(e) => { self.send_error_response( peer_id, RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), + "Bootstrap not available".into(), request_id, ); - return; + error!(self.log, "Error getting LightClientBootstrap instance"; + "block_root" => ?block_root, + "peer" => %peer_id, + "error" => ?e + ) } }; - let mut beacon_state = match self.chain.get_state(&state_root, None) { - Ok(beacon_state) => match beacon_state { - Some(state) => state, - None => { - self.send_error_response( - peer_id, - RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), - request_id, - ); - return; - } - }, - Err(_) => { - self.send_error_response( - peer_id, - RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), - request_id, - ); - return; - } - }; - let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) { - Ok(bootstrap) => bootstrap, - Err(_) => { - self.send_error_response( - peer_id, - RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), - request_id, - ); - return; - } - }; - self.send_response( - peer_id, - Response::LightClientBootstrap(bootstrap), - request_id, - ) } /// Handle a `BlocksByRange` request from the peer. 
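The `BlobsByRoot` handler added above memoizes its store lookups: requested IDs that miss the RPC cache are grouped by block root in a `HashMap`, and the `Entry` API ensures `get_blobs_checking_early_attester_cache` runs at most once per root even when several indices under the same root are requested. A self-contained sketch of that caching pattern, with invented stand-ins (`BlobId`, `fetch_blob_list`, and `u64` in place of `Hash256` and real sidecars):

```rust
use std::collections::{hash_map::Entry, HashMap};

// Invented stand-ins: `u64` plays the role of `Hash256`, and a "blob list"
// is just a Vec of blob indices.
#[derive(Clone, Copy)]
struct BlobId {
    block_root: u64,
    index: u64,
}

// Hypothetical store lookup, standing in for
// `chain.get_blobs_checking_early_attester_cache(root)`.
fn fetch_blob_list(block_root: u64) -> Result<Vec<u64>, String> {
    Ok((0..3).map(|i| block_root * 10 + i).collect())
}

fn serve_blobs(ids: &[BlobId]) -> usize {
    let mut sent = 0;
    // Memoize one store fetch per distinct block root; the whole Result is
    // cached, so errors are also looked up only once per root.
    let mut results: HashMap<u64, Result<Vec<u64>, String>> = HashMap::new();
    for id in ids {
        let list = match results.entry(id.block_root) {
            Entry::Vacant(entry) => entry.insert(fetch_blob_list(id.block_root)),
            Entry::Occupied(entry) => entry.into_mut(),
        };
        if let Ok(blobs) = list.as_ref() {
            if blobs.iter().any(|b| *b % 10 == id.index) {
                sent += 1; // the real handler sends Response::BlobsByRoot(Some(blob)) here
            }
        }
    }
    sent
}

fn main() {
    // Two indices under the same root: `fetch_blob_list` runs only once.
    let ids = [
        BlobId { block_root: 7, index: 0 },
        BlobId { block_root: 7, index: 2 },
    ];
    assert_eq!(serve_blobs(&ids), 2);
}
```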
@@ -288,7 +337,7 @@ impl NetworkBeaconProcessor { send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, - mut req: BlocksByRangeRequest, + req: BlocksByRangeRequest, ) { debug!(self.log, "Received BlocksByRange Request"; "peer_id" => %peer_id, @@ -297,8 +346,24 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request blocks - if *req.count() > MAX_REQUEST_BLOCKS { - *req.count_mut() = MAX_REQUEST_BLOCKS; + let max_request_size = + self.chain + .epoch() + .map_or(self.chain.spec.max_request_blocks, |epoch| { + match self.chain.spec.fork_name_at_epoch(epoch) { + ForkName::Deneb => self.chain.spec.max_request_blocks_deneb, + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + self.chain.spec.max_request_blocks + } + } + }); + if *req.count() > max_request_size { + return self.send_error_response( + peer_id, + RPCResponseErrorCode::InvalidRequest, + format!("Request exceeded max size {max_request_size}"), + request_id, + ); } let forwards_block_root_iter = match self @@ -312,7 +377,10 @@ impl NetworkBeaconProcessor { oldest_block_slot, }, )) => { - debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); + debug!(self.log, "Range request failed during backfill"; + "requested_slot" => slot, + "oldest_known_slot" => oldest_block_slot + ); return self.send_error_response( peer_id, RPCResponseErrorCode::ResourceUnavailable, @@ -320,7 +388,19 @@ impl NetworkBeaconProcessor { request_id, ); } - Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), + Err(e) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Database error".into(), + request_id, + ); + return error!(self.log, "Unable to obtain root iter"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + } }; // Pick out the required blocks, ignoring skip-slots. @@ -344,7 +424,13 @@ impl NetworkBeaconProcessor { let block_roots = match maybe_block_roots { Ok(block_roots) => block_roots, - Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), + Err(e) => { + return error!(self.log, "Error during iteration over blocks"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ) + } }; // remove all skip slots @@ -381,8 +467,17 @@ impl NetworkBeaconProcessor { error!( self.log, "Block in the chain is not in the store"; + "request" => ?req, + "peer" => %peer_id, "request_root" => ?root ); + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Database inconsistency".into(), + request_id, + ); + send_response = false; break; } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { @@ -479,4 +574,210 @@ impl NetworkBeaconProcessor { "load_blocks_by_range_blocks", ); } + + /// Handle a `BlobsByRange` request from the peer. 
+ pub fn handle_blobs_by_range_request( + self: Arc, + peer_id: PeerId, + request_id: PeerRequestId, + req: BlobsByRangeRequest, + ) { + debug!(self.log, "Received BlobsByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_slot" => req.start_slot, + ); + + // Should not send more than max request blocks + if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { + return self.send_error_response( + peer_id, + RPCResponseErrorCode::InvalidRequest, + "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`".into(), + request_id, + ); + } + + let request_start_slot = Slot::from(req.start_slot); + + let data_availability_boundary_slot = match self.chain.data_availability_boundary() { + Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), + None => { + debug!(self.log, "Deneb fork is disabled"); + self.send_error_response( + peer_id, + RPCResponseErrorCode::InvalidRequest, + "Deneb fork is disabled".into(), + request_id, + ); + return; + } + }; + + let oldest_blob_slot = self + .chain + .store + .get_blob_info() + .oldest_blob_slot + .unwrap_or(data_availability_boundary_slot); + if request_start_slot < oldest_blob_slot { + debug!( + self.log, + "Range request start slot is older than data availability boundary."; + "requested_slot" => request_start_slot, + "oldest_blob_slot" => oldest_blob_slot, + "data_availability_boundary" => data_availability_boundary_slot + ); + + return if data_availability_boundary_slot < oldest_blob_slot { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "blobs pruned within boundary".into(), + request_id, + ) + } else { + self.send_error_response( + peer_id, + RPCResponseErrorCode::InvalidRequest, + "Req outside availability period".into(), + request_id, + ) + }; + } + + let forwards_block_root_iter = + match self.chain.forwards_iter_block_roots(request_start_slot) { + Ok(iter) => iter, + Err(BeaconChainError::HistoricalBlockError( + HistoricalBlockError::BlockOutOfRange { + slot, + oldest_block_slot, + }, + )) => { + debug!(self.log, "Range request failed during backfill"; + "requested_slot" => slot, + "oldest_known_slot" => oldest_block_slot + ); + return self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Backfilling".into(), + request_id, + ); + } + Err(e) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Database error".into(), + request_id, + ); + return error!(self.log, "Unable to obtain root iter"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + } + }; + + // Use `WhenSlotSkipped::Prev` to get the most recent block root prior to + // `request_start_slot` in order to check whether the `request_start_slot` is a skip. + let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| { + self.chain + .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) + .ok() + .flatten() + }); + + // Pick out the required blocks, ignoring skip-slots. 
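The `process_results` closure below turns consecutive duplicate roots into `None`: the forwards root iterator repeats the previous root for a skip slot, so a root equal to the last one seen means no new block at that slot. The same dedup trick in isolation (a sketch; `u64` stands in for `Hash256`):

```rust
// Map a sequence of per-slot block roots to Some(root) for new blocks and
// None for skip slots, where a skip slot repeats the previous root.
fn dedup_skip_slots(roots: Vec<u64>, mut last_root: Option<u64>) -> Vec<Option<u64>> {
    roots
        .into_iter()
        .map(|root| {
            // A root equal to the previous one marks a skip slot -> None.
            let result = if Some(root) == last_root { None } else { Some(root) };
            last_root = Some(root);
            result
        })
        .collect()
}

fn main() {
    // Roots for six consecutive slots; two slots were skipped (root repeats).
    let roots = vec![10, 11, 11, 11, 12, 13];
    assert_eq!(
        dedup_skip_slots(roots, None),
        vec![Some(10), Some(11), None, None, Some(12), Some(13)]
    );
}
```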
+ let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // map skip slots to None + .map(|(root, _)| { + let result = if Some(root) == last_block_root { + None + } else { + Some(root) + }; + last_block_root = Some(root); + result + }) + .collect::>>() + }); + + let block_roots = match maybe_block_roots { + Ok(block_roots) => block_roots, + Err(e) => { + return error!(self.log, "Error during iteration over blocks"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ) + } + }; + + // remove all skip slots + let block_roots = block_roots.into_iter().flatten(); + + let mut blobs_sent = 0; + let mut send_response = true; + + for root in block_roots { + match self.chain.get_blobs(&root) { + Ok(blob_sidecar_list) => { + for blob_sidecar in blob_sidecar_list.iter() { + blobs_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlobsByRange(Some(blob_sidecar.clone())), + id: request_id, + }); + } + } + Err(e) => { + error!( + self.log, + "Error fetching blobs block root"; + "request" => ?req, + "peer" => %peer_id, + "block_root" => ?root, + "error" => ?e + ); + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "No blobs and failed fetching corresponding block".into(), + request_id, + ); + send_response = false; + break; + } + } + } + + let current_slot = self + .chain + .slot() + .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + + debug!( + self.log, + "BlobsByRange Response processed"; + "peer" => %peer_id, + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => blobs_sent + ); + + if send_response { + // send the stream terminator + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlobsByRange(None), + id: request_id, + }); + } + } } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index c33e2acf5..608d10d66 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1,5 +1,3 @@ -use std::time::Duration; - use crate::metrics; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::sync::BatchProcessResult; @@ -7,10 +5,14 @@ use crate::sync::{ manager::{BlockProcessType, SyncMessage}, ChainId, }; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::data_availability_checker::AvailabilityCheckError; +use beacon_chain::data_availability_checker::MaybeAvailableBlock; use beacon_chain::{ - observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, - NotifyExecutionLayer, + observed_block_producers::Error as ObserveError, + validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, + AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, + ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use beacon_processor::{ work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage}, @@ -20,9 +22,13 @@ use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; use slot_clock::SlotClock; use std::sync::Arc; +use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; +use 
store::KzgCommitment; use tokio::sync::mpsc; -use types::{Epoch, Hash256, SignedBeaconBlock}; +use types::beacon_block_body::format_kzg_commitments; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{Epoch, Hash256}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. #[derive(Clone, Debug, PartialEq)] @@ -44,14 +50,14 @@ struct ChainSegmentFailed { } impl NetworkBeaconProcessor { - /// Returns an async closure which processes a beacon block recieved via RPC. + /// Returns an async closure which processes a beacon block received via RPC. /// /// This separate function was required to prevent a cycle during compiler /// type checking. pub fn generate_rpc_beacon_block_process_fn( self: Arc, block_root: Hash256, - block: Arc>, + block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> AsyncFn { @@ -75,7 +81,7 @@ impl NetworkBeaconProcessor { pub fn generate_rpc_beacon_block_fns( self: Arc, block_root: Hash256, - block: Arc>, + block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> (AsyncFn, BlockingFn) { @@ -89,9 +95,9 @@ impl NetworkBeaconProcessor { // A closure which will ignore the block. let ignore_fn = move || { // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: crate::sync::manager::BlockProcessResult::Ignored, + result: crate::sync::manager::BlockProcessingResult::Ignored, }); }; (process_fn, Box::new(ignore_fn)) @@ -102,41 +108,38 @@ impl NetworkBeaconProcessor { pub async fn process_rpc_block( self: Arc>, block_root: Hash256, - block: Arc>, + block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, reprocess_tx: mpsc::Sender, duplicate_cache: DuplicateCache, ) { // Check if the block is already being imported through another source - let handle = match duplicate_cache.check_and_insert(block_root) { - Some(handle) => handle, - None => { - debug!( - self.log, - "Gossip block is being processed"; - "action" => "sending rpc block to reprocessing queue", - "block_root" => %block_root, - ); + let Some(handle) = duplicate_cache.check_and_insert(block_root) else { + debug!( + self.log, + "Gossip block is being processed"; + "action" => "sending rpc block to reprocessing queue", + "block_root" => %block_root, + ); - // Send message to work reprocess queue to retry the block - let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( - block_root, - block, - seen_timestamp, - process_type, - ); - let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { - beacon_block_root: block_root, - process_fn, - ignore_fn, - }); + // Send message to work reprocess queue to retry the block + let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( + block_root, + block, + seen_timestamp, + process_type, + ); + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + beacon_block_root: block_root, + process_fn, + ignore_fn, + }); - if reprocess_tx.try_send(reprocess_msg).is_err() { - error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root) - }; - return; - } + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root) + }; + return; }; // Returns `true` if the time now is after the 4s attestation deadline. 
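The duplicate-cache check rewritten above is a mechanical `match`-to-`let ... else` conversion: the happy path binds the handle, and the miss path logs, requeues the block, and returns early. A minimal sketch of the shape, with a toy `DuplicateCache` standing in for the real beacon-processor type:

```rust
use std::collections::HashSet;

// Toy stand-in for the real DuplicateCache: remembers which block roots are
// currently being processed.
struct DuplicateCache(HashSet<u64>);

impl DuplicateCache {
    // Returns a "handle" (here just the root) only if the root was absent.
    fn check_and_insert(&mut self, root: u64) -> Option<u64> {
        self.0.insert(root).then_some(root)
    }
}

fn process_block(cache: &mut DuplicateCache, block_root: u64) {
    // `let ... else` replaces the old two-arm match: bind on the happy path,
    // diverge (log, requeue, return) on a miss.
    let Some(handle) = cache.check_and_insert(block_root) else {
        println!("block {block_root} already being processed; requeueing");
        return;
    };
    println!("processing block {handle}");
    // In the real code the handle is dropped at the end of processing,
    // releasing the duplicate-cache entry.
}

fn main() {
    let mut cache = DuplicateCache(HashSet::new());
    process_block(&mut cache, 42);
    process_block(&mut cache, 42); // second attempt hits the early return
}
```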
@@ -152,13 +155,12 @@ impl NetworkBeaconProcessor { // Checks if a block from this proposer is already known. let block_equivocates = || { - match self - .chain - .observed_block_producers - .read() - .proposer_has_been_observed(block.message(), block.canonical_root()) - { - Ok(seen_status) => seen_status.is_slashable(), + match self.chain.observed_slashable.read().is_slashable( + block.slot(), + block.message().proposer_index(), + block.canonical_root(), + ) { + Ok(is_slashable) => is_slashable, //Both of these blocks will be rejected, so reject them now rather // than re-queuing them. Err(ObserveError::FinalizedBlock { .. }) @@ -208,15 +210,26 @@ impl NetworkBeaconProcessor { let slot = block.slot(); let parent_root = block.message().parent_root(); + let commitments_formatted = block.as_block().commitments_formatted(); + + debug!( + self.log, + "Processing RPC block"; + "block_root" => ?block_root, + "proposer" => block.message().proposer_index(), + "slot" => block.slot(), + "commitments" => commitments_formatted, + ); + let result = self .chain - .process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(())) + .process_block_with_early_caching(block_root, block, NotifyExecutionLayer::Yes) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); // RPC block imported, regardless of process type - if let &Ok(hash) = &result { + if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result { info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); // Trigger processing for work referencing this block. @@ -240,7 +253,7 @@ impl NetworkBeaconProcessor { } } // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, result: result.into(), }); @@ -249,12 +262,122 @@ impl NetworkBeaconProcessor { drop(handle); } + /// Returns an async closure which processes a list of blobs received via RPC. + /// + /// This separate function was required to prevent a cycle during compiler + /// type checking. + pub fn generate_rpc_blobs_process_fn( + self: Arc, + block_root: Hash256, + blobs: FixedBlobSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> AsyncFn { + let process_fn = async move { + self.clone() + .process_rpc_blobs(block_root, blobs, seen_timestamp, process_type) + .await; + }; + Box::pin(process_fn) + } + + /// Attempt to process a list of blobs received from a direct RPC request. + pub async fn process_rpc_blobs( + self: Arc>, + block_root: Hash256, + blobs: FixedBlobSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + let Some(slot) = blobs + .iter() + .find_map(|blob| blob.as_ref().map(|blob| blob.slot())) + else { + return; + }; + + let (indices, commitments): (Vec, Vec) = blobs + .iter() + .filter_map(|blob_opt| { + blob_opt + .as_ref() + .map(|blob| (blob.index, blob.kzg_commitment)) + }) + .unzip(); + let commitments = format_kzg_commitments(&commitments); + + debug!( + self.log, + "RPC blobs received"; + "indices" => ?indices, + "block_root" => %block_root, + "slot" => %slot, + "commitments" => commitments, + ); + + if let Ok(current_slot) = self.chain.slot() { + if current_slot == slot { + // Note: this metric is useful to gauge how long it takes to receive blobs requested + // over rpc. Since we always send the request for block components at `slot_clock.single_lookup_delay()` + // we can use that as a baseline to measure against. 
+ let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); + + metrics::observe_duration(&metrics::BEACON_BLOB_RPC_SLOT_START_DELAY_TIME, delay); + } + } + + let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await; + + match &result { + Ok(AvailabilityProcessingStatus::Imported(hash)) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and blobs", + "slot" => %slot, + "block_hash" => %hash, + ); + } + Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { + debug!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + "slot" => %slot, + ); + } + Err(BlockError::BlockIsAlreadyKnown) => { + debug!( + self.log, + "Blobs have already been imported"; + "block_hash" => %block_root, + "slot" => %slot, + ); + } + Err(e) => { + warn!( + self.log, + "Error when importing rpc blobs"; + "error" => ?e, + "block_hash" => %block_root, + "slot" => %slot, + ); + } + } + + // Sync handles these results + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.into(), + }); + } + /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. pub async fn process_chain_segment( &self, sync_type: ChainSegmentProcessId, - downloaded_blocks: Vec>>, + downloaded_blocks: Vec>, notify_execution_layer: NotifyExecutionLayer, ) { let result = match sync_type { @@ -304,6 +427,10 @@ impl NetworkBeaconProcessor { let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); + let n_blobs = downloaded_blocks + .iter() + .map(|wrapped| wrapped.n_blobs()) + .sum::(); match self.process_backfill_blocks(downloaded_blocks) { (_, Ok(_)) => { @@ -312,6 +439,7 @@ impl NetworkBeaconProcessor { "first_block_slot" => start_slot, "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, + "processed_blobs" => n_blobs, "service"=> "sync"); BatchProcessResult::Success { was_non_empty: sent_blocks > 0, @@ -322,6 +450,7 @@ impl NetworkBeaconProcessor { "batch_epoch" => epoch, "first_block_slot" => start_slot, "last_block_slot" => end_slot, + "processed_blobs" => n_blobs, "error" => %e.message, "service" => "sync"); match e.peer_action { @@ -373,10 +502,10 @@ impl NetworkBeaconProcessor { /// Helper function to process blocks batches which only consumes the chain and blocks to process. async fn process_blocks<'a>( &self, - downloaded_blocks: impl Iterator>>, + downloaded_blocks: impl Iterator>, notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blocks: Vec> = downloaded_blocks.cloned().collect(); + let blocks: Vec<_> = downloaded_blocks.cloned().collect(); match self .chain .process_chain_segment(blocks, notify_execution_layer) @@ -406,19 +535,63 @@ impl NetworkBeaconProcessor { /// Helper function to process backfill block batches which only consumes the chain and blocks to process. 
fn process_backfill_blocks( &self, - blocks: Vec>>, + downloaded_blocks: Vec>, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blinded_blocks = blocks - .iter() - .map(|full_block| full_block.clone_as_blinded()) - .map(Arc::new) - .collect(); - match self.chain.import_historical_block_batch(blinded_blocks) { + let total_blocks = downloaded_blocks.len(); + let available_blocks = match self + .chain + .data_availability_checker + .verify_kzg_for_rpc_blocks(downloaded_blocks) + { + Ok(blocks) => blocks + .into_iter() + .filter_map(|maybe_available| match maybe_available { + MaybeAvailableBlock::Available(block) => Some(block), + MaybeAvailableBlock::AvailabilityPending { .. } => None, + }) + .collect::>(), + Err(e) => match e { + AvailabilityCheckError::StoreError(_) + | AvailabilityCheckError::KzgNotInitialized => { + return ( + 0, + Err(ChainSegmentFailed { + peer_action: None, + message: "Failed to check block availability".into(), + }), + ); + } + e => { + return ( + 0, + Err(ChainSegmentFailed { + peer_action: Some(PeerAction::LowToleranceError), + message: format!("Failed to check block availability : {:?}", e), + }), + ) + } + }, + }; + + if available_blocks.len() != total_blocks { + return ( + 0, + Err(ChainSegmentFailed { + peer_action: Some(PeerAction::LowToleranceError), + message: format!( + "{} out of {} blocks were unavailable", + (total_blocks - available_blocks.len()), + total_blocks + ), + }), + ); + } + + match self.chain.import_historical_block_batch(available_blocks) { Ok(imported_blocks) => { metrics::inc_counter( &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_SUCCESS_TOTAL, ); - (imported_blocks, Ok(())) } Err(error) => { diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index ac5722a56..dd58eb835 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -8,25 +8,31 @@ use crate::{ service::NetworkMessage, sync::{manager::BlockProcessType, SyncMessage}, }; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::BeaconChain; +use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; +use lighthouse_network::discovery::ConnectionId; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::SubstreamId; use lighthouse_network::{ - discv5::enr::{CombinedKey, EnrBuilder}, + discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, - Client, MessageId, NetworkGlobals, PeerId, + Client, MessageId, NetworkGlobals, PeerId, Response, }; use slot_clock::SlotClock; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; +use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, + SubnetId, }; type E = MainnetEthSpec; @@ -46,6 +52,7 @@ const STANDARD_TIMEOUT: 
Duration = Duration::from_secs(10); struct TestRig { chain: Arc>, next_block: Arc>, + next_blobs: Option>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -82,13 +89,14 @@ impl TestRig { pub async fn new_parametric(chain_length: u64, enable_backfill_rate_limiting: bool) -> Self { // This allows for testing voluntary exits without building out a massive chain. - let mut spec = E::default_spec(); + let mut spec = test_spec::(); spec.shard_committee_period = 2; let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() + .mock_execution_layer() .chain_config(<_>::default()) .build(); @@ -114,7 +122,7 @@ impl TestRig { "precondition: current slot is one after head" ); - let (next_block, next_state) = harness + let (next_block_tuple, next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; @@ -140,9 +148,9 @@ impl TestRig { .get_unaggregated_attestations( &AttestationStrategy::AllValidators, &next_state, - next_block.state_root(), - next_block.canonical_root(), - next_block.slot(), + next_block_tuple.0.state_root(), + next_block_tuple.0.canonical_root(), + next_block_tuple.0.slot(), ) .into_iter() .flatten() @@ -152,9 +160,9 @@ impl TestRig { .make_attestations( &harness.get_all_validators(), &next_state, - next_block.state_root(), - next_block.canonical_root().into(), - next_block.slot(), + next_block_tuple.0.state_root(), + next_block_tuple.0.canonical_root().into(), + next_block_tuple.0.slot(), ) .into_iter() .filter_map(|(_, aggregate_opt)| aggregate_opt) @@ -175,8 +183,10 @@ impl TestRig { let log = harness.logger().clone(); - let mut beacon_processor_config = BeaconProcessorConfig::default(); - beacon_processor_config.enable_backfill_rate_limiting = enable_backfill_rate_limiting; + let beacon_processor_config = BeaconProcessorConfig { + enable_backfill_rate_limiting, + ..Default::default() + }; let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, @@ -193,7 +203,7 @@ impl TestRig { syncnets: EnrSyncCommitteeBitfield::::default(), }); let enr_key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); + let enr = enr::Enr::builder().build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new(enr, meta_data, vec![], false, &log)); let executor = harness.runtime.task_executor.clone(); @@ -231,11 +241,17 @@ impl TestRig { chain.spec.maximum_gossip_clock_disparity(), ); - assert!(!beacon_processor.is_err()); - + assert!(beacon_processor.is_ok()); + let block = next_block_tuple.0; + let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { + Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap()) + } else { + None + }; Self { chain, - next_block: Arc::new(next_block), + next_block: block, + next_blobs: blob_sidecars, attestations, next_block_attestations, next_block_aggregate_attestations, @@ -272,11 +288,28 @@ impl TestRig { .unwrap(); } + pub fn enqueue_gossip_blob(&self, blob_index: usize) { + if let Some(blobs) = self.next_blobs.as_ref() { + let blob = blobs.get(blob_index).unwrap(); + self.network_beacon_processor + .send_gossip_blob_sidecar( + junk_message_id(), + junk_peer_id(), + Client::default(), + blob.index, + blob.clone(), + Duration::from_secs(0), + ) + .unwrap(); + } + } + pub fn enqueue_rpc_block(&self) { + let block_root = self.next_block.canonical_root(); 
self.network_beacon_processor .send_rpc_beacon_block( - self.next_block.canonical_root(), - self.next_block.clone(), + block_root, + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), BlockProcessType::ParentLookup { chain_hash: Hash256::random(), @@ -286,15 +319,42 @@ impl TestRig { } pub fn enqueue_single_lookup_rpc_block(&self) { + let block_root = self.next_block.canonical_root(); self.network_beacon_processor .send_rpc_beacon_block( - self.next_block.canonical_root(), - self.next_block.clone(), + block_root, + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) .unwrap(); } + pub fn enqueue_single_lookup_rpc_blobs(&self) { + if let Some(blobs) = self.next_blobs.clone() { + let blobs = FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()); + self.network_beacon_processor + .send_rpc_blobs( + self.next_block.canonical_root(), + blobs, + std::time::Duration::default(), + BlockProcessType::SingleBlock { id: 1 }, + ) + .unwrap(); + } + } + + pub fn enqueue_blobs_by_range_request(&self, count: u64) { + self.network_beacon_processor + .send_blobs_by_range_request( + PeerId::random(), + (ConnectionId::new_unchecked(42), SubstreamId::new(24)), + BlobsByRangeRequest { + start_slot: 0, + count, + }, + ) + .unwrap(); + } pub fn enqueue_backfill_batch(&self) { self.network_beacon_processor @@ -520,6 +580,13 @@ async fn import_gossip_block_acceptably_early() { rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) .await; + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); + for i in 0..num_blobs { + rig.enqueue_gossip_blob(i); + rig.assert_event_journal(&[GOSSIP_BLOBS_SIDECAR, WORKER_FREED, NOTHING_TO_DO]) + .await; + } + // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for // processing. @@ -528,6 +595,7 @@ async fn import_gossip_block_acceptably_early() { // processing, instead of just ADDITIONAL_QUEUED_BLOCK_DELAY. Speak to @paulhauner if this test // starts failing. rig.chain.slot_clock.set_slot(rig.next_block.slot().into()); + assert!( rig.head_root() != rig.next_block.canonical_root(), "block not yet imported" @@ -595,6 +663,19 @@ async fn import_gossip_block_at_current_slot() { rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) .await; + let num_blobs = rig + .next_blobs + .as_ref() + .map(|blobs| blobs.len()) + .unwrap_or(0); + + for i in 0..num_blobs { + rig.enqueue_gossip_blob(i); + + rig.assert_event_journal(&[GOSSIP_BLOBS_SIDECAR, WORKER_FREED, NOTHING_TO_DO]) + .await; + } + assert_eq!( rig.head_root(), rig.next_block.canonical_root(), @@ -647,20 +728,34 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod ); // Send the block and ensure that the attestation is received back and imported. 
- - let block_event = match import_method { + let num_blobs = rig + .next_blobs + .as_ref() + .map(|blobs| blobs.len()) + .unwrap_or(0); + let mut events = vec![]; + match import_method { BlockImportMethod::Gossip => { rig.enqueue_gossip_block(); - GOSSIP_BLOCK + events.push(GOSSIP_BLOCK); + for i in 0..num_blobs { + rig.enqueue_gossip_blob(i); + events.push(GOSSIP_BLOBS_SIDECAR); + } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); - RPC_BLOCK + events.push(RPC_BLOCK); + if num_blobs > 0 { + rig.enqueue_single_lookup_rpc_blobs(); + events.push(RPC_BLOBS); + } } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]) - .await; + events.push(UNKNOWN_BLOCK_ATTESTATION); + + rig.assert_event_journal_contains_ordered(&events).await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. @@ -716,20 +811,34 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod ); // Send the block and ensure that the attestation is received back and imported. - - let block_event = match import_method { + let num_blobs = rig + .next_blobs + .as_ref() + .map(|blobs| blobs.len()) + .unwrap_or(0); + let mut events = vec![]; + match import_method { BlockImportMethod::Gossip => { rig.enqueue_gossip_block(); - GOSSIP_BLOCK + events.push(GOSSIP_BLOCK); + for i in 0..num_blobs { + rig.enqueue_gossip_blob(i); + events.push(GOSSIP_BLOBS_SIDECAR); + } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); - RPC_BLOCK + events.push(RPC_BLOCK); + if num_blobs > 0 { + rig.enqueue_single_lookup_rpc_blobs(); + events.push(RPC_BLOBS); + } } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]) - .await; + events.push(UNKNOWN_BLOCK_AGGREGATE); + + rig.assert_event_journal_contains_ordered(&events).await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. 
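Both attestation tests above now build their expected event journal from the number of blob sidecars instead of matching on a single hard-coded event: gossip import yields one `GOSSIP_BLOBS_SIDECAR` per blob, while RPC import batches all blobs into a single `RPC_BLOBS` event. A reduced sketch of that assembly (the enum and event strings are simplified stand-ins for the test's constants):

```rust
#[derive(Clone, Copy)]
enum ImportMethod {
    Gossip,
    Rpc,
}

// Build the ordered list of processor events expected for one block plus
// `num_blobs` sidecars, mirroring the match in the tests above.
fn expected_events(method: ImportMethod, num_blobs: usize) -> Vec<&'static str> {
    let mut events = Vec::new();
    match method {
        ImportMethod::Gossip => {
            events.push("gossip_block");
            // One gossip event per sidecar.
            events.extend(std::iter::repeat("gossip_blob_sidecar").take(num_blobs));
        }
        ImportMethod::Rpc => {
            events.push("rpc_block");
            // RPC blobs arrive as a single batched work event.
            if num_blobs > 0 {
                events.push("rpc_blobs");
            }
        }
    }
    events.push("unknown_block_attestation");
    events
}

fn main() {
    assert_eq!(
        expected_events(ImportMethod::Gossip, 2),
        vec![
            "gossip_block",
            "gossip_blob_sidecar",
            "gossip_blob_sidecar",
            "unknown_block_attestation"
        ]
    );
    assert_eq!(
        expected_events(ImportMethod::Rpc, 0),
        vec!["rpc_block", "unknown_block_attestation"]
    );
}
```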
@@ -897,9 +1006,15 @@ async fn test_rpc_block_reprocessing() { // Insert the next block into the duplicate cache manually let handle = rig.duplicate_cache.check_and_insert(next_block_root); rig.enqueue_single_lookup_rpc_block(); - rig.assert_event_journal(&[RPC_BLOCK, WORKER_FREED, NOTHING_TO_DO]) .await; + + rig.enqueue_single_lookup_rpc_blobs(); + if rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0) > 0 { + rig.assert_event_journal(&[RPC_BLOBS, WORKER_FREED, NOTHING_TO_DO]) + .await; + } + // next_block shouldn't be processed since it couldn't get the // duplicate cache handle assert_ne!(next_block_root, rig.head_root()); @@ -960,3 +1075,42 @@ async fn test_backfill_sync_processing_rate_limiting_disabled() { ) .await; } + +#[tokio::test] +async fn test_blobs_by_range() { + if test_spec::().deneb_fork_epoch.is_none() { + return; + }; + let mut rig = TestRig::new(64).await; + let slot_count = 32; + rig.enqueue_blobs_by_range_request(slot_count); + + let mut blob_count = 0; + for slot in 0..slot_count { + let root = rig + .chain + .block_root_at_slot(Slot::new(slot), WhenSlotSkipped::None) + .unwrap(); + blob_count += root + .map(|root| rig.chain.get_blobs(&root).unwrap_or_default().len()) + .unwrap_or(0); + } + let mut actual_count = 0; + while let Some(next) = rig._network_rx.recv().await { + if let NetworkMessage::SendResponse { + peer_id: _, + response: Response::BlobsByRange(blob), + id: _, + } = next + { + if blob.is_some() { + actual_count += 1; + } else { + break; + } + } else { + panic!("unexpected message {:?}", next); + } + } + assert_eq!(blob_count, actual_count); +} diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index c8332705c..f56a3b744 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -21,13 +21,13 @@ use lighthouse_network::{ MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, }; use logging::TimeLatch; -use slog::{debug, o, trace}; +use slog::{crit, debug, o, trace}; use slog::{error, warn}; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use types::{EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; /// Handles messages from the network and routes them to the appropriate service to be handled. pub struct Router { @@ -68,6 +68,7 @@ pub enum RouterMessage { RPCFailed { peer_id: PeerId, request_id: RequestId, + error: RPCError, }, /// A gossip message has been received. 
The fields are: message id, the peer that sent us this /// message, the message itself and a bool which indicates if the message should be processed @@ -177,8 +178,9 @@ impl Router { RouterMessage::RPCFailed { peer_id, request_id, + error, } => { - self.on_rpc_error(peer_id, request_id); + self.on_rpc_error(peer_id, request_id, error); } RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { self.handle_gossip(id, peer_id, gossip, should_process); @@ -206,6 +208,14 @@ impl Router { self.network_beacon_processor .send_blocks_by_roots_request(peer_id, request_id, request), ), + Request::BlobsByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_blobs_by_range_request(peer_id, request_id, request), + ), + Request::BlobsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_blobs_by_roots_request(peer_id, request_id, request), + ), Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor .send_lightclient_bootstrap_request(peer_id, request_id, request), @@ -234,6 +244,12 @@ impl Router { Response::BlocksByRoot(beacon_block) => { self.on_blocks_by_root_response(peer_id, request_id, beacon_block); } + Response::BlobsByRange(blob) => { + self.on_blobs_by_range_response(peer_id, request_id, blob); + } + Response::BlobsByRoot(blob) => { + self.on_blobs_by_root_response(peer_id, request_id, blob); + } Response::LightClientBootstrap(_) => unreachable!(), } } @@ -279,6 +295,19 @@ impl Router { timestamp_now(), ), ), + PubsubMessage::BlobSidecar(data) => { + let (blob_index, blob_sidecar) = *data; + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_blob_sidecar( + message_id, + peer_id, + self.network_globals.client(&peer_id), + blob_index, + blob_sidecar, + timestamp_now(), + ), + ) + } PubsubMessage::VoluntaryExit(exit) => { debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); self.handle_beacon_processor_send_result( @@ -408,12 +437,13 @@ impl Router { /// An error occurred during an RPC request. The state is maintained by the sync manager, so /// this function notifies the sync manager of the error. - pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) { + pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { // Check if the failed RPC belongs to sync if let RequestId::Sync(request_id) = request_id { self.send_to_sync(SyncMessage::RpcError { peer_id, request_id, + error, }); } } @@ -452,12 +482,22 @@ impl Router { ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { - SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { - unreachable!("Block lookups do not request BBRange requests") + SyncId::SingleBlock { .. } + | SyncId::SingleBlob { .. } + | SyncId::ParentLookup { .. } + | SyncId::ParentLookupBlob { .. } => { + crit!(self.log, "Block lookups do not request BBRange requests"; "peer_id" => %peer_id); + return; } - id @ (SyncId::BackFillSync { .. } | SyncId::RangeSync { .. }) => id, + id @ (SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::BackFillBlockAndBlobs { .. } + | SyncId::RangeBlockAndBlobs { .. 
}) => id, }, - RequestId::Router => unreachable!("All BBRange requests belong to sync"), + RequestId::Router => { + crit!(self.log, "All BBRange requests belong to sync"; "peer_id" => %peer_id); + return; + } }; trace!( @@ -474,6 +514,33 @@ impl Router { }); } + pub fn on_blobs_by_range_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blob_sidecar: Option>>, + ) { + trace!( + self.log, + "Received BlobsByRange Response"; + "peer" => %peer_id, + ); + + if let RequestId::Sync(id) = request_id { + self.send_to_sync(SyncMessage::RpcBlob { + peer_id, + request_id: id, + blob_sidecar, + seen_timestamp: timestamp_now(), + }); + } else { + crit!( + self.log, + "All blobs by range responses should belong to sync" + ); + } + } + /// Handle a `BlocksByRoot` response from the peer. pub fn on_blocks_by_root_response( &mut self, @@ -484,11 +551,22 @@ impl Router { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, - SyncId::BackFillSync { .. } | SyncId::RangeSync { .. } => { - unreachable!("Batch syncing do not request BBRoot requests") + SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::RangeBlockAndBlobs { .. } + | SyncId::BackFillBlockAndBlobs { .. } => { + crit!(self.log, "Batch syncing do not request BBRoot requests"; "peer_id" => %peer_id); + return; + } + SyncId::SingleBlob { .. } | SyncId::ParentLookupBlob { .. } => { + crit!(self.log, "Blob response to block by roots request"; "peer_id" => %peer_id); + return; } }, - RequestId::Router => unreachable!("All BBRoot requests belong to sync"), + RequestId::Router => { + crit!(self.log, "All BBRoot requests belong to sync"; "peer_id" => %peer_id); + return; + } }; trace!( @@ -504,6 +582,47 @@ impl Router { }); } + /// Handle a `BlobsByRoot` response from the peer. + pub fn on_blobs_by_root_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blob_sidecar: Option>>, + ) { + let request_id = match request_id { + RequestId::Sync(sync_id) => match sync_id { + id @ (SyncId::SingleBlob { .. } | SyncId::ParentLookupBlob { .. }) => id, + SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { + crit!(self.log, "Block response to blobs by roots request"; "peer_id" => %peer_id); + return; + } + SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::RangeBlockAndBlobs { .. } + | SyncId::BackFillBlockAndBlobs { .. 
} => { + crit!(self.log, "Batch syncing does not request BBRoot requests"; "peer_id" => %peer_id); + return; + } + }, + RequestId::Router => { + crit!(self.log, "All BlobsByRoot requests belong to sync"; "peer_id" => %peer_id); + return; + } + }; + + trace!( + self.log, + "Received BlobsByRoot Response"; + "peer" => %peer_id, + ); + self.send_to_sync(SyncMessage::RpcBlob { + request_id, + peer_id, + blob_sidecar, + seen_timestamp: timestamp_now(), + }); + } + fn handle_beacon_processor_send_result( &mut self, result: Result<(), crate::network_beacon_processor::Error>, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 174a0ec14..01a7e1f98 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -215,18 +215,21 @@ pub struct NetworkService { } impl NetworkService { - #[allow(clippy::type_complexity)] - pub async fn start( + async fn build( beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, - gossipsub_registry: Option<&'_ mut Registry>, + libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, - ) -> error::Result<(Arc>, NetworkSenders)> { + ) -> error::Result<( + NetworkService, + Arc>, + NetworkSenders, + )> { let network_log = executor.log().clone(); // build the channels for external comms - let (network_senders, network_recievers) = NetworkSenders::new(); + let (network_senders, network_receivers) = NetworkSenders::new(); #[cfg(feature = "disable-backfill")] warn!( @@ -282,7 +285,7 @@ impl NetworkService { enr_fork_id, fork_context: fork_context.clone(), chain_spec: &beacon_chain.spec, - gossipsub_registry, + libp2p_registry, }; // launch libp2p service @@ -341,7 +344,7 @@ impl NetworkService { let NetworkReceivers { network_recv, validator_subscription_recv, - } = network_recievers; + } = network_receivers; // create the network service and spawn the task let network_log = network_log.new(o!("service" => "network")); @@ -369,6 +372,28 @@ impl NetworkService { enable_light_client_server: config.enable_light_client_server, }; + Ok((network_service, network_globals, network_senders)) + } + + #[allow(clippy::type_complexity)] + pub async fn start( + beacon_chain: Arc>, + config: &NetworkConfig, + executor: task_executor::TaskExecutor, + libp2p_registry: Option<&'_ mut Registry>, + beacon_processor_send: BeaconProcessorSend, + beacon_processor_reprocess_tx: mpsc::Sender, + ) -> error::Result<(Arc>, NetworkSenders)> { + let (network_service, network_globals, network_senders) = Self::build( + beacon_chain, + config, + executor.clone(), + libp2p_registry, + beacon_processor_send, + beacon_processor_reprocess_tx, + ) + .await?; + network_service.spawn_service(executor); Ok((network_globals, network_senders)) @@ -472,7 +497,6 @@ impl NetworkService { } } } - metrics::update_bandwidth_metrics(self.libp2p.bandwidth.clone()); } }; executor.spawn(service_fut, "network"); @@ -516,10 +540,11 @@ impl NetworkService { response, }); } - NetworkEvent::RPCFailed { id, peer_id } => { + NetworkEvent::RPCFailed { id, peer_id, error } => { self.send_to_router(RouterMessage::RPCFailed { peer_id, request_id: id, + error, }); } NetworkEvent::StatusPeer(peer_id) => { @@ -609,7 +634,7 @@ impl NetworkService { id, reason, } => { - self.libp2p.send_error_reponse(peer_id, id, error, reason); + self.libp2p.send_error_response(peer_id, id, error, reason); } NetworkMessage::UPnPMappingEstablished { mappings } => { self.upnp_mappings = 
mappings; @@ -691,7 +716,10 @@ impl NetworkService { } let mut subscribed_topics: Vec = vec![]; - for topic_kind in core_topics_to_subscribe(self.fork_context.current_fork()) { + for topic_kind in core_topics_to_subscribe::( + self.fork_context.current_fork(), + &self.fork_context.spec, + ) { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( topic_kind.clone(), @@ -882,9 +910,10 @@ impl NetworkService { fn update_next_fork(&mut self) { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + let new_fork_digest = new_enr_fork_id.fork_digest; let fork_context = &self.fork_context; - if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { info!( self.log, "Transitioned to new fork"; @@ -907,13 +936,20 @@ impl NetworkService { Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); info!(self.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + + // Remove topic weight from old fork topics to prevent peers that left on the mesh on + // old topics from being penalized for not sending us messages. + self.libp2p.remove_topic_weight_except(new_fork_digest); } else { crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); } } fn subscribed_core_topics(&self) -> bool { - let core_topics = core_topics_to_subscribe(self.fork_context.current_fork()); + let core_topics = core_topics_to_subscribe::( + self.fork_context.current_fork(), + &self.fork_context.spec, + ); let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics); let subscriptions = self.network_globals.gossipsub_subscriptions.read(); let subscribed_topics: HashSet<&GossipKind> = diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 23bcf456d..35a7f1eab 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -4,14 +4,26 @@ mod tests { use crate::persisted_dht::load_dht; use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; - use beacon_processor::BeaconProcessorChannels; - use lighthouse_network::Enr; + use beacon_chain::BeaconChainTypes; + use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; + use futures::StreamExt; + use lighthouse_network::types::{GossipEncoding, GossipKind}; + use lighthouse_network::{Enr, GossipTopic}; use slog::{o, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use std::sync::Arc; use tokio::runtime::Runtime; - use types::MinimalEthSpec; + use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; + + impl NetworkService { + fn get_topic_params( + &self, + topic: GossipTopic, + ) -> Option<&lighthouse_network::libp2p::gossipsub::TopicScoreParams> { + self.libp2p.get_topic_params(topic) + } + } fn get_logger(actual_log: bool) -> Logger { if actual_log { @@ -102,4 +114,126 @@ mod tests { "should have persisted the second ENR to store" ); } + + // Test removing topic weight on old topics when a fork happens. 
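The `update_next_fork` change above zeroes gossipsub topic weight on every topic that does not belong to the new fork digest, so peers lingering in old-fork meshes are not penalized for going quiet; the test that follows exercises exactly that. A minimal std-only sketch of the idea, where `TopicKey`, the 4-byte digest, and the bare `f64` weight are stand-ins for the real gossipsub `TopicScoreParams` plumbing:

```rust
use std::collections::HashMap;

/// Hypothetical key for a gossip topic: the fork digest it was built for, plus its kind.
#[derive(PartialEq, Eq, Hash)]
struct TopicKey {
    fork_digest: [u8; 4],
    kind: String, // e.g. "beacon_block"
}

struct TopicScores {
    /// Per-topic `topic_weight`, as in gossipsub's `TopicScoreParams`.
    weights: HashMap<TopicKey, f64>,
}

impl TopicScores {
    /// Zero the weight of every topic that does not belong to `except`, so peers
    /// that stop publishing on the old fork's topics no longer lose score.
    fn remove_topic_weight_except(&mut self, except: [u8; 4]) {
        for (key, weight) in self.weights.iter_mut() {
            if key.fork_digest != except {
                *weight = 0.0;
            }
        }
    }
}

fn main() {
    let (old, new) = ([0u8; 4], [1u8; 4]);
    let mut scores = TopicScores { weights: HashMap::new() };
    scores.weights.insert(TopicKey { fork_digest: old, kind: "beacon_block".into() }, 0.5);
    scores.weights.insert(TopicKey { fork_digest: new, kind: "beacon_block".into() }, 0.5);
    scores.remove_topic_weight_except(new);
    assert_eq!(scores.weights[&TopicKey { fork_digest: old, kind: "beacon_block".into() }], 0.0);
    assert_eq!(scores.weights[&TopicKey { fork_digest: new, kind: "beacon_block".into() }], 0.5);
}
```

Zeroing the weight, rather than unsubscribing immediately, keeps the old topics alive through the `UNSUBSCRIBE_DELAY_EPOCHS` window without letting them influence peer scores.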
+ #[test] + fn test_removing_topic_weight_on_old_topics() { + let runtime = Arc::new(Runtime::new().unwrap()); + + // Capella spec + let mut spec = MinimalEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(1)); + + // Build beacon chain. + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build() + .chain; + let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); + assert_eq!(next_fork_name, ForkName::Capella); + + // Build network service. + let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { + let (_, exit) = exit_future::signal(); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = task_executor::TaskExecutor::new( + Arc::downgrade(&runtime), + exit, + get_logger(false), + shutdown_tx, + ); + + let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); + config.discv5_config.table_filter = |_| true; // Do not ignore local IPs + config.upnp_enabled = false; + + let beacon_processor_channels = + BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); + NetworkService::build( + beacon_chain.clone(), + &config, + executor.clone(), + None, + beacon_processor_channels.beacon_processor_tx, + beacon_processor_channels.work_reprocessing_tx, + ) + .await + .unwrap() + }); + + // Subscribe to the topics. + runtime.block_on(async { + while network_globals.gossipsub_subscriptions.read().len() < 2 { + if let Some(msg) = network_service.attestation_service.next().await { + network_service.on_attestation_service_msg(msg); + } + } + }); + + // Make sure the service is subscribed to the topics. + let (old_topic1, old_topic2) = { + let mut subnets = SubnetId::compute_subnets_for_epoch::( + network_globals.local_enr().node_id().raw().into(), + beacon_chain.epoch().unwrap(), + &spec, + ) + .unwrap() + .0 + .collect::>(); + assert_eq!(2, subnets.len()); + + let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; + let old_topic1 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + let old_topic2 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + + (old_topic1, old_topic2) + }; + let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); + assert_eq!(2, subscriptions.len()); + assert!(subscriptions.contains(&old_topic1)); + assert!(subscriptions.contains(&old_topic2)); + let old_topic_params1 = network_service + .get_topic_params(old_topic1.clone()) + .expect("topic score params"); + assert!(old_topic_params1.topic_weight > 0.0); + let old_topic_params2 = network_service + .get_topic_params(old_topic2.clone()) + .expect("topic score params"); + assert!(old_topic_params2.topic_weight > 0.0); + + // Advance slot to the next fork + for _ in 0..MinimalEthSpec::slots_per_epoch() { + beacon_chain.slot_clock.advance_slot(); + } + + // Run `NetworkService::update_next_fork()`. + runtime.block_on(async { + network_service.update_next_fork(); + }); + + // Check that topic_weight on the old topics has been zeroed. 
+ let old_topic_params1 = network_service + .get_topic_params(old_topic1) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params1.topic_weight); + + let old_topic_params2 = network_service + .get_topic_params(old_topic2) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params2.topic_weight); + } } diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 3b8c89a44..769775a62 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -2,7 +2,6 @@ use super::*; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::CachingEth1Backend, - validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, BeaconChain, }; use futures::prelude::*; @@ -76,7 +75,6 @@ impl TestBeaconChain { Duration::from_millis(SLOT_DURATION_MILLIS), )) .shutdown_sender(shutdown_tx) - .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) .build() .expect("should build"), ); diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index a1c2404e5..0d7e7c16c 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -14,6 +14,7 @@ use crate::sync::network_context::SyncNetworkContext; use crate::sync::range_sync::{ BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, }; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; @@ -24,7 +25,7 @@ use std::collections::{ HashMap, HashSet, }; use std::sync::Arc; -use types::{Epoch, EthSpec, SignedBeaconBlock}; +use types::{Epoch, EthSpec}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of /// blocks per batch are requested _at most_. A batch may request less blocks to account for @@ -32,7 +33,7 @@ use types::{Epoch, EthSpec, SignedBeaconBlock}; /// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which /// case the responder will fill the response up to the max request size, assuming they have the /// bandwidth to do so. -pub const BACKFILL_EPOCHS_PER_BATCH: u64 = 2; +pub const BACKFILL_EPOCHS_PER_BATCH: u64 = 1; /// The maximum number of batches to queue before requesting more. 
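`BACKFILL_EPOCHS_PER_BATCH` drops from 2 to 1 above, which halves the slot span of each backfill `BlocksByRange` request now that blob batches ride along with block batches. A rough sketch of how an epoch-aligned batch id maps to a request span, assuming mainnet's 32 slots per epoch (the function and values are illustrative, not the crate's API):

```rust
const SLOTS_PER_EPOCH: u64 = 32; // mainnet; the minimal spec uses 8
const BACKFILL_EPOCHS_PER_BATCH: u64 = 1;

/// Translate an epoch-aligned batch id into the (start_slot, count) pair for a
/// BlocksByRange request; backfill walks these batches backwards towards genesis.
fn blocks_by_range(batch_start_epoch: u64) -> (u64, u64) {
    let start_slot = batch_start_epoch * SLOTS_PER_EPOCH;
    let count = BACKFILL_EPOCHS_PER_BATCH * SLOTS_PER_EPOCH;
    (start_slot, count)
}

fn main() {
    // One epoch per batch keeps request sizes bounded once blobs are attached.
    assert_eq!(blocks_by_range(100), (3200, 32));
}
```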
const BACKFILL_BATCH_BUFFER_SIZE: u8 = 20; @@ -54,7 +55,7 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { + fn batch_attempt_hash(blocks: &[RpcBlock]) -> u64 { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; let mut hasher = DefaultHasher::new(); @@ -391,7 +392,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>>, + beacon_block: Option>, ) -> Result { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -508,16 +509,13 @@ impl BackFillSync { return Ok(ProcessResult::Successful); } - let batch = match self.batches.get_mut(&batch_id) { - Some(batch) => batch, - None => { - return self - .fail_sync(BackFillError::InvalidSyncState(format!( - "Trying to process a batch that does not exist: {}", - batch_id - ))) - .map(|_| ProcessResult::Successful); - } + let Some(batch) = self.batches.get_mut(&batch_id) else { + return self + .fail_sync(BackFillError::InvalidSyncState(format!( + "Trying to process a batch that does not exist: {}", + batch_id + ))) + .map(|_| ProcessResult::Successful); }; // NOTE: We send empty batches to the processor in order to trigger the block processor @@ -908,9 +906,8 @@ impl BackFillSync { network: &mut SyncNetworkContext, batch_id: BatchId, ) -> Result<(), BackFillError> { - let batch = match self.batches.get_mut(&batch_id) { - Some(batch) => batch, - None => return Ok(()), + let Some(batch) = self.batches.get_mut(&batch_id) else { + return Ok(()); }; // Find a peer to request the batch @@ -932,7 +929,7 @@ impl BackFillSync { .collect::>(); // Sort peers prioritizing unrelated peers with less active requests. priorized_peers.sort_unstable(); - priorized_peers.get(0).map(|&(_, _, peer)| peer) + priorized_peers.first().map(|&(_, _, peer)| peer) }; if let Some(peer) = new_peer { @@ -954,8 +951,8 @@ impl BackFillSync { peer: PeerId, ) -> Result<(), BackFillError> { if let Some(batch) = self.batches.get_mut(&batch_id) { - let request = batch.to_blocks_by_range_request(); - match network.backfill_blocks_by_range_request(peer, request, batch_id) { + let (request, is_blob_batch) = batch.to_blocks_by_range_request(); + match network.backfill_blocks_by_range_request(peer, is_blob_batch, request, batch_id) { Ok(request_id) => { // inform the batch about the new request if let Err(e) = batch.start_downloading_from_peer(peer, request_id) { @@ -1055,7 +1052,7 @@ impl BackFillSync { idle_peers.shuffle(&mut rng); while let Some(peer) = idle_peers.pop() { - if let Some(batch_id) = self.include_next_batch() { + if let Some(batch_id) = self.include_next_batch(network) { // send the batch self.send_batch(network, batch_id, peer)?; } else { @@ -1068,7 +1065,7 @@ impl BackFillSync { /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. 
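The retry path above ranks candidate peers as `(is_related, active_requests, peer)` tuples; because tuples sort lexicographically and `false < true`, a plain `sort_unstable` followed by the clippy-suggested `first()` (replacing `get(0)`) yields the least-loaded peer that is unrelated to previous failures. A self-contained sketch of the trick:

```rust
/// Pick a peer preferring (1) peers unrelated to the failed attempts and
/// (2) peers with fewer in-flight requests. Tuples compare field by field,
/// so `false < true` and lower request counts win.
fn choose_peer(peers: &[(&'static str, bool, usize)]) -> Option<&'static str> {
    let mut ranked: Vec<(bool, usize, &'static str)> = peers
        .iter()
        .map(|&(id, is_related, active_requests)| (is_related, active_requests, id))
        .collect();
    ranked.sort_unstable();
    ranked.first().map(|&(_, _, id)| id)
}

fn main() {
    let peers = [("a", true, 0), ("b", false, 3), ("c", false, 1)];
    // "c" wins: unrelated to earlier failures, and less loaded than "b".
    assert_eq!(choose_peer(&peers), Some("c"));
}
```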
- fn include_next_batch(&mut self) -> Option { + fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { // don't request batches beyond genesis; if self.last_batch_downloaded { return None; @@ -1105,10 +1102,15 @@ impl BackFillSync { self.to_be_downloaded = self .to_be_downloaded .saturating_sub(BACKFILL_EPOCHS_PER_BATCH); - self.include_next_batch() + self.include_next_batch(network) } Entry::Vacant(entry) => { - entry.insert(BatchInfo::new(&batch_id, BACKFILL_EPOCHS_PER_BATCH)); + let batch_type = network.batch_type(batch_id); + entry.insert(BatchInfo::new( + &batch_id, + BACKFILL_EPOCHS_PER_BATCH, + batch_type, + )); if self.would_complete(batch_id) { self.last_batch_downloaded = true; } diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs new file mode 100644 index 000000000..d989fbb33 --- /dev/null +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -0,0 +1,455 @@ +use crate::sync::block_lookups::parent_lookup::PARENT_FAIL_TOLERANCE; +use crate::sync::block_lookups::single_block_lookup::{ + LookupRequestError, LookupVerifyError, SingleBlockLookup, SingleLookupRequestState, State, +}; +use crate::sync::block_lookups::{ + BlobRequestState, BlockLookups, BlockRequestState, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, +}; +use crate::sync::manager::{BlockProcessType, Id, SingleLookupReqId}; +use crate::sync::network_context::SyncNetworkContext; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_availability_checker::{AvailabilityView, ChildComponents}; +use beacon_chain::{get_block_root, BeaconChainTypes}; +use lighthouse_network::rpc::methods::BlobsByRootRequest; +use lighthouse_network::rpc::BlocksByRootRequest; +use rand::prelude::IteratorRandom; +use std::ops::IndexMut; +use std::sync::Arc; +use std::time::Duration; +use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; +use types::{BlobSidecar, ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; + +#[derive(Debug, Copy, Clone)] +pub enum ResponseType { + Block, + Blob, +} + +#[derive(Debug, Copy, Clone)] +pub enum LookupType { + Current, + Parent, +} + +/// This trait helps differentiate `SingleBlockLookup`s from `ParentLookup`s .This is useful in +/// ensuring requests and responses are handled separately and enables us to use different failure +/// tolerances for each, while re-using the same basic request and retry logic. +pub trait Lookup { + const MAX_ATTEMPTS: u8; + fn lookup_type() -> LookupType; + fn max_attempts() -> u8 { + Self::MAX_ATTEMPTS + } +} + +/// A `Lookup` that is a part of a `ParentLookup`. +pub struct Parent; + +impl Lookup for Parent { + const MAX_ATTEMPTS: u8 = PARENT_FAIL_TOLERANCE; + fn lookup_type() -> LookupType { + LookupType::Parent + } +} + +/// A `Lookup` that part of a single block lookup. +pub struct Current; + +impl Lookup for Current { + const MAX_ATTEMPTS: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; + fn lookup_type() -> LookupType { + LookupType::Current + } +} + +/// This trait unifies common single block lookup functionality across blocks and blobs. This +/// includes making requests, verifying responses, and handling processing results. A +/// `SingleBlockLookup` includes both a `BlockRequestState` and a `BlobRequestState`, this trait is +/// implemented for each. +/// +/// The use of the `ResponseType` associated type gives us a degree of type +/// safety when handling a block/blob response ensuring we only mutate the correct corresponding +/// state. 
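The new `common.rs` distinguishes the two lookup flavours with zero-sized marker types behind the `Lookup` trait, so one generic request/retry path can carry different failure tolerances. A condensed sketch of the pattern (the tolerance numbers here are placeholders, not the crate's constants):

```rust
#[derive(Debug, PartialEq)]
enum LookupType { Current, Parent }

trait Lookup {
    const MAX_ATTEMPTS: u8;
    fn lookup_type() -> LookupType;
}

/// A lookup triggered directly for a block root.
struct Current;
impl Lookup for Current {
    const MAX_ATTEMPTS: u8 = 3; // stands in for SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS
    fn lookup_type() -> LookupType { LookupType::Current }
}

/// A lookup that is one link of a parent-chain search.
struct Parent;
impl Lookup for Parent {
    const MAX_ATTEMPTS: u8 = 5; // stands in for PARENT_FAIL_TOLERANCE
    fn lookup_type() -> LookupType { LookupType::Parent }
}

/// The same generic code path serves both lookup flavours.
fn too_many_attempts<L: Lookup>(failed: u8) -> bool {
    failed >= L::MAX_ATTEMPTS
}

fn main() {
    assert!(too_many_attempts::<Current>(3));
    assert!(!too_many_attempts::<Parent>(3));
    assert_eq!(Parent::lookup_type(), LookupType::Parent);
}
```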
+pub trait RequestState { + /// The type of the request . + type RequestType; + + /// A block or blob response. + type ResponseType; + + /// The type created after validation. + type VerifiedResponseType: Clone; + + /// We convert a `VerifiedResponseType` to this type prior to sending it to the beacon processor. + type ReconstructedResponseType; + + /* Request building methods */ + + /// Construct a new request. + fn build_request( + &mut self, + spec: &ChainSpec, + ) -> Result<(PeerId, Self::RequestType), LookupRequestError> { + // Verify and construct request. + self.too_many_attempts()?; + let peer = self.get_peer()?; + let request = self.new_request(spec); + Ok((peer, request)) + } + + /// Construct a new request and send it. + fn build_request_and_send( + &mut self, + id: Id, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + // Check if request is necessary. + if !matches!(self.get_state().state, State::AwaitingDownload) { + return Ok(()); + } + + // Construct request. + let (peer_id, request) = self.build_request(&cx.chain.spec)?; + + // Update request state. + self.get_state_mut().state = State::Downloading { peer_id }; + self.get_state_mut().req_counter += 1; + + // Make request + let id = SingleLookupReqId { + id, + req_counter: self.get_state().req_counter, + }; + Self::make_request(id, peer_id, request, cx) + } + + /// Verify the current request has not exceeded the maximum number of attempts. + fn too_many_attempts(&self) -> Result<(), LookupRequestError> { + let max_attempts = L::max_attempts(); + let request_state = self.get_state(); + + if request_state.failed_attempts() >= max_attempts { + let cannot_process = + request_state.failed_processing >= request_state.failed_downloading; + Err(LookupRequestError::TooManyAttempts { cannot_process }) + } else { + Ok(()) + } + } + + /// Get the next peer to request. Draws from the set of peers we think should have both the + /// block and blob first. If that fails, we draw from the set of peers that may have either. + fn get_peer(&mut self) -> Result { + let request_state = self.get_state_mut(); + let peer_id = request_state + .available_peers + .iter() + .choose(&mut rand::thread_rng()) + .copied() + .ok_or(LookupRequestError::NoPeers)?; + request_state.used_peers.insert(peer_id); + Ok(peer_id) + } + + /// Initialize `Self::RequestType`. + fn new_request(&self, spec: &ChainSpec) -> Self::RequestType; + + /// Send the request to the network service. + fn make_request( + id: SingleLookupReqId, + peer_id: PeerId, + request: Self::RequestType, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError>; + + /* Response handling methods */ + + /// Verify the response is valid based on what we requested. + fn verify_response( + &mut self, + expected_block_root: Hash256, + response: Option, + ) -> Result, LookupVerifyError> { + let request_state = self.get_state_mut(); + match request_state.state { + State::AwaitingDownload => { + request_state.register_failure_downloading(); + Err(LookupVerifyError::ExtraBlocksReturned) + } + State::Downloading { peer_id } => { + self.verify_response_inner(expected_block_root, response, peer_id) + } + State::Processing { peer_id: _ } => match response { + Some(_) => { + // We sent the block for processing and received an extra block. 
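`too_many_attempts` above records not just whether to give up but why: when processing failures dominate download failures, the target itself is suspect (`cannot_process`), which callers later use to decide whether a whole chain should be blacklisted. A minimal model of that accounting, with stand-in types:

```rust
#[derive(Default)]
struct RequestState {
    failed_processing: u8,
    failed_downloading: u8,
}

impl RequestState {
    fn failed_attempts(&self) -> u8 {
        self.failed_processing + self.failed_downloading
    }
}

enum LookupOutcome {
    KeepTrying,
    /// Mirrors `LookupRequestError::TooManyAttempts { cannot_process }`:
    /// `cannot_process == true` means failures were dominated by processing errors.
    GiveUp { cannot_process: bool },
}

fn check(state: &RequestState, max_attempts: u8) -> LookupOutcome {
    if state.failed_attempts() >= max_attempts {
        LookupOutcome::GiveUp {
            cannot_process: state.failed_processing >= state.failed_downloading,
        }
    } else {
        LookupOutcome::KeepTrying
    }
}

fn main() {
    let state = RequestState { failed_processing: 2, failed_downloading: 1 };
    match check(&state, 3) {
        LookupOutcome::GiveUp { cannot_process } => assert!(cannot_process),
        LookupOutcome::KeepTrying => unreachable!(),
    }
}
```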
+ request_state.register_failure_downloading(); + Err(LookupVerifyError::ExtraBlocksReturned) + } + None => { + // This is simply the stream termination and we are already processing the + // block + Ok(None) + } + }, + } + } + + /// The response verification unique to block or blobs. + fn verify_response_inner( + &mut self, + expected_block_root: Hash256, + response: Option, + peer_id: PeerId, + ) -> Result, LookupVerifyError>; + + /// A getter for the parent root of the response. Returns an `Option` because we won't know + /// the blob parent if we don't end up getting any blobs in the response. + fn get_parent_root(verified_response: &Self::VerifiedResponseType) -> Option; + + /// Caches the verified response in the lookup if necessary. This is only necessary for lookups + /// triggered by `UnknownParent` errors. + fn add_to_child_components( + verified_response: Self::VerifiedResponseType, + components: &mut ChildComponents, + ); + + /// Convert a verified response to the type we send to the beacon processor. + fn verified_to_reconstructed( + block_root: Hash256, + verified: Self::VerifiedResponseType, + ) -> Self::ReconstructedResponseType; + + /// Send the response to the beacon processor. + fn send_reconstructed_for_processing( + id: Id, + bl: &BlockLookups, + block_root: Hash256, + verified: Self::ReconstructedResponseType, + duration: Duration, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError>; + + /// Register a failure to process the block or blob. + fn register_failure_downloading(&mut self) { + self.get_state_mut().register_failure_downloading() + } + + /* Utility methods */ + + /// Returns the `ResponseType` associated with this trait implementation. Useful in logging. + fn response_type() -> ResponseType; + + /// A getter for the `BlockRequestState` or `BlobRequestState` associated with this trait. + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; + + /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. + fn get_state(&self) -> &SingleLookupRequestState; + + /// A getter for a mutable reference to the SingleLookupRequestState associated with this trait. + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState; +} + +impl RequestState for BlockRequestState { + type RequestType = BlocksByRootRequest; + type ResponseType = Arc>; + type VerifiedResponseType = Arc>; + type ReconstructedResponseType = RpcBlock; + + fn new_request(&self, spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![self.requested_block_root], spec) + } + + fn make_request( + id: SingleLookupReqId, + peer_id: PeerId, + request: Self::RequestType, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + cx.block_lookup_request(id, peer_id, request, L::lookup_type()) + .map_err(LookupRequestError::SendFailed) + } + + fn verify_response_inner( + &mut self, + expected_block_root: Hash256, + response: Option, + peer_id: PeerId, + ) -> Result>>, LookupVerifyError> { + match response { + Some(block) => { + // Compute the block root using this specific function so that we can get timing + // metrics. + let block_root = get_block_root(&block); + if block_root != expected_block_root { + // return an error and drop the block + // NOTE: we take this is as a download failure to prevent counting the + // attempt as a chain failure, but simply a peer failure. + self.state.register_failure_downloading(); + Err(LookupVerifyError::RootMismatch) + } else { + // Return the block for processing. 
+ self.state.state = State::Processing { peer_id }; + Ok(Some(block)) + } + } + None => { + self.state.register_failure_downloading(); + Err(LookupVerifyError::NoBlockReturned) + } + } + } + + fn get_parent_root(verified_response: &Arc>) -> Option { + Some(verified_response.parent_root()) + } + + fn add_to_child_components( + verified_response: Arc>, + components: &mut ChildComponents, + ) { + components.merge_block(verified_response); + } + + fn verified_to_reconstructed( + block_root: Hash256, + block: Arc>, + ) -> RpcBlock { + RpcBlock::new_without_blobs(Some(block_root), block) + } + + fn send_reconstructed_for_processing( + id: Id, + bl: &BlockLookups, + block_root: Hash256, + constructed: RpcBlock, + duration: Duration, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + bl.send_block_for_processing( + block_root, + constructed, + duration, + BlockProcessType::SingleBlock { id }, + cx, + ) + } + + fn response_type() -> ResponseType { + ResponseType::Block + } + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + &mut request.block_request_state + } + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} + +impl RequestState for BlobRequestState { + type RequestType = BlobsByRootRequest; + type ResponseType = Arc>; + type VerifiedResponseType = FixedBlobSidecarList; + type ReconstructedResponseType = FixedBlobSidecarList; + + fn new_request(&self, spec: &ChainSpec) -> BlobsByRootRequest { + let blob_id_vec: Vec = self.requested_ids.clone().into(); + BlobsByRootRequest::new(blob_id_vec, spec) + } + + fn make_request( + id: SingleLookupReqId, + peer_id: PeerId, + request: Self::RequestType, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + cx.blob_lookup_request(id, peer_id, request, L::lookup_type()) + .map_err(LookupRequestError::SendFailed) + } + + fn verify_response_inner( + &mut self, + _expected_block_root: Hash256, + blob: Option, + peer_id: PeerId, + ) -> Result>, LookupVerifyError> { + match blob { + Some(blob) => { + let received_id = blob.id(); + if !self.requested_ids.contains(&received_id) { + self.state.register_failure_downloading(); + Err(LookupVerifyError::UnrequestedBlobId) + } else { + // State should remain downloading until we receive the stream terminator. 
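The blob arm of `verify_response_inner`, continuing below, slots each sidecar into a fixed-length table keyed by blob index and only releases the list once the stream terminator (`None`) arrives, rejecting unrequested ids and out-of-range indices along the way. A simplified std-only model, assuming a hypothetical cap of 6 blobs per block in place of `T::EthSpec::max_blobs_per_block()`:

```rust
const MAX_BLOBS_PER_BLOCK: usize = 6; // placeholder for the spec constant

#[derive(Debug, PartialEq)]
struct Blob { index: usize }

#[derive(Debug, PartialEq)]
enum VerifyError { UnrequestedIndex, InvalidIndex }

#[derive(Default)]
struct BlobRequest {
    requested: Vec<usize>,
    downloaded: [Option<Blob>; MAX_BLOBS_PER_BLOCK],
}

impl BlobRequest {
    /// `Some(blob)` fills a slot; `None` (the stream terminator) drains the table.
    fn on_response(
        &mut self,
        blob: Option<Blob>,
    ) -> Result<Option<[Option<Blob>; MAX_BLOBS_PER_BLOCK]>, VerifyError> {
        match blob {
            Some(blob) => {
                if !self.requested.contains(&blob.index) {
                    return Err(VerifyError::UnrequestedIndex);
                }
                if blob.index >= MAX_BLOBS_PER_BLOCK {
                    return Err(VerifyError::InvalidIndex);
                }
                self.requested.retain(|&i| i != blob.index);
                self.downloaded[blob.index] = Some(blob);
                Ok(None) // keep downloading until the terminator
            }
            None => Ok(Some(std::mem::take(&mut self.downloaded))),
        }
    }
}

fn main() {
    let mut req = BlobRequest { requested: vec![0, 2], ..Default::default() };
    assert_eq!(req.on_response(Some(Blob { index: 2 })), Ok(None));
    assert_eq!(req.on_response(Some(Blob { index: 5 })), Err(VerifyError::UnrequestedIndex));
    let blobs = req.on_response(None).unwrap().unwrap();
    assert!(blobs[2].is_some() && blobs[0].is_none());
}
```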
+ self.requested_ids.remove(&received_id); + let blob_index = blob.index; + + if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { + return Err(LookupVerifyError::InvalidIndex(blob.index)); + } + *self.blob_download_queue.index_mut(blob_index as usize) = Some(blob); + Ok(None) + } + } + None => { + self.state.state = State::Processing { peer_id }; + let blobs = std::mem::take(&mut self.blob_download_queue); + Ok(Some(blobs)) + } + } + } + + fn get_parent_root(verified_response: &FixedBlobSidecarList) -> Option { + verified_response + .into_iter() + .filter_map(|blob| blob.as_ref()) + .map(|blob| blob.block_parent_root()) + .next() + } + + fn add_to_child_components( + verified_response: FixedBlobSidecarList, + components: &mut ChildComponents, + ) { + components.merge_blobs(verified_response); + } + + fn verified_to_reconstructed( + _block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> FixedBlobSidecarList { + blobs + } + + fn send_reconstructed_for_processing( + id: Id, + bl: &BlockLookups, + block_root: Hash256, + verified: FixedBlobSidecarList, + duration: Duration, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + bl.send_blobs_for_processing( + block_root, + verified, + duration, + BlockProcessType::SingleBlob { id }, + cx, + ) + } + + fn response_type() -> ResponseType { + ResponseType::Blob + } + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + &mut request.blob_request_state + } + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 4340aa41d..62cdc4fa2 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,64 +1,69 @@ -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::time::Duration; - +use self::parent_lookup::ParentVerifyError; +use self::single_block_lookup::SingleBlockLookup; +use super::manager::BlockProcessingResult; +use super::BatchProcessResult; +use super::{manager::BlockProcessType, network_context::SyncNetworkContext}; +use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; -use beacon_chain::{BeaconChainTypes, BlockError}; +use crate::sync::block_lookups::common::LookupType; +use crate::sync::block_lookups::parent_lookup::{ParentLookup, RequestError}; +use crate::sync::block_lookups::single_block_lookup::{CachedChild, LookupRequestError}; +use crate::sync::manager::{Id, SingleLookupReqId}; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +pub use beacon_chain::data_availability_checker::ChildComponents; +use beacon_chain::data_availability_checker::{ + AvailabilityCheckErrorCategory, DataAvailabilityChecker, +}; +use beacon_chain::validator_monitor::timestamp_now; +use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; +pub use common::Current; +pub use common::Lookup; +pub use common::Parent; +pub use common::RequestState; use fnv::FnvHashMap; +use lighthouse_network::rpc::RPCError; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; +pub use single_block_lookup::{BlobRequestState, BlockRequestState}; use slog::{debug, error, trace, warn, Logger}; use smallvec::SmallVec; +use std::collections::{HashMap, VecDeque}; use std::sync::Arc; -use store::{Hash256, SignedBeaconBlock}; - -use crate::metrics; - -use 
self::parent_lookup::PARENT_FAIL_TOLERANCE; -use self::{ - parent_lookup::{ParentLookup, VerifyError}, - single_block_lookup::SingleBlockRequest, -}; - -use super::manager::BlockProcessResult; -use super::BatchProcessResult; -use super::{ - manager::{BlockProcessType, Id}, - network_context::SyncNetworkContext, -}; +use std::time::Duration; +use store::Hash256; +use types::blob_sidecar::FixedBlobSidecarList; +use types::Slot; +pub mod common; mod parent_lookup; mod single_block_lookup; #[cfg(test)] mod tests; -pub type RootBlockTuple = (Hash256, Arc>); +pub type DownloadedBlock = (Hash256, RpcBlock); const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; -const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; +pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; -pub(crate) struct BlockLookups { +pub struct BlockLookups { /// Parent chain lookups being downloaded. parent_lookups: SmallVec<[ParentLookup; 3]>, - processing_parent_lookups: - HashMap, SingleBlockRequest)>, + processing_parent_lookups: HashMap, SingleBlockLookup)>, /// A cache of failed chain lookups to prevent duplicate searches. failed_chains: LRUTimeCache, - /// A collection of block hashes being searched for and a flag indicating if a result has been - /// received or not. - /// - /// The flag allows us to determine if the peer returned data or sent us nothing. - single_block_lookups: FnvHashMap>, + single_block_lookups: FnvHashMap>, + + pub(crate) da_checker: Arc>, /// The logger for the import manager. log: Logger, } impl BlockLookups { - pub fn new(log: Logger) -> Self { + pub fn new(da_checker: Arc>, log: Logger) -> Self { Self { parent_lookups: Default::default(), processing_parent_lookups: Default::default(), @@ -66,27 +71,95 @@ impl BlockLookups { FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), + da_checker, log, } } /* Lookup requests */ + /// Creates a lookup for the block with the given `block_root` and immediately triggers it. + pub fn search_block( + &mut self, + block_root: Hash256, + peer_source: &[PeerId], + cx: &mut SyncNetworkContext, + ) { + self.new_current_lookup(block_root, None, peer_source, cx) + } + + /// Creates a lookup for the block with the given `block_root`, while caching other block + /// components we've already received. The block components are cached here because we haven't + /// imported its parent and therefore can't fully validate it and store it in the data + /// availability cache. + /// + /// The request is immediately triggered. + pub fn search_child_block( + &mut self, + block_root: Hash256, + child_components: ChildComponents, + peer_source: &[PeerId], + cx: &mut SyncNetworkContext, + ) { + self.new_current_lookup(block_root, Some(child_components), peer_source, cx) + } + + /// Attempts to trigger the request matching the given `block_root`. + pub fn trigger_single_lookup( + &mut self, + mut single_block_lookup: SingleBlockLookup, + cx: &SyncNetworkContext, + ) { + let block_root = single_block_lookup.block_root(); + match single_block_lookup.request_block_and_blobs(cx) { + Ok(()) => self.add_single_lookup(single_block_lookup), + Err(e) => { + debug!(self.log, "Single block lookup failed"; + "error" => ?e, + "block_root" => ?block_root, + ); + } + } + } + + /// Adds a lookup to the `single_block_lookups` map. 
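`new_current_lookup` below takes care not to spawn duplicate lookups: an existing lookup for the same root simply absorbs the new peers and any cached child components. A compact merge-or-insert sketch using the `HashMap` entry API (the real map is keyed by lookup id and scanned with `is_for_block`; all types here are stand-ins):

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

type Root = u64; // stand-in for Hash256
type PeerId = &'static str;

#[derive(Default)]
struct Lookup {
    peers: Vec<PeerId>,
    cached_components: Option<String>, // stand-in for ChildComponents
}

fn search_block(
    lookups: &mut HashMap<Root, Lookup>,
    root: Root,
    peers: &[PeerId],
    components: Option<String>,
) {
    match lookups.entry(root) {
        // Already looking for this root: merge peers/components, don't re-request.
        Entry::Occupied(mut existing) => {
            existing.get_mut().peers.extend_from_slice(peers);
            if components.is_some() {
                existing.get_mut().cached_components = components;
            }
        }
        Entry::Vacant(slot) => {
            slot.insert(Lookup { peers: peers.to_vec(), cached_components: components });
            // ...then trigger the actual block/blob requests.
        }
    }
}

fn main() {
    let mut lookups = HashMap::new();
    search_block(&mut lookups, 1, &["peer_a"], None);
    search_block(&mut lookups, 1, &["peer_b"], Some("blobs".into()));
    let lookup = &lookups[&1];
    assert_eq!(lookup.peers, vec!["peer_a", "peer_b"]);
    assert_eq!(lookup.cached_components.as_deref(), Some("blobs"));
}
```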
+ pub fn add_single_lookup(&mut self, single_block_lookup: SingleBlockLookup) { + self.single_block_lookups + .insert(single_block_lookup.id, single_block_lookup); + + metrics::set_gauge( + &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, + self.single_block_lookups.len() as i64, + ); + } + /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is /// constructed. - pub fn search_block(&mut self, hash: Hash256, peer_id: PeerId, cx: &mut SyncNetworkContext) { + pub fn new_current_lookup( + &mut self, + block_root: Hash256, + child_components: Option>, + peers: &[PeerId], + cx: &mut SyncNetworkContext, + ) { // Do not re-request a block that is already being requested - if self + if let Some((_, lookup)) = self .single_block_lookups - .values_mut() - .any(|single_block_request| single_block_request.add_peer(&hash, &peer_id)) + .iter_mut() + .find(|(_id, lookup)| lookup.is_for_block(block_root)) { + lookup.add_peers(peers); + if let Some(components) = child_components { + lookup.add_child_components(components); + } return; } - if self.parent_lookups.iter_mut().any(|parent_req| { - parent_req.add_peer(&hash, &peer_id) || parent_req.contains_block(&hash) + if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { + parent_req.is_for_block(block_root) || parent_req.contains_block(&block_root) }) { + parent_lookup.add_peers(peers); + // If the block was already downloaded, or is being downloaded in this moment, do not // request it. return; @@ -95,57 +168,58 @@ impl BlockLookups { if self .processing_parent_lookups .values() - .any(|(hashes, _last_parent_request)| hashes.contains(&hash)) + .any(|(hashes, _last_parent_request)| hashes.contains(&block_root)) { // we are already processing this block, ignore it. return; } - debug!( - self.log, - "Searching for block"; - "peer_id" => %peer_id, - "block" => %hash + let msg = if child_components.is_some() { + "Searching for components of a block with unknown parent" + } else { + "Searching for block components" + }; + + let lookup = SingleBlockLookup::new( + block_root, + child_components, + peers, + self.da_checker.clone(), + cx.next_id(), ); - let mut single_block_request = SingleBlockRequest::new(hash, peer_id); - - let (peer_id, request) = single_block_request.request_block().unwrap(); - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) { - self.single_block_lookups - .insert(request_id, single_block_request); - - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); - } + debug!( + self.log, + "{}", msg; + "peer_ids" => ?peers, + "block" => ?block_root, + ); + self.trigger_single_lookup(lookup, cx); } /// If a block is attempted to be processed but we do not know its parent, this function is /// called in order to find the block's parent. pub fn search_parent( &mut self, + slot: Slot, block_root: Hash256, - block: Arc>, + parent_root: Hash256, peer_id: PeerId, cx: &mut SyncNetworkContext, ) { - let parent_root = block.parent_root(); // If this block or it's parent is part of a known failed chain, ignore it. if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { debug!(self.log, "Block is from a past failed chain. Dropping"; - "block_root" => ?block_root, "block_slot" => block.slot()); + "block_root" => ?block_root, "block_slot" => slot); return; } // Make sure this block is not already downloaded, and that neither it or its parent is // being searched for. 
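Both `search_parent` above and the parent error paths below consult the `failed_chains` time cache, so a chain that recently failed is ignored for a while instead of being re-downloaded in a loop. A bare-bones expiry cache in the spirit of `LRUTimeCache`, evicting lazily on lookup:

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct TimeCache<K> {
    entries: HashMap<K, Instant>,
    ttl: Duration,
}

impl<K: std::hash::Hash + Eq> TimeCache<K> {
    fn new(ttl: Duration) -> Self {
        Self { entries: HashMap::new(), ttl }
    }

    fn insert(&mut self, key: K) {
        self.entries.insert(key, Instant::now());
    }

    /// A key counts as present only while it is younger than the TTL;
    /// expired entries are dropped lazily here rather than by a sweeper task.
    fn contains(&mut self, key: &K) -> bool {
        if let Some(inserted_at) = self.entries.get(key) {
            if inserted_at.elapsed() < self.ttl {
                return true;
            }
            self.entries.remove(key);
        }
        false
    }
}

fn main() {
    // Mirrors FAILED_CHAINS_CACHE_EXPIRY_SECONDS = 60 from the diff.
    let mut failed_chains = TimeCache::new(Duration::from_secs(60));
    failed_chains.insert([0u8; 32]); // a failed chain root
    assert!(failed_chains.contains(&[0u8; 32]));
    assert!(!failed_chains.contains(&[1u8; 32]));
}
```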
- if self.parent_lookups.iter_mut().any(|parent_req| { - parent_req.contains_block(&block_root) - || parent_req.add_peer(&block_root, &peer_id) - || parent_req.add_peer(&parent_root, &peer_id) + if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { + parent_req.contains_block(&block_root) || parent_req.is_for_block(block_root) }) { + parent_lookup.add_peer(peer_id); // we are already searching for this block, ignore it return; } @@ -158,68 +232,84 @@ impl BlockLookups { // we are already processing this block, ignore it. return; } - - let parent_lookup = ParentLookup::new(block_root, block, peer_id); + let parent_lookup = ParentLookup::new( + block_root, + parent_root, + peer_id, + self.da_checker.clone(), + cx, + ); self.request_parent(parent_lookup, cx); } /* Lookup responses */ - pub fn single_block_lookup_response( + /// Get a single block lookup by its ID. This method additionally ensures the `req_counter` + /// matches the current `req_counter` for the lookup. This ensures any stale responses from requests + /// that have been retried are ignored. + fn get_single_lookup>( &mut self, - id: Id, + id: SingleLookupReqId, + ) -> Option> { + let mut lookup = self.single_block_lookups.remove(&id.id)?; + + let request_state = R::request_state_mut(&mut lookup); + if id.req_counter != request_state.get_state().req_counter { + // We don't want to drop the lookup, just ignore the old response. + self.single_block_lookups.insert(id.id, lookup); + return None; + } + Some(lookup) + } + + /// Checks whether a single block lookup is waiting for a parent lookup to complete. This is + /// necessary because we want to make sure all parents are processed before sending a child + /// for processing, otherwise the block will fail validation and will be returned to the network + /// layer with an `UnknownParent` error. + pub fn has_pending_parent_request(&self, block_root: Hash256) -> bool { + self.parent_lookups + .iter() + .any(|parent_lookup| parent_lookup.chain_hash() == block_root) + } + + /// Process a block or blob response received from a single lookup request. + pub fn single_lookup_response>( + &mut self, + lookup_id: SingleLookupReqId, peer_id: PeerId, - block: Option>>, + response: Option, seen_timestamp: Duration, - cx: &mut SyncNetworkContext, + cx: &SyncNetworkContext, ) { - let mut request = match self.single_block_lookups.entry(id) { - Entry::Occupied(req) => req, - Entry::Vacant(_) => { - if block.is_some() { - debug!( - self.log, - "Block returned for single block lookup not present" - ); - } - return; + let id = lookup_id.id; + let response_type = R::response_type(); + + let Some(lookup) = self.get_single_lookup::(lookup_id) else { + if response.is_some() { + // We don't have the ability to cancel in-flight RPC requests. So this can happen + // if we started this RPC request, and later saw the block/blobs via gossip. + debug!( + self.log, + "Block returned for single block lookup not present"; + "response_type" => ?response_type, + ); } + return; }; - match request.get_mut().verify_block(block) { - Ok(Some((block_root, block))) => { - // This is the correct block, send it for processing - if self - .send_block_for_processing( - block_root, - block, - seen_timestamp, - BlockProcessType::SingleBlock { id }, - cx, - ) - .is_err() - { - // Remove to avoid inconsistencies - self.single_block_lookups.remove(&id); - } - } - Ok(None) => { - // request finished correctly, it will be removed after the block is processed. 
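`get_single_lookup` above tags each outgoing request with a `req_counter` and drops any response whose counter no longer matches, so a late reply to a retried request cannot corrupt the current attempt. The guard in miniature:

```rust
struct LookupState {
    req_counter: u32,
}

struct ResponseId {
    req_counter: u32,
}

/// Accept a response only if it belongs to the latest request attempt;
/// replies to earlier, retried attempts are silently ignored.
fn accept(lookup: &LookupState, response: &ResponseId) -> bool {
    lookup.req_counter == response.req_counter
}

fn main() {
    let mut lookup = LookupState { req_counter: 1 };
    let stale = ResponseId { req_counter: 1 };

    // A retry bumps the counter, invalidating replies to the first attempt.
    lookup.req_counter += 1;
    assert!(!accept(&lookup, &stale));
    assert!(accept(&lookup, &ResponseId { req_counter: 2 }));
}
```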
- } - Err(error) => { - let msg: &str = error.into(); - cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); - // Remove the request, if it can be retried it will be added with a new id. - let mut req = request.remove(); + let expected_block_root = lookup.block_root(); - debug!(self.log, "Single block lookup failed"; - "peer_id" => %peer_id, "error" => msg, "block_root" => %req.hash); - // try the request again if possible - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(id) = cx.single_block_lookup_request(peer_id, request) { - self.single_block_lookups.insert(id, req); - } - } + match self.single_lookup_response_inner::(peer_id, response, seen_timestamp, cx, lookup) + { + Ok(lookup) => { + self.single_block_lookups.insert(id, lookup); + } + Err(e) => { + debug!(self.log, + "Single lookup request failed"; + "error" => ?e, + "block_root" => ?expected_block_root, + ); } } @@ -229,82 +319,179 @@ impl BlockLookups { ); } - /// Process a response received from a parent lookup request. - pub fn parent_lookup_response( - &mut self, - id: Id, + /// Consolidates error handling for `single_lookup_response`. An `Err` here should always mean + /// the lookup is dropped. + fn single_lookup_response_inner>( + &self, peer_id: PeerId, - block: Option>>, + response: Option, seen_timestamp: Duration, - cx: &mut SyncNetworkContext, - ) { + cx: &SyncNetworkContext, + mut lookup: SingleBlockLookup, + ) -> Result, LookupRequestError> { + let response_type = R::response_type(); + let log = self.log.clone(); + let expected_block_root = lookup.block_root(); + let request_state = R::request_state_mut(&mut lookup); + + match request_state.verify_response(expected_block_root, response) { + Ok(Some(verified_response)) => { + self.handle_verified_response::( + seen_timestamp, + cx, + BlockProcessType::SingleBlock { id: lookup.id }, + verified_response, + &mut lookup, + )?; + } + Ok(None) => {} + Err(e) => { + debug!( + log, + "Single lookup response verification failed, retrying"; + "block_root" => ?expected_block_root, + "peer_id" => %peer_id, + "response_type" => ?response_type, + "error" => ?e + ); + let msg = e.into(); + cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); + + request_state.register_failure_downloading(); + lookup.request_block_and_blobs(cx)?; + } + } + Ok(lookup) + } + + fn handle_verified_response>( + &self, + seen_timestamp: Duration, + cx: &SyncNetworkContext, + process_type: BlockProcessType, + verified_response: R::VerifiedResponseType, + lookup: &mut SingleBlockLookup, + ) -> Result<(), LookupRequestError> { + let id = lookup.id; + let block_root = lookup.block_root(); + + R::request_state_mut(lookup) + .get_state_mut() + .component_downloaded = true; + + let cached_child = lookup.add_response::(verified_response.clone()); + match cached_child { + CachedChild::Ok(block) => { + // If we have an outstanding parent request for this block, delay sending the response until + // all parent blocks have been processed, otherwise we will fail validation with an + // `UnknownParent`. + let delay_send = match L::lookup_type() { + LookupType::Parent => false, + LookupType::Current => self.has_pending_parent_request(lookup.block_root()), + }; + + if !delay_send { + self.send_block_for_processing( + block_root, + block, + seen_timestamp, + process_type, + cx, + )? + } + } + CachedChild::DownloadIncomplete => { + // If this was the result of a block request, we can't determine if the block peer + // did anything wrong. 
If we already had both a block and blobs response processed, + // we should penalize the blobs peer because they did not provide all blobs on the + // initial request. + if lookup.both_components_downloaded() { + lookup.penalize_blob_peer(cx); + lookup + .blob_request_state + .state + .register_failure_downloading(); + } + lookup.request_block_and_blobs(cx)?; + } + CachedChild::NotRequired => R::send_reconstructed_for_processing( + id, + self, + block_root, + R::verified_to_reconstructed(block_root, verified_response), + seen_timestamp, + cx, + )?, + CachedChild::Err(e) => { + warn!(self.log, "Consistency error in cached block"; + "error" => ?e, + "block_root" => ?block_root + ); + lookup.handle_consistency_failure(cx); + lookup.request_block_and_blobs(cx)?; + } + } + Ok(()) + } + + /// Get a parent block lookup by its ID. This method additionally ensures the `req_counter` + /// matches the current `req_counter` for the lookup. This any stale responses from requests + /// that have been retried are ignored. + fn get_parent_lookup>( + &mut self, + id: SingleLookupReqId, + ) -> Option> { let mut parent_lookup = if let Some(pos) = self .parent_lookups .iter() - .position(|request| request.pending_response(id)) + .position(|request| request.current_parent_request.id == id.id) { self.parent_lookups.remove(pos) } else { - if block.is_some() { + return None; + }; + + if R::request_state_mut(&mut parent_lookup.current_parent_request) + .get_state() + .req_counter + != id.req_counter + { + self.parent_lookups.push(parent_lookup); + return None; + } + Some(parent_lookup) + } + + /// Process a response received from a parent lookup request. + pub fn parent_lookup_response>( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + response: Option, + seen_timestamp: Duration, + cx: &SyncNetworkContext, + ) { + let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { + if response.is_some() { debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id); } return; }; - match parent_lookup.verify_block(block, &mut self.failed_chains) { - Ok(Some((block_root, block))) => { - // Block is correct, send to the beacon processor. - let chain_hash = parent_lookup.chain_hash(); - if self - .send_block_for_processing( - block_root, - block, - seen_timestamp, - BlockProcessType::ParentLookup { chain_hash }, - cx, - ) - .is_ok() - { - self.parent_lookups.push(parent_lookup) - } - } - Ok(None) => { - // Request finished successfully, nothing else to do. It will be removed after the - // processing result arrives. + match self.parent_lookup_response_inner::( + peer_id, + response, + seen_timestamp, + cx, + &mut parent_lookup, + ) { + Ok(()) => { self.parent_lookups.push(parent_lookup); } - Err(e) => match e { - VerifyError::RootMismatch - | VerifyError::NoBlockReturned - | VerifyError::ExtraBlocksReturned => { - let e = e.into(); - warn!(self.log, "Peer sent invalid response to parent request."; - "peer_id" => %peer_id, "reason" => %e); - - // We do not tolerate these kinds of errors. We will accept a few but these are signs - // of a faulty peer. - cx.report_peer(peer_id, PeerAction::LowToleranceError, e); - - // We try again if possible. 
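`handle_verified_response` above funnels every verified block or blob response through one four-way `CachedChild` decision, with a twist: a complete child block is held back while a parent lookup for it is still in flight, because processing it early would only fail with `UnknownParent`. A schematic of the control flow, with string stand-ins for the real payloads:

```rust
enum CachedChild {
    /// Block and blobs are both cached: ship the full block for processing.
    Ok(&'static str),
    /// Something is still missing: re-request block and blobs.
    DownloadIncomplete,
    /// Not a child lookup: forward just this response for processing.
    NotRequired,
    /// Cached components disagree with each other: penalize, drop cache, retry.
    Err(&'static str),
}

fn handle(cached: CachedChild, parent_still_pending: bool) -> &'static str {
    match cached {
        CachedChild::Ok(_block) if parent_still_pending => {
            // Sending now would fail validation with `UnknownParent`,
            // so wait until the parent chain has been processed.
            "delay until parent chain processed"
        }
        CachedChild::Ok(_block) => "send block for processing",
        CachedChild::DownloadIncomplete => "request missing components",
        CachedChild::NotRequired => "send response for processing as-is",
        CachedChild::Err(_e) => "penalize peer, drop cache, retry",
    }
}

fn main() {
    assert_eq!(handle(CachedChild::Ok("block"), true), "delay until parent chain processed");
    assert_eq!(handle(CachedChild::DownloadIncomplete, false), "request missing components");
}
```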
- self.request_parent(parent_lookup, cx); - } - VerifyError::PreviousFailure { parent_root } => { - debug!( - self.log, - "Parent chain ignored due to past failure"; - "block" => %parent_root, - ); - // Add the root block to failed chains - self.failed_chains.insert(parent_lookup.chain_hash()); - - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "bbroot_failed_chains", - ); - } - }, - }; + Err(e) => { + self.handle_parent_request_error(&mut parent_lookup, cx, e); + } + } metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, @@ -312,47 +499,129 @@ impl BlockLookups { ); } - /* Error responses */ + /// Consolidates error handling for `parent_lookup_response`. An `Err` here should always mean + /// the lookup is dropped. + fn parent_lookup_response_inner>( + &mut self, + peer_id: PeerId, + response: Option, + seen_timestamp: Duration, + cx: &SyncNetworkContext, + parent_lookup: &mut ParentLookup, + ) -> Result<(), RequestError> { + match parent_lookup.verify_response::(response, &mut self.failed_chains) { + Ok(Some(verified_response)) => { + self.handle_verified_response::( + seen_timestamp, + cx, + BlockProcessType::ParentLookup { + chain_hash: parent_lookup.chain_hash(), + }, + verified_response, + &mut parent_lookup.current_parent_request, + )?; + } + Ok(None) => {} + Err(e) => self.handle_parent_verify_error::(peer_id, parent_lookup, e, cx)?, + }; + Ok(()) + } - #[allow(clippy::needless_collect)] // false positive - pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { - /* Check disconnection for single block lookups */ - // better written after https://github.com/rust-lang/rust/issues/59618 - let remove_retry_ids: Vec = self - .single_block_lookups - .iter_mut() - .filter_map(|(id, req)| { - if req.check_peer_disconnected(peer_id).is_err() { - Some(*id) - } else { - None - } - }) - .collect(); + /// Handle logging and peer scoring for `ParentVerifyError`s during parent lookup requests. + fn handle_parent_verify_error>( + &mut self, + peer_id: PeerId, + parent_lookup: &mut ParentLookup, + e: ParentVerifyError, + cx: &SyncNetworkContext, + ) -> Result<(), RequestError> { + match e { + ParentVerifyError::RootMismatch + | ParentVerifyError::NoBlockReturned + | ParentVerifyError::NotEnoughBlobsReturned + | ParentVerifyError::ExtraBlocksReturned + | ParentVerifyError::UnrequestedBlobId + | ParentVerifyError::ExtraBlobsReturned + | ParentVerifyError::InvalidIndex(_) => { + let e = e.into(); + warn!(self.log, "Peer sent invalid response to parent request."; + "peer_id" => %peer_id, "reason" => %e); - for mut req in remove_retry_ids - .into_iter() - .map(|id| self.single_block_lookups.remove(&id).unwrap()) - .collect::>() - { - // retry the request - match req.request_block() { - Ok((peer_id, block_request)) => { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) { - self.single_block_lookups.insert(request_id, req); - } - } - Err(e) => { - trace!( - self.log, - "Single block request failed on peer disconnection"; - "block_root" => %req.hash, - "peer_id" => %peer_id, - "reason" => <&str>::from(e), - ); - } + // We do not tolerate these kinds of errors. We will accept a few but these are signs + // of a faulty peer. + cx.report_peer(peer_id, PeerAction::LowToleranceError, e); + + // We try again if possible. 
+ parent_lookup.request_parent(cx)?; + } + ParentVerifyError::PreviousFailure { parent_root } => { + debug!( + self.log, + "Parent chain ignored due to past failure"; + "block" => %parent_root, + ); + // Add the root block to failed chains + self.failed_chains.insert(parent_lookup.chain_hash()); + + cx.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_failed_chains", + ); } } + Ok(()) + } + + /// Handle logging and peer scoring for `RequestError`s during parent lookup requests. + fn handle_parent_request_error( + &mut self, + parent_lookup: &mut ParentLookup, + cx: &SyncNetworkContext, + e: RequestError, + ) { + debug!(self.log, "Failed to request parent"; "error" => e.as_static()); + match e { + RequestError::SendFailed(_) => { + // Probably shutting down, nothing to do here. Drop the request + } + RequestError::ChainTooLong => { + self.failed_chains.insert(parent_lookup.chain_hash()); + // This indicates faulty peers. + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) + } + } + RequestError::TooManyAttempts { cannot_process } => { + // We only consider the chain failed if we were unable to process it. + // We could have failed because one peer continually failed to send us + // bad blocks. We still allow other peers to send us this chain. Note + // that peers that do this, still get penalised. + if cannot_process { + self.failed_chains.insert(parent_lookup.chain_hash()); + } + // This indicates faulty peers. + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) + } + } + RequestError::NoPeers => { + // This happens if the peer disconnects while the block is being + // processed. Drop the request without extra penalty + } + } + } + + /* Error responses */ + + pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { + /* Check disconnection for single lookups */ + self.single_block_lookups.retain(|_, req| { + let should_drop_lookup = + req.should_drop_lookup_on_disconnected_peer(peer_id, cx, &self.log); + + !should_drop_lookup + }); /* Check disconnection for parent lookups */ while let Some(pos) = self @@ -367,39 +636,67 @@ impl BlockLookups { } /// An RPC error has occurred during a parent lookup. This function handles this case. 
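`handle_parent_request_error` above is essentially a scoring policy: chain-level failures blacklist the chain hash and penalize every peer that fed it, shutdown (`SendFailed`) and peer-loss (`NoPeers`) cases are dropped quietly, and `TooManyAttempts` only blacklists when processing, not downloading, was the bottleneck. Distilled into a sketch:

```rust
enum RequestError {
    SendFailed,
    ChainTooLong,
    TooManyAttempts { cannot_process: bool },
    NoPeers,
}

struct Outcome {
    blacklist_chain: bool,
    penalize_used_peers: bool,
}

fn policy(e: &RequestError) -> Outcome {
    match e {
        // Probably shutting down, or the peer vanished: no penalties.
        RequestError::SendFailed | RequestError::NoPeers => Outcome {
            blacklist_chain: false,
            penalize_used_peers: false,
        },
        // An over-long chain is always suspect.
        RequestError::ChainTooLong => Outcome {
            blacklist_chain: true,
            penalize_used_peers: true,
        },
        // Blacklist only if processing (not just downloading) kept failing;
        // a single bad peer should not poison the chain for everyone else.
        RequestError::TooManyAttempts { cannot_process } => Outcome {
            blacklist_chain: *cannot_process,
            penalize_used_peers: true,
        },
    }
}

fn main() {
    let o = policy(&RequestError::TooManyAttempts { cannot_process: false });
    assert!(!o.blacklist_chain && o.penalize_used_peers);
}
```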
- pub fn parent_lookup_failed( + pub fn parent_lookup_failed>( &mut self, - id: Id, + id: SingleLookupReqId, peer_id: PeerId, - cx: &mut SyncNetworkContext, + cx: &SyncNetworkContext, + error: RPCError, ) { - if let Some(pos) = self - .parent_lookups - .iter() - .position(|request| request.pending_response(id)) - { - let mut parent_lookup = self.parent_lookups.remove(pos); - parent_lookup.download_failed(); - trace!(self.log, "Parent lookup request failed"; &parent_lookup); - self.request_parent(parent_lookup, cx); - } else { - return debug!(self.log, "RPC failure for a parent lookup request that was not found"; "peer_id" => %peer_id); + let msg = error.as_static_str(); + let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { + debug!(self.log, + "RPC failure for a block parent lookup request that was not found"; + "peer_id" => %peer_id, + "error" => msg + ); + return; }; + R::request_state_mut(&mut parent_lookup.current_parent_request) + .register_failure_downloading(); + trace!(self.log, "Parent lookup block request failed"; &parent_lookup, "error" => msg); + + self.request_parent(parent_lookup, cx); + metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, self.parent_lookups.len() as i64, ); } - pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext) { - if let Some(mut request) = self.single_block_lookups.remove(&id) { - request.register_failure_downloading(); - trace!(self.log, "Single block lookup failed"; "block" => %request.hash); - if let Ok((peer_id, block_request)) = request.request_block() { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) { - self.single_block_lookups.insert(request_id, request); - } - } + /// An RPC error has occurred during a single lookup. This function handles this case.\ + pub fn single_block_lookup_failed>( + &mut self, + id: SingleLookupReqId, + peer_id: &PeerId, + cx: &SyncNetworkContext, + error: RPCError, + ) { + let msg = error.as_static_str(); + let log = self.log.clone(); + let Some(mut lookup) = self.get_single_lookup::(id) else { + debug!(log, "Error response to dropped lookup"; "error" => ?error); + return; + }; + let block_root = lookup.block_root(); + let request_state = R::request_state_mut(&mut lookup); + let response_type = R::response_type(); + trace!(log, + "Single lookup failed"; + "block_root" => ?block_root, + "error" => msg, + "peer_id" => %peer_id, + "response_type" => ?response_type + ); + let id = id.id; + request_state.register_failure_downloading(); + if let Err(e) = lookup.request_block_and_blobs(cx) { + debug!(self.log, + "Single lookup retry failed"; + "error" => ?e, + "block_root" => ?block_root, + ); + self.single_block_lookups.remove(&id); } metrics::set_gauge( @@ -410,33 +707,47 @@ impl BlockLookups { /* Processing responses */ - pub fn single_block_processed( + pub fn single_block_component_processed>( &mut self, - id: Id, - result: BlockProcessResult, + target_id: Id, + result: BlockProcessingResult, cx: &mut SyncNetworkContext, ) { - let mut req = match self.single_block_lookups.remove(&id) { - Some(req) => req, - None => { - return debug!( - self.log, - "Block processed for single block lookup not present" - ); - } + let Some(mut lookup) = self.single_block_lookups.remove(&target_id) else { + return; }; - let root = req.hash; - let peer_id = match req.processing_peer() { - Ok(peer) => peer, - Err(_) => return, + let root = lookup.block_root(); + let request_state = R::request_state_mut(&mut lookup); + + let Ok(peer_id) = 
request_state.get_state().processing_peer() else { + return; }; + debug!( + self.log, + "Block component processed for lookup"; + "response_type" => ?R::response_type(), + "block_root" => ?root, + ); match result { - BlockProcessResult::Ok => { - trace!(self.log, "Single block processing succeeded"; "block" => %root); - } - BlockProcessResult::Ignored => { + BlockProcessingResult::Ok(status) => match status { + AvailabilityProcessingStatus::Imported(root) => { + trace!(self.log, "Single block processing succeeded"; "block" => %root); + } + AvailabilityProcessingStatus::MissingComponents(_, _block_root) => { + match self.handle_missing_components::(cx, &mut lookup) { + Ok(()) => { + self.single_block_lookups.insert(target_id, lookup); + } + Err(e) => { + // Drop with an additional error. + warn!(self.log, "Single block lookup failed"; "block" => %root, "error" => ?e); + } + } + } + }, + BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request. warn!( @@ -445,84 +756,158 @@ impl BlockLookups { "action" => "dropping single block request" ); } - BlockProcessResult::Err(e) => { - trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); - match e { - BlockError::BlockIsAlreadyKnown => { - // No error here + BlockProcessingResult::Err(e) => { + match self.handle_single_lookup_block_error(cx, lookup, peer_id, e) { + Ok(Some(lookup)) => { + self.single_block_lookups.insert(target_id, lookup); } - BlockError::BeaconChainError(e) => { - // Internal error - error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + Ok(None) => { + // Drop without an additional error. } - BlockError::ParentUnknown(block) => { - self.search_parent(root, block, peer_id, cx); - } - ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { - // These errors indicate that the execution layer is offline - // and failed to validate the execution payload. Do not downscore peer. - debug!( - self.log, - "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"; - "root" => %root, - "error" => ?e - ); - } - other => { - warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "single_block_failure", - ); - // Try it again if possible. - req.register_failure_processing(); - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) - { - // insert with the new id - self.single_block_lookups.insert(request_id, req); - } - } + Err(e) => { + // Drop with an additional error. + warn!(self.log, "Single block lookup failed"; "block" => %root, "error" => ?e); } } } - } + }; + } - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); + /// Handles a `MissingComponents` block processing error. Handles peer scoring and retries. + /// + /// If this was the result of a block request, we can't determined if the block peer did anything + /// wrong. If we already had both a block and blobs response processed, we should penalize the + /// blobs peer because they did not provide all blobs on the initial request. 
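`handle_missing_components`, defined just below, only penalizes once both the block and blob responses have been processed; at that point a `MissingComponents` verdict can only mean the blob peer withheld sidecars. A small model of that rule:

```rust
#[derive(Default)]
struct Lookup {
    block_processed: bool,
    blobs_processed: bool,
}

enum Action {
    /// Still waiting on the other component; nothing to punish yet.
    Wait,
    /// Both components went through processing and data is still missing,
    /// so the blob peer must not have sent everything: penalize and retry.
    PenalizeBlobPeerAndRetry,
}

fn on_missing_components(lookup: &Lookup) -> Action {
    if lookup.block_processed && lookup.blobs_processed {
        Action::PenalizeBlobPeerAndRetry
    } else {
        Action::Wait
    }
}

fn main() {
    let partial = Lookup { block_processed: true, ..Default::default() };
    assert!(matches!(on_missing_components(&partial), Action::Wait));

    let full = Lookup { block_processed: true, blobs_processed: true };
    assert!(matches!(on_missing_components(&full), Action::PenalizeBlobPeerAndRetry));
}
```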
+ fn handle_missing_components>( + &self, + cx: &SyncNetworkContext, + lookup: &mut SingleBlockLookup, + ) -> Result<(), LookupRequestError> { + let request_state = R::request_state_mut(lookup); + + request_state.get_state_mut().component_processed = true; + if lookup.both_components_processed() { + lookup.penalize_blob_peer(cx); + + // Try it again if possible. + lookup + .blob_request_state + .state + .register_failure_processing(); + lookup.request_block_and_blobs(cx)?; + } + Ok(()) + } + + /// Handles peer scoring and retries related to a `BlockError` in response to a single block + /// or blob lookup processing result. + fn handle_single_lookup_block_error( + &mut self, + cx: &mut SyncNetworkContext, + mut lookup: SingleBlockLookup, + peer_id: PeerId, + e: BlockError, + ) -> Result>, LookupRequestError> { + let root = lookup.block_root(); + trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); + match e { + BlockError::BlockIsAlreadyKnown => { + // No error here + return Ok(None); + } + BlockError::BeaconChainError(e) => { + // Internal error + error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + return Ok(None); + } + BlockError::ParentUnknown(block) => { + let slot = block.slot(); + let parent_root = block.parent_root(); + lookup.add_child_components(block.into()); + lookup.request_block_and_blobs(cx)?; + self.search_parent(slot, root, parent_root, peer_id, cx); + } + ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { + // These errors indicate that the execution layer is offline + // and failed to validate the execution payload. Do not downscore peer. + debug!( + self.log, + "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"; + "root" => %root, + "error" => ?e + ); + return Ok(None); + } + BlockError::AvailabilityCheck(e) => match e.category() { + AvailabilityCheckErrorCategory::Internal => { + warn!(self.log, "Internal availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); + lookup + .block_request_state + .state + .register_failure_downloading(); + lookup + .blob_request_state + .state + .register_failure_downloading(); + lookup.request_block_and_blobs(cx)? + } + AvailabilityCheckErrorCategory::Malicious => { + warn!(self.log, "Availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); + lookup.handle_availability_check_failure(cx); + lookup.request_block_and_blobs(cx)? + } + }, + other => { + warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); + if let Ok(block_peer) = lookup.block_request_state.state.processing_peer() { + cx.report_peer( + block_peer, + PeerAction::MidToleranceError, + "single_block_failure", + ); + + // Try it again if possible. + lookup + .block_request_state + .state + .register_failure_processing(); + lookup.request_block_and_blobs(cx)? 
+ } + } + } + Ok(Some(lookup)) } pub fn parent_block_processed( &mut self, chain_hash: Hash256, - result: BlockProcessResult, + result: BlockProcessingResult, cx: &mut SyncNetworkContext, ) { - let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self + let index = self .parent_lookups .iter() .enumerate() - .find_map(|(pos, request)| { - request - .get_processing_peer(chain_hash) - .map(|peer| (pos, peer)) - }) { - (self.parent_lookups.remove(pos), peer) - } else { + .find(|(_, lookup)| lookup.chain_hash() == chain_hash) + .map(|(index, _)| index); + + let Some(mut parent_lookup) = index.map(|index| self.parent_lookups.remove(index)) else { return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; match &result { - BlockProcessResult::Ok => { - trace!(self.log, "Parent block processing succeeded"; &parent_lookup) - } - BlockProcessResult::Err(e) => { + BlockProcessingResult::Ok(status) => match status { + AvailabilityProcessingStatus::Imported(block_root) => { + trace!(self.log, "Parent block processing succeeded"; &parent_lookup, "block_root" => ?block_root) + } + AvailabilityProcessingStatus::MissingComponents(_, block_root) => { + trace!(self.log, "Parent missing parts, triggering single block lookup"; &parent_lookup, "block_root" => ?block_root) + } + }, + BlockProcessingResult::Err(e) => { trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e) } - BlockProcessResult::Ignored => { + BlockProcessingResult::Ignored => { trace!( self.log, "Parent block processing job was ignored"; @@ -533,32 +918,62 @@ impl BlockLookups { } match result { - BlockProcessResult::Err(BlockError::ParentUnknown(block)) => { - // need to keep looking for parents - // add the block back to the queue and continue the search - parent_lookup.add_block(block); + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + _, + block_root, + )) => { + let expected_block_root = parent_lookup.current_parent_request.block_root(); + if block_root != expected_block_root { + warn!( + self.log, + "Parent block processing result/request root mismatch"; + "request" => ?expected_block_root, + "result" => ?block_root + ); + return; + } + + // We only send parent blocks + blobs for processing together. This means a + // `MissingComponents` response here indicates missing blobs. Therefore we always + // register a blob processing failure here. + parent_lookup + .current_parent_request + .blob_request_state + .state + .register_failure_processing(); + match parent_lookup + .current_parent_request + .request_block_and_blobs(cx) + { + Ok(()) => self.parent_lookups.push(parent_lookup), + Err(e) => self.handle_parent_request_error(&mut parent_lookup, cx, e.into()), + } + } + BlockProcessingResult::Err(BlockError::ParentUnknown(block)) => { + parent_lookup.add_unknown_parent_block(block); self.request_parent(parent_lookup, cx); } - BlockProcessResult::Ok - | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) + | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. 
}) => { // Check if the beacon processor is available - let beacon_processor = match cx.beacon_processor_if_enabled() { - Some(beacon_processor) => beacon_processor, - None => { - return trace!( - self.log, - "Dropping parent chain segment that was ready for processing."; - parent_lookup - ); - } + let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { + return trace!( + self.log, + "Dropping parent chain segment that was ready for processing."; + parent_lookup + ); }; - let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing(); + let (chain_hash, blocks, hashes, block_request) = + parent_lookup.parts_for_processing(); + + let blocks = self.add_child_block_to_chain(chain_hash, blocks, cx).into(); + let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); match beacon_processor.send_chain_segment(process_id, blocks) { Ok(_) => { self.processing_parent_lookups - .insert(chain_hash, (hashes, request)); + .insert(chain_hash, (hashes, block_request)); } Err(e) => { error!( @@ -569,7 +984,7 @@ impl BlockLookups { } } } - ref e @ BlockProcessResult::Err(BlockError::ExecutionPayloadError(ref epe)) + ref e @ BlockProcessingResult::Err(BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -581,25 +996,10 @@ impl BlockLookups { "error" => ?e ); } - BlockProcessResult::Err(outcome) => { - // all else we consider the chain a failure and downvote the peer that sent - // us the last block - warn!( - self.log, "Invalid parent chain"; - "score_adjustment" => %PeerAction::MidToleranceError, - "outcome" => ?outcome, - "last_peer" => %peer_id, - ); - - // This currently can be a host of errors. We permit this due to the partial - // ambiguity. - cx.report_peer(peer_id, PeerAction::MidToleranceError, "parent_request_err"); - - // Try again if possible - parent_lookup.processing_failed(); - self.request_parent(parent_lookup, cx); + BlockProcessingResult::Err(outcome) => { + self.handle_parent_block_error(outcome, cx, parent_lookup); } - BlockProcessResult::Ignored => { + BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request. warn!( @@ -616,31 +1016,188 @@ impl BlockLookups { ); } + /// Find the child block that spawned the parent lookup request and add it to the chain + /// to send for processing. + fn add_child_block_to_chain( + &mut self, + chain_hash: Hash256, + mut blocks: VecDeque>, + cx: &SyncNetworkContext, + ) -> VecDeque> { + // Find the child block that spawned the parent lookup request and add it to the chain + // to send for processing. + if let Some(child_lookup_id) = self + .single_block_lookups + .iter() + .find_map(|(id, lookup)| (lookup.block_root() == chain_hash).then_some(*id)) + { + let Some(child_lookup) = self.single_block_lookups.get_mut(&child_lookup_id) else { + debug!(self.log, "Missing child for parent lookup request"; "child_root" => ?chain_hash); + return blocks; + }; + match child_lookup.get_cached_child_block() { + CachedChild::Ok(rpc_block) => { + // Insert this block at the front. 
This order is important because we later check + // for linear roots in `filter_chain_segment` + blocks.push_front(rpc_block); + } + CachedChild::DownloadIncomplete => { + trace!(self.log, "Parent lookup chain complete, awaiting child response"; "chain_hash" => ?chain_hash); + } + CachedChild::NotRequired => { + warn!(self.log, "Child not cached for parent lookup"; "chain_hash" => %chain_hash); + } + CachedChild::Err(e) => { + warn!( + self.log, + "Consistency error in child block triggering chain or parent lookups"; + "error" => ?e, + "chain_hash" => ?chain_hash + ); + child_lookup.handle_consistency_failure(cx); + if let Err(e) = child_lookup.request_block_and_blobs(cx) { + debug!(self.log, + "Failed to request block and blobs, dropping lookup"; + "error" => ?e + ); + self.single_block_lookups.remove(&child_lookup_id); + } + } + } + } else { + debug!(self.log, "Missing child for parent lookup request"; "child_root" => ?chain_hash); + }; + blocks + } + + /// Handle the peer scoring, retries, and logging related to a `BlockError` returned from + /// processing a block + blobs for a parent lookup. + fn handle_parent_block_error( + &mut self, + outcome: BlockError<::EthSpec>, + cx: &SyncNetworkContext, + mut parent_lookup: ParentLookup, + ) { + // We should always have a block peer. + let Ok(block_peer_id) = parent_lookup.block_processing_peer() else { + return; + }; + + // We may not have a blob peer if no blobs were required for this block. + let blob_peer_id = parent_lookup.blob_processing_peer().ok(); + + // All other errors are considered a chain failure; downvote the peer that sent + // us the last block. + warn!( + self.log, "Invalid parent chain"; + "score_adjustment" => %PeerAction::MidToleranceError, + "outcome" => ?outcome, + "block_peer_id" => %block_peer_id, + ); + // This currently can be a host of errors. We permit this due to the partial + // ambiguity. + cx.report_peer( + block_peer_id, + PeerAction::MidToleranceError, + "parent_request_err", + ); + // Don't downscore the same peer twice + if let Some(blob_peer_id) = blob_peer_id { + if block_peer_id != blob_peer_id { + debug!( + self.log, "Additionally down-scoring blob peer"; + "score_adjustment" => %PeerAction::MidToleranceError, + "outcome" => ?outcome, + "blob_peer_id" => %blob_peer_id, + ); + cx.report_peer( + blob_peer_id, + PeerAction::MidToleranceError, + "parent_request_err", + ); + } + } + + // Try again if possible + parent_lookup.processing_failed(); + self.request_parent(parent_lookup, cx); + } + pub fn parent_chain_processed( &mut self, chain_hash: Hash256, result: BatchProcessResult, - cx: &mut SyncNetworkContext, + cx: &SyncNetworkContext, ) { - let request = match self.processing_parent_lookups.remove(&chain_hash) { - Some((_hashes, request)) => request, - None => { - return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result) - } + let Some((_hashes, request)) = self.processing_parent_lookups.remove(&chain_hash) else { + return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result); }; debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); match result { BatchProcessResult::Success { .. } => { - // nothing to do. 
+ let Some(id) = self + .single_block_lookups + .iter() + .find_map(|(id, req)| (req.block_root() == chain_hash).then_some(*id)) + else { + warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); + return; + }; + + let Some(lookup) = self.single_block_lookups.get_mut(&id) else { + warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); + return; + }; + + match lookup.get_cached_child_block() { + CachedChild::Ok(rpc_block) => { + // This is the correct block, send it for processing + if self + .send_block_for_processing( + chain_hash, + rpc_block, + timestamp_now(), + BlockProcessType::SingleBlock { id }, + cx, + ) + .is_err() + { + // Remove to avoid inconsistencies + self.single_block_lookups.remove(&id); + } + } + CachedChild::DownloadIncomplete => { + trace!(self.log, "Parent chain complete, awaiting child response"; "chain_hash" => %chain_hash); + } + CachedChild::NotRequired => { + warn!(self.log, "Child not cached for parent lookup"; "chain_hash" => %chain_hash); + } + CachedChild::Err(e) => { + warn!( + self.log, + "Consistency error in child block triggering parent lookup"; + "chain_hash" => %chain_hash, + "error" => ?e + ); + lookup.handle_consistency_failure(cx); + if let Err(e) = lookup.request_block_and_blobs(cx) { + debug!(self.log, + "Failed to request block and blobs, dropping lookup"; + "error" => ?e + ); + self.single_block_lookups.remove(&id); + } + } + } } BatchProcessResult::FaultyFailure { imported_blocks: _, penalty, } => { self.failed_chains.insert(chain_hash); - for peer_id in request.used_peers { - cx.report_peer(peer_id, penalty, "parent_chain_failure") + for peer_source in request.all_peers() { + cx.report_peer(peer_source, penalty, "parent_chain_failure") } } BatchProcessResult::NonFaultyFailure => { @@ -657,13 +1214,13 @@ impl BlockLookups { /* Helper functions */ fn send_block_for_processing( - &mut self, + &self, block_root: Hash256, - block: Arc>, + block: RpcBlock, duration: Duration, process_type: BlockProcessType, - cx: &mut SyncNetworkContext, - ) -> Result<(), ()> { + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { match cx.beacon_processor_if_enabled() { Some(beacon_processor) => { trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type); @@ -678,61 +1235,68 @@ impl BlockLookups { "Failed to send sync block to processor"; "error" => ?e ); - Err(()) + Err(LookupRequestError::SendFailed( + "beacon processor send failure", + )) } else { Ok(()) } } None => { trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block_root); - Err(()) + Err(LookupRequestError::SendFailed( + "beacon processor unavailable", + )) } } } - fn request_parent( - &mut self, - mut parent_lookup: ParentLookup, - cx: &mut SyncNetworkContext, - ) { - match parent_lookup.request_parent(cx) { - Err(e) => { - debug!(self.log, "Failed to request parent"; &parent_lookup, "error" => e.as_static()); - match e { - parent_lookup::RequestError::SendFailed(_) => { - // Probably shutting down, nothing to do here. Drop the request - } - parent_lookup::RequestError::ChainTooLong => { - self.failed_chains.insert(parent_lookup.chain_hash()); - // This indicates faulty peers. - for &peer_id in parent_lookup.used_peers() { - cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) - } - } - parent_lookup::RequestError::TooManyAttempts { cannot_process } => { - // We only consider the chain failed if we were unable to process it. 
- // We could have failed because one peer continually failed to send us - // bad blocks. We still allow other peers to send us this chain. Note - // that peers that do this, still get penalised. - if cannot_process { - self.failed_chains.insert(parent_lookup.chain_hash()); - } - // This indicates faulty peers. - for &peer_id in parent_lookup.used_peers() { - cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) - } - } - parent_lookup::RequestError::NoPeers => { - // This happens if the peer disconnects while the block is being - // processed. Drop the request without extra penalty - } + fn send_blobs_for_processing( + &self, + block_root: Hash256, + blobs: FixedBlobSidecarList, + duration: Duration, + process_type: BlockProcessType, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + match cx.beacon_processor_if_enabled() { + Some(beacon_processor) => { + trace!(self.log, "Sending blobs for processing"; "block" => ?block_root, "process_type" => ?process_type); + if let Err(e) = + beacon_processor.send_rpc_blobs(block_root, blobs, duration, process_type) + { + error!( + self.log, + "Failed to send sync blobs to processor"; + "error" => ?e + ); + Err(LookupRequestError::SendFailed( + "beacon processor send failure", + )) + } else { + Ok(()) } } - Ok(_) => { - debug!(self.log, "Requesting parent"; &parent_lookup); - self.parent_lookups.push(parent_lookup) + None => { + trace!(self.log, "Dropping blobs ready for processing. Beacon processor not available"; "block_root" => %block_root); + Err(LookupRequestError::SendFailed( + "beacon processor unavailable", + )) } } + } + + /// Attempts to request the next unknown parent. This method handles peer scoring and dropping + /// the lookup in the event of failure. + fn request_parent(&mut self, mut parent_lookup: ParentLookup, cx: &SyncNetworkContext) { + let response = parent_lookup.request_parent(cx); + + match response { + Err(e) => { + self.handle_parent_request_error(&mut parent_lookup, cx, e); + } + Ok(_) => self.parent_lookups.push(parent_lookup), + } // We remove and add back again requests so we want this updated regardless of outcome. metrics::set_gauge( @@ -743,7 +1307,9 @@ impl BlockLookups { /// Drops all the single block requests and returns how many requests were dropped. pub fn drop_single_block_requests(&mut self) -> usize { - self.single_block_lookups.drain().len() + let requests_to_drop = self.single_block_lookups.len(); + self.single_block_lookups.clear(); + requests_to_drop } /// Drops all the parent chain requests and returns how many requests were dropped. 
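The `CachedChild` states this patch introduces are consumed twice above (in `add_child_block_to_chain` and `parent_chain_processed`), always with the same four-way branch. The following is a minimal, self-contained sketch of that consumer pattern; it is illustrative only, with the payloads modeled as `String`s and the handlers as placeholder prints rather than the crate's actual types:

// Mirrors the shape of the `CachedChild` enum added in single_block_lookup.rs;
// payload types are simplified for the sketch.
#[derive(Debug)]
enum CachedChild {
    /// All components received and consistency-checked; ready for processing.
    Ok(String),
    /// Caching is required but some component is still in flight.
    DownloadIncomplete,
    /// No child caching needed; responses go straight to processing.
    NotRequired,
    /// Block/blob consistency check failed.
    Err(String),
}

fn handle_cached_child(child: CachedChild) {
    match child {
        CachedChild::Ok(block) => println!("send {block} for processing"),
        CachedChild::DownloadIncomplete => println!("await the remaining components"),
        CachedChild::NotRequired => println!("process the response directly"),
        CachedChild::Err(e) => println!("consistency failure ({e}): penalize blob peer, re-request"),
    }
}

fn main() {
    handle_cached_child(CachedChild::DownloadIncomplete);
}

Every call site in the patch follows this branching: `Ok` is sent to the beacon processor, `DownloadIncomplete` leaves the lookup waiting, `NotRequired` skips caching, and `Err` calls `handle_consistency_failure` and retries via `request_block_and_blobs`.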
diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index a2c2f1d1c..5c2e90b48 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,17 +1,18 @@ -use super::RootBlockTuple; +use super::single_block_lookup::{LookupRequestError, LookupVerifyError, SingleBlockLookup}; +use super::{DownloadedBlock, PeerId}; +use crate::sync::block_lookups::common::Parent; +use crate::sync::block_lookups::common::RequestState; +use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; +use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_availability_checker::{ChildComponents, DataAvailabilityChecker}; use beacon_chain::BeaconChainTypes; -use lighthouse_network::PeerId; +use itertools::Itertools; +use std::collections::VecDeque; use std::sync::Arc; -use store::{Hash256, SignedBeaconBlock}; +use store::Hash256; use strum::IntoStaticStr; -use crate::sync::{ - manager::{Id, SLOT_IMPORT_TOLERANCE}, - network_context::SyncNetworkContext, -}; - -use super::single_block_lookup::{self, SingleBlockRequest}; - /// How many attempts we try to find a parent of a block before we give up trying. pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; /// The maximum depth we will search for a parent block. In principle we should have sync'd any @@ -24,18 +25,20 @@ pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, /// Request of the last parent. - current_parent_request: SingleBlockRequest, - /// Id of the last parent request. - current_parent_request_id: Option, + pub current_parent_request: SingleBlockLookup, } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] -pub enum VerifyError { +pub enum ParentVerifyError { RootMismatch, NoBlockReturned, + NotEnoughBlobsReturned, ExtraBlocksReturned, + UnrequestedBlobId, + ExtraBlobsReturned, + InvalidIndex(u64), PreviousFailure { parent_root: Hash256 }, } @@ -53,62 +56,86 @@ pub enum RequestError { } impl ParentLookup { + pub fn new( + block_root: Hash256, + parent_root: Hash256, + peer_id: PeerId, + da_checker: Arc>, + cx: &mut SyncNetworkContext, + ) -> Self { + let current_parent_request = SingleBlockLookup::new( + parent_root, + Some(ChildComponents::empty(block_root)), + &[peer_id], + da_checker, + cx.next_id(), + ); + + Self { + chain_hash: block_root, + downloaded_blocks: vec![], + current_parent_request, + } + } + pub fn contains_block(&self, block_root: &Hash256) -> bool { self.downloaded_blocks .iter() .any(|(root, _d_block)| root == block_root) } - pub fn new( - block_root: Hash256, - block: Arc>, - peer_id: PeerId, - ) -> Self { - let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); - - Self { - chain_hash: block_root, - downloaded_blocks: vec![(block_root, block)], - current_parent_request, - current_parent_request_id: None, - } + pub fn is_for_block(&self, block_root: Hash256) -> bool { + self.current_parent_request.is_for_block(block_root) } /// Attempts to request the next unknown parent. If the request fails, it should be removed. 
- pub fn request_parent(&mut self, cx: &mut SyncNetworkContext) -> Result<(), RequestError> { + pub fn request_parent(&mut self, cx: &SyncNetworkContext) -> Result<(), RequestError> { // check to make sure this request hasn't failed - if self.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE { return Err(RequestError::ChainTooLong); } - let (peer_id, request) = self.current_parent_request.request_block()?; - match cx.parent_lookup_request(peer_id, request) { - Ok(request_id) => { - self.current_parent_request_id = Some(request_id); - Ok(()) - } - Err(reason) => { - self.current_parent_request_id = None; - Err(RequestError::SendFailed(reason)) - } - } + self.current_parent_request + .request_block_and_blobs(cx) + .map_err(Into::into) } pub fn check_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> { - self.current_parent_request.check_peer_disconnected(peer_id) + self.current_parent_request + .block_request_state + .state + .check_peer_disconnected(peer_id) + .and_then(|()| { + self.current_parent_request + .blob_request_state + .state + .check_peer_disconnected(peer_id) + }) } - pub fn add_block(&mut self, block: Arc>) { + pub fn add_unknown_parent_block(&mut self, block: RpcBlock) { let next_parent = block.parent_root(); - let current_root = self.current_parent_request.hash; + // Cache the block. + let current_root = self.current_parent_request.block_root(); self.downloaded_blocks.push((current_root, block)); - self.current_parent_request.hash = next_parent; - self.current_parent_request.state = single_block_lookup::State::AwaitingDownload; - self.current_parent_request_id = None; + + // Update the parent request. + self.current_parent_request + .update_requested_parent_block(next_parent) } - pub fn pending_response(&self, req_id: Id) -> bool { - self.current_parent_request_id == Some(req_id) + pub fn block_processing_peer(&self) -> Result { + self.current_parent_request + .block_request_state + .state + .processing_peer() + } + + pub fn blob_processing_peer(&self) -> Result { + self.current_parent_request + .blob_request_state + .state + .processing_peer() } /// Consumes the parent request and destructures it into its parts. 
@@ -117,21 +144,20 @@ impl ParentLookup { self, ) -> ( Hash256, - Vec>>, + VecDeque>, Vec, - SingleBlockRequest, + SingleBlockLookup, ) { let ParentLookup { chain_hash, downloaded_blocks, current_parent_request, - current_parent_request_id: _, } = self; let block_count = downloaded_blocks.len(); - let mut blocks = Vec::with_capacity(block_count); + let mut blocks = VecDeque::with_capacity(block_count); let mut hashes = Vec::with_capacity(block_count); - for (hash, block) in downloaded_blocks { - blocks.push(block); + for (hash, block) in downloaded_blocks.into_iter() { + blocks.push_back(block); hashes.push(hash); } (chain_hash, blocks, hashes, current_parent_request) @@ -142,81 +168,97 @@ impl ParentLookup { self.chain_hash } - pub fn download_failed(&mut self) { - self.current_parent_request.register_failure_downloading(); - self.current_parent_request_id = None; - } - pub fn processing_failed(&mut self) { - self.current_parent_request.register_failure_processing(); - self.current_parent_request_id = None; + self.current_parent_request + .block_request_state + .state + .register_failure_processing(); + self.current_parent_request + .blob_request_state + .state + .register_failure_processing(); + if let Some(components) = self.current_parent_request.child_components.as_mut() { + components.downloaded_block = None; + components.downloaded_blobs = <_>::default(); + } } /// Verifies that the received block is what we requested. If so, parent lookup now waits for /// the processing result of the block. - pub fn verify_block( + pub fn verify_response>( &mut self, - block: Option>>, + block: Option, failed_chains: &mut lru_cache::LRUTimeCache, - ) -> Result>, VerifyError> { - let root_and_block = self.current_parent_request.verify_block(block)?; + ) -> Result, ParentVerifyError> { + let expected_block_root = self.current_parent_request.block_root(); + let request_state = R::request_state_mut(&mut self.current_parent_request); + let root_and_verified = request_state.verify_response(expected_block_root, block)?; // check if the parent of this block isn't in the failed cache. If it is, this chain should // be dropped and the peer downscored. - if let Some(parent_root) = root_and_block + if let Some(parent_root) = root_and_verified .as_ref() - .map(|(_, block)| block.parent_root()) + .and_then(|block| R::get_parent_root(block)) { if failed_chains.contains(&parent_root) { - self.current_parent_request.register_failure_downloading(); - self.current_parent_request_id = None; - return Err(VerifyError::PreviousFailure { parent_root }); + request_state.register_failure_downloading(); + return Err(ParentVerifyError::PreviousFailure { parent_root }); } } - Ok(root_and_block) + Ok(root_and_verified) } - pub fn get_processing_peer(&self, chain_hash: Hash256) -> Option { - if self.chain_hash == chain_hash { - return self.current_parent_request.processing_peer().ok(); - } - None + pub fn add_peer(&mut self, peer: PeerId) { + self.current_parent_request.add_peer(peer) } - #[cfg(test)] - pub fn failed_attempts(&self) -> u8 { - self.current_parent_request.failed_attempts() - } - - pub fn add_peer(&mut self, block_root: &Hash256, peer_id: &PeerId) -> bool { - self.current_parent_request.add_peer(block_root, peer_id) + /// Adds a list of peers to the parent request. 
+ pub fn add_peers(&mut self, peers: &[PeerId]) { + self.current_parent_request.add_peers(peers) } pub fn used_peers(&self) -> impl Iterator + '_ { - self.current_parent_request.used_peers.iter() + self.current_parent_request + .block_request_state + .state + .used_peers + .iter() + .chain( + self.current_parent_request + .blob_request_state + .state + .used_peers + .iter(), + ) + .unique() } } -impl From for VerifyError { - fn from(e: super::single_block_lookup::VerifyError) -> Self { - use super::single_block_lookup::VerifyError as E; +impl From for ParentVerifyError { + fn from(e: LookupVerifyError) -> Self { + use LookupVerifyError as E; match e { - E::RootMismatch => VerifyError::RootMismatch, - E::NoBlockReturned => VerifyError::NoBlockReturned, - E::ExtraBlocksReturned => VerifyError::ExtraBlocksReturned, + E::RootMismatch => ParentVerifyError::RootMismatch, + E::NoBlockReturned => ParentVerifyError::NoBlockReturned, + E::ExtraBlocksReturned => ParentVerifyError::ExtraBlocksReturned, + E::UnrequestedBlobId => ParentVerifyError::UnrequestedBlobId, + E::ExtraBlobsReturned => ParentVerifyError::ExtraBlobsReturned, + E::InvalidIndex(index) => ParentVerifyError::InvalidIndex(index), + E::NotEnoughBlobsReturned => ParentVerifyError::NotEnoughBlobsReturned, } } } -impl From for RequestError { - fn from(e: super::single_block_lookup::LookupRequestError) -> Self { - use super::single_block_lookup::LookupRequestError as E; +impl From for RequestError { + fn from(e: LookupRequestError) -> Self { + use LookupRequestError as E; match e { E::TooManyAttempts { cannot_process } => { RequestError::TooManyAttempts { cannot_process } } E::NoPeers => RequestError::NoPeers, + E::SendFailed(msg) => RequestError::SendFailed(msg), } } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 62ca68e7b..8c60621f1 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,30 +1,23 @@ +use super::PeerId; +use crate::sync::block_lookups::common::{Lookup, RequestState}; +use crate::sync::block_lookups::Id; +use crate::sync::network_context::SyncNetworkContext; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_availability_checker::{ + AvailabilityCheckError, DataAvailabilityChecker, MissingBlobs, +}; +use beacon_chain::data_availability_checker::{AvailabilityView, ChildComponents}; +use beacon_chain::BeaconChainTypes; +use lighthouse_network::PeerAction; +use slog::{trace, Logger}; use std::collections::HashSet; +use std::fmt::Debug; +use std::marker::PhantomData; use std::sync::Arc; - -use super::RootBlockTuple; -use beacon_chain::get_block_root; -use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; -use rand::seq::IteratorRandom; -use ssz_types::VariableList; -use store::{EthSpec, Hash256, SignedBeaconBlock}; +use store::Hash256; use strum::IntoStaticStr; - -/// Object representing a single block lookup request. -#[derive(PartialEq, Eq)] -pub struct SingleBlockRequest { - /// The hash of the requested block. - pub hash: Hash256, - /// State of this request. - pub state: State, - /// Peers that should have this block. - pub available_peers: HashSet, - /// Peers from which we have requested this block. - pub used_peers: HashSet, - /// How many times have we attempted to process this block. - failed_processing: u8, - /// How many times have we attempted to download this block. 
- failed_downloading: u8, -} +use types::blob_sidecar::FixedBlobSidecarList; +use types::EthSpec; #[derive(Debug, PartialEq, Eq)] pub enum State { @@ -34,10 +27,14 @@ pub enum State { } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] -pub enum VerifyError { +pub enum LookupVerifyError { RootMismatch, NoBlockReturned, ExtraBlocksReturned, + UnrequestedBlobId, + ExtraBlobsReturned, + NotEnoughBlobsReturned, + InvalidIndex(u64), } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] @@ -48,17 +45,375 @@ pub enum LookupRequestError { cannot_process: bool, }, NoPeers, + SendFailed(&'static str), } -impl SingleBlockRequest { - pub fn new(hash: Hash256, peer_id: PeerId) -> Self { +pub struct SingleBlockLookup { + pub id: Id, + pub block_request_state: BlockRequestState, + pub blob_request_state: BlobRequestState, + pub da_checker: Arc>, + /// Only necessary for requests triggered by an `UnknownBlockParent` or `UnknownBlobParent` + /// because any blocks or blobs without parents won't hit the data availability cache. + pub child_components: Option>, +} + +impl SingleBlockLookup { + pub fn new( + requested_block_root: Hash256, + child_components: Option>, + peers: &[PeerId], + da_checker: Arc>, + id: Id, + ) -> Self { + let is_deneb = da_checker.is_deneb(); + Self { + id, + block_request_state: BlockRequestState::new(requested_block_root, peers), + blob_request_state: BlobRequestState::new(requested_block_root, peers, is_deneb), + da_checker, + child_components, + } + } + + /// Get the block root that is being requested. + pub fn block_root(&self) -> Hash256 { + self.block_request_state.requested_block_root + } + + /// Check that the block root matches the requested block root. + pub fn is_for_block(&self, block_root: Hash256) -> bool { + self.block_root() == block_root + } + + /// Update the requested block; this should only be used in a chain of parent lookups to request + /// the next parent. + pub fn update_requested_parent_block(&mut self, block_root: Hash256) { + self.block_request_state.requested_block_root = block_root; + self.block_request_state.state.state = State::AwaitingDownload; + self.blob_request_state.state.state = State::AwaitingDownload; + self.block_request_state.state.component_downloaded = false; + self.blob_request_state.state.component_downloaded = false; + self.block_request_state.state.component_processed = false; + self.blob_request_state.state.component_processed = false; + self.child_components = Some(ChildComponents::empty(block_root)); + } + + /// Get all unique peers across block and blob requests. + pub fn all_peers(&self) -> HashSet { + let mut all_peers = self.block_request_state.state.used_peers.clone(); + all_peers.extend(self.blob_request_state.state.used_peers.clone()); + all_peers + } + + /// Send the necessary requests for blocks and/or blobs. This will check whether we have + /// downloaded the block and/or blobs already and will not send requests if so. It will also + /// inspect the request state of blocks and blobs to ensure we are not already processing or + /// downloading the block and/or blobs. 
+ pub fn request_block_and_blobs( + &mut self, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let block_already_downloaded = self.block_already_downloaded(); + let blobs_already_downloaded = self.blobs_already_downloaded(); + + if !block_already_downloaded { + self.block_request_state + .build_request_and_send(self.id, cx)?; + } + if !blobs_already_downloaded { + self.blob_request_state + .build_request_and_send(self.id, cx)?; + } + Ok(()) + } + + /// Returns a `CachedChild`, which is a wrapper around an `RpcBlock` that is either: + /// + /// 1. `NotRequired`: there is no child caching required for this lookup. + /// 2. `DownloadIncomplete`: Child caching is required, but not all components have been downloaded yet. + /// 3. `Ok`: The child is required and we have downloaded it. + /// 4. `Err`: The child is required, but has failed consistency checks. + pub fn get_cached_child_block(&self) -> CachedChild { + if let Some(components) = self.child_components.as_ref() { + let Some(block) = components.downloaded_block.as_ref() else { + return CachedChild::DownloadIncomplete; + }; + + if !self.missing_blob_ids().is_empty() { + return CachedChild::DownloadIncomplete; + } + + match RpcBlock::new_from_fixed( + self.block_request_state.requested_block_root, + block.clone(), + components.downloaded_blobs.clone(), + ) { + Ok(rpc_block) => CachedChild::Ok(rpc_block), + Err(e) => CachedChild::Err(e), + } + } else { + CachedChild::NotRequired + } + } + + /// Accepts a verified response and adds it to the child components if required. This method + /// returns a `CachedChild` which provides a completed block + blob response if all components have been + /// received, or information about whether the child is required and if it has been downloaded. + pub fn add_response>( + &mut self, + verified_response: R::VerifiedResponseType, + ) -> CachedChild { + if let Some(child_components) = self.child_components.as_mut() { + R::add_to_child_components(verified_response, child_components); + self.get_cached_child_block() + } else { + CachedChild::NotRequired + } + } + + /// Add a child component to the lookup request. Merges with any existing child components. + pub fn add_child_components(&mut self, components: ChildComponents) { + if let Some(ref mut existing_components) = self.child_components { + let ChildComponents { + block_root: _, + downloaded_block, + downloaded_blobs, + } = components; + if let Some(block) = downloaded_block { + existing_components.merge_block(block); + } + existing_components.merge_blobs(downloaded_blobs); + } else { + self.child_components = Some(components); + } + } + + /// Add the given peer to both block and blob request states. + pub fn add_peer(&mut self, peer_id: PeerId) { + self.block_request_state.state.add_peer(&peer_id); + self.blob_request_state.state.add_peer(&peer_id); + } + + /// Add all given peers to both block and blob request states. + pub fn add_peers(&mut self, peers: &[PeerId]) { + for peer in peers { + self.add_peer(*peer); + } + } + + /// Returns true if both the block and blob components have been downloaded. + pub fn both_components_downloaded(&self) -> bool { + self.block_request_state.state.component_downloaded + && self.blob_request_state.state.component_downloaded + } + + /// Returns true if both the block and blob components have been processed. 
+ pub fn both_components_processed(&self) -> bool { + self.block_request_state.state.component_processed + && self.blob_request_state.state.component_processed + } + + /// Checks both the block and blob request states to see if the peer is disconnected. + /// + /// Returns true if the lookup should be dropped. + pub fn should_drop_lookup_on_disconnected_peer( + &mut self, + peer_id: &PeerId, + cx: &SyncNetworkContext, + log: &Logger, + ) -> bool { + let block_root = self.block_root(); + let block_peer_disconnected = self + .block_request_state + .state + .check_peer_disconnected(peer_id) + .is_err(); + let blob_peer_disconnected = self + .blob_request_state + .state + .check_peer_disconnected(peer_id) + .is_err(); + + if block_peer_disconnected || blob_peer_disconnected { + if let Err(e) = self.request_block_and_blobs(cx) { + trace!(log, "Single lookup failed on peer disconnection"; "block_root" => ?block_root, "error" => ?e); + return true; + } + } + false + } + + /// Returns `true` if the block has already been downloaded. + pub(crate) fn block_already_downloaded(&self) -> bool { + if let Some(components) = self.child_components.as_ref() { + components.block_exists() + } else { + self.da_checker.has_block(&self.block_root()) + } + } + + /// Updates the `requested_ids` field of the `BlobRequestState` with the most recent picture + /// of which blobs still need to be requested. Returns `true` if there are no more blobs to + /// request. + pub(crate) fn blobs_already_downloaded(&mut self) -> bool { + self.update_blobs_request(); + self.blob_request_state.requested_ids.is_empty() + } + + /// Updates this request with the most recent picture of which blobs still need to be requested. + pub fn update_blobs_request(&mut self) { + self.blob_request_state.requested_ids = self.missing_blob_ids(); + } + + /// If `child_components` is `Some`, we know the block's components won't hit the data + /// availability cache, so we compute missing blobs from the child components; we only + /// consult the availability checker's processing cache when `child_components` is `None`. + pub(crate) fn missing_blob_ids(&self) -> MissingBlobs { + let block_root = self.block_root(); + if let Some(components) = self.child_components.as_ref() { + self.da_checker.get_missing_blob_ids(block_root, components) + } else { + let Some(processing_availability_view) = + self.da_checker.get_processing_components(block_root) + else { + return MissingBlobs::new_without_block(block_root, self.da_checker.is_deneb()); + }; + self.da_checker + .get_missing_blob_ids(block_root, &processing_availability_view) + } + } + + /// Penalizes a blob peer if it should have blobs but didn't return them to us. + pub fn penalize_blob_peer(&mut self, cx: &SyncNetworkContext) { + if let Ok(blob_peer) = self.blob_request_state.state.processing_peer() { + cx.report_peer( + blob_peer, + PeerAction::MidToleranceError, + "single_blob_failure", + ); + } + } + + /// This failure occurs on download, so register a failure downloading, penalize the peer + /// and clear the blob cache. + pub fn handle_consistency_failure(&mut self, cx: &SyncNetworkContext) { + self.penalize_blob_peer(cx); + if let Some(cached_child) = self.child_components.as_mut() { + cached_child.clear_blobs(); + } + self.blob_request_state.state.register_failure_downloading() + } + + /// This failure occurs after processing, so register a failure processing, penalize the peer + /// and clear the blob cache. 
+ pub fn handle_availability_check_failure(&mut self, cx: &SyncNetworkContext) { + self.penalize_blob_peer(cx); + if let Some(cached_child) = self.child_components.as_mut() { + cached_child.clear_blobs(); + } + self.blob_request_state.state.register_failure_processing() + } +} + +/// The state of the blob request component of a `SingleBlockLookup`. +pub struct BlobRequestState { + /// The latest picture of which blobs still need to be requested. This includes information + /// from both block/blobs downloaded in the network layer and any blocks/blobs that exist in + /// the data availability checker. + pub requested_ids: MissingBlobs, + /// Where we store blobs until we receive the stream terminator. + pub blob_download_queue: FixedBlobSidecarList, + pub state: SingleLookupRequestState, + _phantom: PhantomData, +} + +impl BlobRequestState { + pub fn new(block_root: Hash256, peer_source: &[PeerId], is_deneb: bool) -> Self { + let default_ids = MissingBlobs::new_without_block(block_root, is_deneb); + Self { + requested_ids: default_ids, + blob_download_queue: <_>::default(), + state: SingleLookupRequestState::new(peer_source), + _phantom: PhantomData, + } + } +} + +/// The state of the block request component of a `SingleBlockLookup`. +pub struct BlockRequestState { + pub requested_block_root: Hash256, + pub state: SingleLookupRequestState, + _phantom: PhantomData, +} + +impl BlockRequestState { + pub fn new(block_root: Hash256, peers: &[PeerId]) -> Self { + Self { + requested_block_root: block_root, + state: SingleLookupRequestState::new(peers), + _phantom: PhantomData, + } + } +} + +/// This is the status of cached components for a lookup if they are required. It provides information +/// about whether we should send a response immediately for processing, whether we require more +/// responses, or whether all cached components have been received and the reconstructed block +/// should be sent for processing. +pub enum CachedChild { + /// All child components have been received; this is the reconstructed block, including all blobs. + /// It has been checked for consistency between blobs and block, but no consensus checks have + /// been performed and no kzg verification has been performed. + Ok(RpcBlock), + /// Not all child components have been received yet. + DownloadIncomplete, + /// Child components should not be cached; send this directly for processing. + NotRequired, + /// There was an error during consistency checks between block and blobs. + Err(AvailabilityCheckError), +} +/// Object representing the state of a single block or blob lookup request. +#[derive(PartialEq, Eq, Debug)] +pub struct SingleLookupRequestState { + /// State of this request. + pub state: State, + /// Peers that should have this block or blob. + pub available_peers: HashSet, + /// Peers from which we have requested this block or blob. + pub used_peers: HashSet, + /// How many times have we attempted to process this block or blob. + pub failed_processing: u8, + /// How many times have we attempted to download this block or blob. + pub failed_downloading: u8, + /// Whether or not we have downloaded this block or blob. + pub component_downloaded: bool, + /// Whether or not we have processed this block or blob. + pub component_processed: bool, + /// Should be incremented every time this request is retried. The purpose of this is to + /// differentiate retries of the same block/blob request within a lookup. We currently penalize + /// peers and retry requests prior to receiving the stream terminator. This means responses + /// from a prior request may arrive after a new request has been sent; this counter allows + /// us to differentiate these two responses. + pub req_counter: u32, +} + +impl SingleLookupRequestState { + pub fn new(peers: &[PeerId]) -> Self { + let mut available_peers = HashSet::default(); + for peer in peers.iter().copied() { + available_peers.insert(peer); + } + Self { - hash, state: State::AwaitingDownload, - available_peers: HashSet::from([peer_id]), + available_peers, used_peers: HashSet::default(), failed_processing: 0, failed_downloading: 0, + component_downloaded: false, + component_processed: false, + req_counter: 0, } } @@ -80,12 +435,9 @@ impl SingleBlockRequest { self.failed_processing + self.failed_downloading } - pub fn add_peer(&mut self, hash: &Hash256, peer_id: &PeerId) -> bool { - let is_useful = &self.hash == hash; - if is_useful { - self.available_peers.insert(*peer_id); - } - is_useful + /// This method should be used for peers wrapped in `PeerId::BlockAndBlobs`. + pub fn add_peer(&mut self, peer_id: &PeerId) { + self.available_peers.insert(*peer_id); } /// If a peer disconnects, this request could be failed. If so, an error is returned @@ -101,70 +453,8 @@ impl SingleBlockRequest { Ok(()) } - /// Verifies if the received block matches the requested one. - /// Returns the block for processing if the response is what we expected. - pub fn verify_block( - &mut self, - block: Option>>, - ) -> Result>, VerifyError> { - match self.state { - State::AwaitingDownload => { - self.register_failure_downloading(); - Err(VerifyError::ExtraBlocksReturned) - } - State::Downloading { peer_id } => match block { - Some(block) => { - // Compute the block root using this specific function so that we can get timing - // metrics. - let block_root = get_block_root(&block); - if block_root != self.hash { - // return an error and drop the block - // NOTE: we take this is as a download failure to prevent counting the - // attempt as a chain failure, but simply a peer failure. - self.register_failure_downloading(); - Err(VerifyError::RootMismatch) - } else { - // Return the block for processing. - self.state = State::Processing { peer_id }; - Ok(Some((block_root, block))) - } - } - None => { - self.register_failure_downloading(); - Err(VerifyError::NoBlockReturned) - } - }, - State::Processing { peer_id: _ } => match block { - Some(_) => { - // We sent the block for processing and received an extra block. - self.register_failure_downloading(); - Err(VerifyError::ExtraBlocksReturned) - } - None => { - // This is simply the stream termination and we are already processing the - // block - Ok(None) - } - }, - } - } - - pub fn request_block(&mut self) -> Result<(PeerId, BlocksByRootRequest), LookupRequestError> { - debug_assert!(matches!(self.state, State::AwaitingDownload)); - if self.failed_attempts() >= MAX_ATTEMPTS { - Err(LookupRequestError::TooManyAttempts { - cannot_process: self.failed_processing >= self.failed_downloading, - }) - } else if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { - let request = BlocksByRootRequest::new(VariableList::from(vec![self.hash])); - self.state = State::Downloading { peer_id }; - self.used_peers.insert(peer_id); - Ok((peer_id, request)) - } else { - Err(LookupRequestError::NoPeers) - } - } - + /// Returns the peer id we downloaded from if we have downloaded a verified block; otherwise + /// returns an error. 
pub fn processing_peer(&self) -> Result { if let State::Processing { peer_id } = &self.state { Ok(*peer_id) @@ -174,15 +464,40 @@ impl SingleBlockRequest { } } -impl slog::Value for SingleBlockRequest { +impl slog::Value for SingleBlockLookup { + fn serialize( + &self, + _record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + serializer.emit_str("request", key)?; + serializer.emit_arguments("lookup_type", &format_args!("{:?}", L::lookup_type()))?; + serializer.emit_arguments("hash", &format_args!("{}", self.block_root()))?; + serializer.emit_arguments( + "blob_ids", + &format_args!("{:?}", self.blob_request_state.requested_ids.indices()), + )?; + serializer.emit_arguments( + "block_request_state.state", + &format_args!("{:?}", self.block_request_state.state), + )?; + serializer.emit_arguments( + "blob_request_state.state", + &format_args!("{:?}", self.blob_request_state.state), + )?; + slog::Result::Ok(()) + } +} + +impl slog::Value for SingleLookupRequestState { fn serialize( &self, record: &slog::Record, key: slog::Key, serializer: &mut dyn slog::Serializer, ) -> slog::Result { - serializer.emit_str("request", key)?; - serializer.emit_arguments("hash", &format_args!("{}", self.hash))?; + serializer.emit_str("request_state", key)?; match &self.state { State::AwaitingDownload => { "awaiting_download".serialize(record, "state", serializer)? @@ -203,8 +518,19 @@ impl slog::Value for SingleBlockRequest { #[cfg(test)] mod tests { use super::*; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::MinimalEthSpec as E; + use crate::sync::block_lookups::common::LookupType; + use crate::sync::block_lookups::common::{Lookup, RequestState}; + use beacon_chain::builder::Witness; + use beacon_chain::eth1_chain::CachingEth1Backend; + use sloggers::null::NullLoggerBuilder; + use sloggers::Build; + use slot_clock::{SlotClock, TestingSlotClock}; + use std::time::Duration; + use store::{HotColdDB, MemoryStore, StoreConfig}; + use types::{ + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + ChainSpec, EthSpec, MinimalEthSpec as E, SignedBeaconBlock, Slot, + }; fn rand_block() -> SignedBeaconBlock { let mut rng = XorShiftRng::from_seed([42; 16]); @@ -215,37 +541,128 @@ mod tests { types::Signature::random_for_test(&mut rng), ) } + type T = Witness, E, MemoryStore, MemoryStore>; + + struct TestLookup1; + + impl Lookup for TestLookup1 { + const MAX_ATTEMPTS: u8 = 3; + + fn lookup_type() -> LookupType { + panic!() + } + } + + struct TestLookup2; + + impl Lookup for TestLookup2 { + const MAX_ATTEMPTS: u8 = 4; + + fn lookup_type() -> LookupType { + panic!() + } + } #[test] fn test_happy_path() { let peer_id = PeerId::random(); let block = rand_block(); + let spec = E::default_spec(); + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(spec.seconds_per_slot), + ); + let log = NullLoggerBuilder.build().expect("logger should build"); + let store = + HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) + .expect("store"); + let da_checker = Arc::new( + DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone()) + .expect("data availability checker"), + ); + let mut sl = SingleBlockLookup::::new( + block.canonical_root(), + None, + &[peer_id], + da_checker, + 1, + ); + as RequestState>::build_request( + &mut sl.block_request_state, + &spec, + ) + .unwrap(); + sl.block_request_state.state.state = State::Downloading { peer_id }; - let 
mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); - sl.request_block().unwrap(); - sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); + as RequestState>::verify_response( + &mut sl.block_request_state, + block.canonical_root(), + Some(block.into()), + ) + .unwrap() + .unwrap(); } #[test] fn test_block_lookup_failures() { - const FAILURES: u8 = 3; let peer_id = PeerId::random(); let block = rand_block(); + let spec = E::default_spec(); + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(spec.seconds_per_slot), + ); + let log = NullLoggerBuilder.build().expect("logger should build"); + let store = + HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) + .expect("store"); - let mut sl = SingleBlockRequest::::new(block.canonical_root(), peer_id); - for _ in 1..FAILURES { - sl.request_block().unwrap(); - sl.register_failure_downloading(); + let da_checker = Arc::new( + DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone()) + .expect("data availability checker"), + ); + + let mut sl = SingleBlockLookup::::new( + block.canonical_root(), + None, + &[peer_id], + da_checker, + 1, + ); + for _ in 1..TestLookup2::MAX_ATTEMPTS { + as RequestState>::build_request( + &mut sl.block_request_state, + &spec, + ) + .unwrap(); + sl.block_request_state.state.register_failure_downloading(); } // Now we receive the block and send it for processing - sl.request_block().unwrap(); - sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); + as RequestState>::build_request( + &mut sl.block_request_state, + &spec, + ) + .unwrap(); + sl.block_request_state.state.state = State::Downloading { peer_id }; + + as RequestState>::verify_response( + &mut sl.block_request_state, + block.canonical_root(), + Some(block.into()), + ) + .unwrap() + .unwrap(); // One processing failure maxes the available attempts - sl.register_failure_processing(); + sl.block_request_state.state.register_failure_processing(); assert_eq!( - sl.request_block(), + as RequestState>::build_request( + &mut sl.block_request_state, + &spec + ), Err(LookupRequestError::TooManyAttempts { cannot_process: false }) diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index c588f867b..c506696b9 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,22 +1,28 @@ -use std::sync::Arc; - use crate::network_beacon_processor::NetworkBeaconProcessor; + use crate::service::RequestId; -use crate::sync::manager::RequestId as SyncId; +use crate::sync::manager::{RequestId as SyncId, SingleLookupReqId}; use crate::NetworkMessage; +use std::sync::Arc; use super::*; +use crate::sync::block_lookups::common::ResponseType; use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; +use beacon_chain::test_utils::{ + build_log, generate_rand_block_and_blobs, BeaconChainHarness, EphemeralHarnessType, NumBlobs, +}; use beacon_processor::WorkEvent; +use lighthouse_network::rpc::RPCResponseErrorCode; use lighthouse_network::{NetworkGlobals, Request}; -use slog::{Drain, Level}; -use slot_clock::ManualSlotClock; +use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; use store::MemoryStore; use tokio::sync::mpsc; -use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::MinimalEthSpec as E; +use types::{ + test_utils::{SeedableRng, XorShiftRng}, + BlobSidecar, 
EthSpec, ForkName, MinimalEthSpec as E, SignedBeaconBlock, +}; type T = Witness, E, MemoryStore, MemoryStore>; @@ -24,23 +30,29 @@ struct TestRig { beacon_processor_rx: mpsc::Receiver>, network_rx: mpsc::UnboundedReceiver>, rng: XorShiftRng, + harness: BeaconChainHarness, } const D: Duration = Duration::new(0, 0); impl TestRig { - fn test_setup(log_level: Option) -> (BlockLookups, SyncNetworkContext, Self) { - let log = { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); + fn test_setup(enable_log: bool) -> (BlockLookups, SyncNetworkContext, Self) { + let log = build_log(slog::Level::Debug, enable_log); - if let Some(log_level) = log_level { - slog::Logger::root(drain.filter_level(log_level).fuse(), slog::o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), slog::o!()) - } - }; + // Initialise a new beacon chain + let harness = BeaconChainHarness::>::builder(E) + .default_spec() + .logger(log.clone()) + .deterministic_keypairs(1) + .fresh_ephemeral_store() + .testing_slot_clock(TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(12), + )) + .build(); + + let chain = harness.chain.clone(); let (network_tx, network_rx) = mpsc::unbounded_channel(); let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); @@ -51,12 +63,18 @@ impl TestRig { beacon_processor_rx, network_rx, rng, + harness, }; - let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); + + let bl = BlockLookups::new( + chain.data_availability_checker.clone(), + log.new(slog::o!("component" => "block_lookups")), + ); let cx = { SyncNetworkContext::new( network_tx, Arc::new(network_beacon_processor), + chain, log.new(slog::o!("component" => "network_context")), ) }; @@ -64,48 +82,82 @@ impl TestRig { (bl, cx, rig) } - fn rand_block(&mut self) -> SignedBeaconBlock { - SignedBeaconBlock::from_block( - types::BeaconBlock::Base(types::BeaconBlockBase { - ..<_>::random_for_test(&mut self.rng) - }), - types::Signature::random_for_test(&mut self.rng), - ) + fn rand_block(&mut self, fork_name: ForkName) -> SignedBeaconBlock { + self.rand_block_and_blobs(fork_name, NumBlobs::None).0 + } + + fn rand_block_and_blobs( + &mut self, + fork_name: ForkName, + num_blobs: NumBlobs, + ) -> (SignedBeaconBlock, Vec>) { + let rng = &mut self.rng; + generate_rand_block_and_blobs::(fork_name, num_blobs, rng) } #[track_caller] - fn expect_block_request(&mut self) -> Id { - match self.network_rx.try_recv() { - Ok(NetworkMessage::SendRequest { - peer_id: _, - request: Request::BlocksByRoot(_request), - request_id: RequestId::Sync(SyncId::SingleBlock { id }), - }) => id, - other => { - panic!("Expected block request, found {:?}", other); - } + fn expect_lookup_request(&mut self, response_type: ResponseType) -> SingleLookupReqId { + match response_type { + ResponseType::Block => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlocksByRoot(_request), + request_id: RequestId::Sync(SyncId::SingleBlock { id }), + }) => id, + other => { + panic!("Expected block request, found {:?}", other); + } + }, + ResponseType::Blob => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlobsByRoot(_request), + request_id: RequestId::Sync(SyncId::SingleBlob { id }), + }) => id, + other => { + panic!("Expected blob request, found {:?}", other); + } 
+ }, } } #[track_caller] - fn expect_parent_request(&mut self) -> Id { - match self.network_rx.try_recv() { - Ok(NetworkMessage::SendRequest { - peer_id: _, - request: Request::BlocksByRoot(_request), - request_id: RequestId::Sync(SyncId::ParentLookup { id }), - }) => id, - other => panic!("Expected parent request, found {:?}", other), + fn expect_parent_request(&mut self, response_type: ResponseType) -> SingleLookupReqId { + match response_type { + ResponseType::Block => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlocksByRoot(_request), + request_id: RequestId::Sync(SyncId::ParentLookup { id }), + }) => id, + other => panic!("Expected parent request, found {:?}", other), + }, + ResponseType::Blob => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlobsByRoot(_request), + request_id: RequestId::Sync(SyncId::ParentLookupBlob { id }), + }) => id, + other => panic!("Expected parent blobs request, found {:?}", other), + }, } } #[track_caller] - fn expect_block_process(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); - } - other => panic!("Expected block process, found {:?}", other), + fn expect_block_process(&mut self, response_type: ResponseType) { + match response_type { + ResponseType::Block => match self.beacon_processor_rx.try_recv() { + Ok(work) => { + assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); + } + other => panic!("Expected block process, found {:?}", other), + }, + ResponseType::Blob => match self.beacon_processor_rx.try_recv() { + Ok(work) => { + assert_eq!(work.work_type(), beacon_processor::RPC_BLOBS); + } + other => panic!("Expected blob process, found {:?}", other), + }, } } @@ -127,6 +179,14 @@ impl TestRig { ); } + #[track_caller] + fn expect_empty_beacon_processor(&mut self) { + assert_eq!( + self.beacon_processor_rx.try_recv().expect_err("must err"), + mpsc::error::TryRecvError::Empty + ); + } + #[track_caller] pub fn expect_penalty(&mut self) { match self.network_rx.try_recv() { @@ -135,151 +195,266 @@ impl TestRig { } } - pub fn block_with_parent(&mut self, parent_root: Hash256) -> SignedBeaconBlock { - SignedBeaconBlock::from_block( - types::BeaconBlock::Base(types::BeaconBlockBase { - parent_root, - ..<_>::random_for_test(&mut self.rng) - }), - types::Signature::random_for_test(&mut self.rng), - ) + pub fn block_with_parent( + &mut self, + parent_root: Hash256, + fork_name: ForkName, + ) -> SignedBeaconBlock { + let mut block = self.rand_block(fork_name); + *block.message_mut().parent_root_mut() = parent_root; + block + } + + pub fn block_with_parent_and_blobs( + &mut self, + parent_root: Hash256, + fork_name: ForkName, + num_blobs: NumBlobs, + ) -> (SignedBeaconBlock, Vec>) { + let (mut block, mut blobs) = self.rand_block_and_blobs(fork_name, num_blobs); + *block.message_mut().parent_root_mut() = parent_root; + blobs.iter_mut().for_each(|blob| { + blob.signed_block_header = block.signed_block_header(); + }); + (block, blobs) } } #[test] fn test_single_block_lookup_happy_path() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); - let block = rig.rand_block(); + let block = rig.rand_block(fork_name); let peer_id = PeerId::random(); - + let 
block_root = block.canonical_root(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block_root, &[peer_id], &mut cx); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); + bl.single_lookup_response::>( + id, + peer_id, + Some(block.into()), + D, + &cx, + ); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. assert_eq!(bl.single_block_lookups.len(), 1); // Send the stream termination. Peer should not have been penalized, and the request removed // after processing. - bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); - bl.single_block_processed(id, Ok(()).into(), &mut cx); + bl.single_lookup_response::>(id, peer_id, None, D, &cx); + bl.single_block_component_processed::>( + id.id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + &mut cx, + ); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } #[test] fn test_single_block_lookup_empty_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block_hash, &[peer_id], &mut cx); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The peer does not have the block. It should be penalized. - bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + bl.single_lookup_response::>(id, peer_id, None, D, &cx); rig.expect_penalty(); - rig.expect_block_request(); // it should be retried + rig.expect_lookup_request(response_type); // it should be retried } #[test] fn test_single_block_lookup_wrong_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block_hash, &[peer_id], &mut cx); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test.
+ if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // Peer sends something else. It should be penalized. - let bad_block = rig.rand_block(); - bl.single_block_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + let bad_block = rig.rand_block(fork_name); + bl.single_lookup_response::>( + id, + peer_id, + Some(bad_block.into()), + D, + &cx, + ); rig.expect_penalty(); - rig.expect_block_request(); // should be retried + rig.expect_lookup_request(response_type); // should be retried // Send the stream termination. This should not produce an additional penalty. - bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + bl.single_lookup_response::>(id, peer_id, None, D, &cx); rig.expect_empty_network(); } #[test] fn test_single_block_lookup_failure() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block_hash, &[peer_id], &mut cx); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The request fails. RPC failures are handled elsewhere so we should not penalize the peer. - bl.single_block_lookup_failed(id, &mut cx); - rig.expect_block_request(); + bl.single_block_lookup_failed::>( + id, + &peer_id, + &cx, + RPCError::UnsupportedProtocol, + ); + rig.expect_lookup_request(response_type); rig.expect_empty_network(); } #[test] fn test_single_block_lookup_becomes_parent_request() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let block = Arc::new(rig.rand_block(fork_name)); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block.canonical_root(), &[peer_id], &mut cx); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); + bl.single_lookup_response::>( + id, + peer_id, + Some(block.clone()), + D, + &cx, + ); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. assert_eq!(bl.single_block_lookups.len(), 1); // Send the stream termination.
Peer should not have been penalized, and the request moved to a // parent request after processing. - bl.single_block_processed( - id, - BlockError::ParentUnknown(Arc::new(block)).into(), + bl.single_block_component_processed::>( + id.id, + BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), &mut cx, ); - assert_eq!(bl.single_block_lookups.len(), 0); - rig.expect_parent_request(); + assert_eq!(bl.single_block_lookups.len(), 1); + rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } rig.expect_empty_network(); assert_eq!(bl.parent_lookups.len(), 1); } #[test] fn test_parent_lookup_happy_path() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); rig.expect_empty_network(); // Processing succeeds, now the rest of the chain should be sent for processing.
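Every lookup test above and below repeats the same Deneb dance: expect the block request, then drain the blob request that post-Deneb lookups also fire. If this boilerplate keeps growing, the rig could collapse it into a single wrapper. The following is only a sketch (the helper name is hypothetical, not part of this patch); it is built purely from the TestRig methods introduced above.

impl TestRig {
    /// Hypothetical convenience wrapper: drain the expected lookup request(s)
    /// for the current fork and return the block request id. Post-Deneb, every
    /// block lookup also triggers a blob lookup; these tests generate 0-blob
    /// blocks, so the blob request only needs to be consumed, not answered.
    #[track_caller]
    fn expect_lookup_requests_for(&mut self, fork_name: ForkName) -> SingleLookupReqId {
        let id = self.expect_lookup_request(ResponseType::Block);
        if matches!(fork_name, ForkName::Deneb) {
            let _ = self.expect_lookup_request(ResponseType::Blob);
        }
        id
    }
}

With a helper like this, the repeated four-line pattern in each test would reduce to a single `let id = rig.expect_lookup_requests_for(fork_name);`.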
@@ -288,140 +463,264 @@ fn test_parent_lookup_happy_path() { let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_wrong_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the wrong block, peer should be penalized and the block re-requested. - let bad_block = rig.rand_block(); - bl.parent_lookup_response(id1, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + let bad_block = rig.rand_block(fork_name); + bl.parent_lookup_response::>( + id1, + peer_id, + Some(bad_block.into()), + D, + &cx, + ); rig.expect_penalty(); - let id2 = rig.expect_parent_request(); + let id2 = rig.expect_parent_request(response_type); // Send the stream termination for the first request. This should not produce extra penalties. - bl.parent_lookup_response(id1, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>(id1, peer_id, None, D, &cx); rig.expect_empty_network(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id2, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing.
- bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_empty_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends an empty response, peer should be penalized and the block re-requested. - bl.parent_lookup_response(id1, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>(id1, peer_id, None, D, &cx); rig.expect_penalty(); - let id2 = rig.expect_parent_request(); + let id2 = rig.expect_parent_request(response_type); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id2, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing.
- bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_rpc_failure() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // The request fails. It should be tried again. - bl.parent_lookup_failed(id1, peer_id, &mut cx); - let id2 = rig.expect_parent_request(); + bl.parent_lookup_failed::>( + id1, + peer_id, + &cx, + RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + "older than deneb".into(), + ), + ); + let id2 = rig.expect_parent_request(response_type); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id2, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing.
- bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_too_many_attempts() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); - let chain_hash = block.canonical_root(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 1 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } match i % 2 { // make sure every error is accounted for 0 => { // The request fails. It should be tried again. - bl.parent_lookup_failed(id, peer_id, &mut cx); + bl.parent_lookup_failed::>( + id, + peer_id, + &cx, + RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + "older than deneb".into(), + ), + ); } _ => { // Send a bad block this time. It should be tried again. - let bad_block = rig.rand_block(); - bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + let bad_block = rig.rand_block(fork_name); + bl.parent_lookup_response::>( + id, + peer_id, + Some(bad_block.into()), + D, + &cx, + ); // Send the stream termination - bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + + // Note: previously we would send the same lookup id with a stream terminator and + // ignore it, because we'd interpret it as an unrequested response, since + // we already got one response for the block. I'm not sure what the intent is + // for having this stream terminator line in this test at all. Receiving an invalid + // block and a stream terminator with the same Id now results in two failed attempts, + // and I'm unsure whether this is how it should behave.
+ // + bl.parent_lookup_response::>(id, peer_id, None, D, &cx); rig.expect_penalty(); } } if i < parent_lookup::PARENT_FAIL_TOLERANCE { - assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); + assert_eq!( + bl.parent_lookups[0] + .current_parent_request + .block_request_state + .state + .failed_attempts(), + dbg!(i) + ); } } @@ -430,29 +729,63 @@ fn test_parent_lookup_too_many_attempts() { #[test] fn test_parent_lookup_too_many_download_attempts_no_blacklist() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let block_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { assert!(!bl.failed_chains.contains(&block_hash)); - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 1 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } if i % 2 != 0 { // The request fails. It should be tried again. - bl.parent_lookup_failed(id, peer_id, &mut cx); + bl.parent_lookup_failed::>( + id, + peer_id, + &cx, + RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + "older than deneb".into(), + ), + ); } else { // Send a bad block this time. It should be tried again.
- let bad_block = rig.rand_block(); - bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + let bad_block = rig.rand_block(fork_name); + bl.parent_lookup_response::>( + id, + peer_id, + Some(bad_block.into()), + D, + &cx, + ); rig.expect_penalty(); } if i < parent_lookup::PARENT_FAIL_TOLERANCE { - assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); + assert_eq!( + bl.parent_lookups[0] + .current_parent_request + .block_request_state + .state + .failed_attempts(), + dbg!(i) + ); } } @@ -463,70 +796,126 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { #[test] fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { + let response_type = ResponseType::Block; const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); - let parent = Arc::new(rig.rand_block()); - let block = rig.block_with_parent(parent.canonical_root()); - let block_hash = block.canonical_root(); + let parent = Arc::new(rig.rand_block(fork_name)); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); // Fail downloading the block - for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { - let id = rig.expect_parent_request(); + for i in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 0 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // The request fails. It should be tried again. - bl.parent_lookup_failed(id, peer_id, &mut cx); + bl.parent_lookup_failed::>( + id, + peer_id, + &cx, + RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + "older than deneb".into(), + ), + ); } // Now fail processing a block in the parent request - for _ in 0..PROCESSING_FAILURES { - let id = dbg!(rig.expect_parent_request()); - assert!(!bl.failed_chains.contains(&block_hash)); + for i in 0..PROCESSING_FAILURES { + let id = dbg!(rig.expect_parent_request(response_type)); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i != 0 { + let _ = rig.expect_parent_request(ResponseType::Blob); + }
+ assert!(!bl.failed_chains.contains(&block_root)); // send the right parent but fail processing - bl.parent_lookup_response(id, peer_id, Some(parent.clone()), D, &mut cx); - bl.parent_block_processed(block_hash, BlockError::InvalidSignature.into(), &mut cx); - bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>( + id, + peer_id, + Some(parent.clone()), + D, + &cx, + ); + bl.parent_block_processed(block_root, BlockError::InvalidSignature.into(), &mut cx); + bl.parent_lookup_response::>(id, peer_id, None, D, &cx); rig.expect_penalty(); } - assert!(bl.failed_chains.contains(&block_hash)); + assert!(bl.failed_chains.contains(&block_root)); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_too_deep() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let mut blocks = - Vec::>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE); + Vec::>>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE); while blocks.len() < parent_lookup::PARENT_DEPTH_TOLERANCE { let parent = blocks .last() .map(|b| b.canonical_root()) .unwrap_or_else(Hash256::random); - let block = rig.block_with_parent(parent); + let block = Arc::new(rig.block_with_parent(parent, fork_name)); blocks.push(block); } let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(chain_hash, Arc::new(trigger_block), peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); for block in blocks.into_iter().rev() { - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test.
+ if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // the block - bl.parent_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); + bl.parent_lookup_response::>( + id, + peer_id, + Some(block.clone()), + D, + &cx, + ); // the stream termination - bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>(id, peer_id, None, D, &cx); // the processing request - rig.expect_block_process(); + rig.expect_block_process(response_type); // the processing result bl.parent_block_processed( chain_hash, - BlockError::ParentUnknown(Arc::new(block)).into(), + BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), &mut cx, ) } @@ -537,68 +926,117 @@ fn test_parent_lookup_too_deep() { #[test] fn test_parent_lookup_disconnection() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); let peer_id = PeerId::random(); - let trigger_block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let trigger_block = rig.rand_block(fork_name); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); bl.search_parent( - trigger_block.canonical_root(), - Arc::new(trigger_block), + trigger_slot, + trigger_block_root, + trigger_parent_root, peer_id, &mut cx, ); + bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_lookups.is_empty()); } #[test] fn test_single_block_lookup_ignored_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let block = rig.rand_block(fork_name); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block.canonical_root(), &[peer_id], &mut cx); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); + bl.single_lookup_response::>( + id, + peer_id, + Some(block.into()), + D, + &cx, + ); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. assert_eq!(bl.single_block_lookups.len(), 1); // Send the stream termination. Peer should not have been penalized, and the request removed // after processing.
- bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + bl.single_lookup_response::>(id, peer_id, None, D, &cx); // Send an Ignored response, the request should be dropped - bl.single_block_processed(id, BlockProcessResult::Ignored, &mut cx); + bl.single_block_component_processed::>( + id.id, + BlockProcessingResult::Ignored, + &mut cx, + ); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } #[test] fn test_parent_lookup_ignored_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id = rig.expect_parent_request(response_type); + + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); rig.expect_empty_network(); // Return an Ignored result. The request should be dropped - bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx); + bl.parent_block_processed(chain_hash, BlockProcessingResult::Ignored, &mut cx); rig.expect_empty_network(); assert_eq!(bl.parent_lookups.len(), 0); } @@ -606,8 +1044,13 @@ fn test_parent_lookup_ignored_response() { /// This is a regression test.
#[test] fn test_same_chain_race_condition() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(Some(Level::Debug)); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(true); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); #[track_caller] fn parent_lookups_consistency(bl: &BlockLookups) { let hashes: Vec<_> = bl @@ -634,29 +1077,53 @@ fn test_same_chain_race_condition() { .last() .map(|b| b.canonical_root()) .unwrap_or_else(Hash256::random); - let block = Arc::new(rig.block_with_parent(parent)); + let block = Arc::new(rig.block_with_parent(parent, fork_name)); blocks.push(block); } let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(chain_hash, trigger_block.clone(), peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); for (i, block) in blocks.into_iter().rev().enumerate() { - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // the block - bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx); + bl.parent_lookup_response::>( + id, + peer_id, + Some(block.clone()), + D, + &cx, + ); // the stream termination - bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>(id, peer_id, None, D, &cx); // the processing request - rig.expect_block_process(); + rig.expect_block_process(response_type); // the processing result if i + 2 == depth { // one block was removed bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx) } else { - bl.parent_block_processed(chain_hash, BlockError::ParentUnknown(block).into(), &mut cx) + bl.parent_block_processed( + chain_hash, + BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), + &mut cx, + ) } parent_lookups_consistency(&bl) } @@ -666,12 +1133,959 @@ fn test_same_chain_race_condition() { // Try to get this block again while the chain is being processed. We should not request it again.
let peer_id = PeerId::random(); - bl.search_parent(chain_hash, trigger_block, peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); parent_lookups_consistency(&bl); let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } + +mod deneb_only { + use super::*; + use crate::sync::block_lookups::common::ResponseType; + use beacon_chain::data_availability_checker::AvailabilityCheckError; + use beacon_chain::test_utils::NumBlobs; + use ssz_types::VariableList; + use std::ops::IndexMut; + use std::str::FromStr; + + struct DenebTester { + bl: BlockLookups, + cx: SyncNetworkContext, + rig: TestRig, + block: Arc>, + blobs: Vec>>, + parent_block: VecDeque>>, + parent_blobs: VecDeque>>>, + unknown_parent_block: Option>>, + unknown_parent_blobs: Option>>>, + peer_id: PeerId, + block_req_id: Option, + parent_block_req_id: Option, + blob_req_id: Option, + parent_blob_req_id: Option, + slot: Slot, + block_root: Hash256, + } + + enum RequestTrigger { + AttestationUnknownBlock, + GossipUnknownParentBlock { num_parents: usize }, + GossipUnknownParentBlob { num_parents: usize }, + } + + impl RequestTrigger { + fn num_parents(&self) -> usize { + match self { + RequestTrigger::AttestationUnknownBlock => 0, + RequestTrigger::GossipUnknownParentBlock { num_parents } => *num_parents, + RequestTrigger::GossipUnknownParentBlob { num_parents } => *num_parents, + } + } + } + + impl DenebTester { + fn new(request_trigger: RequestTrigger) -> Option { + let fork_name = get_fork_name(); + if !matches!(fork_name, ForkName::Deneb) { + return None; + } + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + rig.harness.chain.slot_clock.set_slot( + E::slots_per_epoch() * rig.harness.spec.deneb_fork_epoch.unwrap().as_u64(), + ); + let (block, blobs) = rig.rand_block_and_blobs(fork_name, NumBlobs::Random); + let mut block = Arc::new(block); + let mut blobs = blobs.into_iter().map(Arc::new).collect::>(); + let slot = block.slot(); + + let num_parents = request_trigger.num_parents(); + let mut parent_block_chain = VecDeque::with_capacity(num_parents); + let mut parent_blobs_chain = VecDeque::with_capacity(num_parents); + for _ in 0..num_parents { + // Set the current block as the parent. + let parent_root = block.canonical_root(); + let parent_block = block.clone(); + let parent_blobs = blobs.clone(); + parent_block_chain.push_front(parent_block); + parent_blobs_chain.push_front(parent_blobs); + + // Create the next block. + let (child_block, child_blobs) = + rig.block_with_parent_and_blobs(parent_root, get_fork_name(), NumBlobs::Random); + let mut child_block = Arc::new(child_block); + let mut child_blobs = child_blobs.into_iter().map(Arc::new).collect::>(); + + // Update the new block to the current block. 
+ std::mem::swap(&mut child_block, &mut block); + std::mem::swap(&mut child_blobs, &mut blobs); + } + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + + let peer_id = PeerId::random(); + + // Trigger the request + let (block_req_id, blob_req_id, parent_block_req_id, parent_blob_req_id) = + match request_trigger { + RequestTrigger::AttestationUnknownBlock => { + bl.search_block(block_root, &[peer_id], &mut cx); + let block_req_id = rig.expect_lookup_request(ResponseType::Block); + let blob_req_id = rig.expect_lookup_request(ResponseType::Blob); + (Some(block_req_id), Some(blob_req_id), None, None) + } + RequestTrigger::GossipUnknownParentBlock { .. } => { + bl.search_child_block( + block_root, + ChildComponents::new(block_root, Some(block.clone()), None), + &[peer_id], + &mut cx, + ); + + let blob_req_id = rig.expect_lookup_request(ResponseType::Blob); + rig.expect_empty_network(); // expect no block request + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let parent_block_req_id = rig.expect_parent_request(ResponseType::Block); + let parent_blob_req_id = rig.expect_parent_request(ResponseType::Blob); + ( + None, + Some(blob_req_id), + Some(parent_block_req_id), + Some(parent_blob_req_id), + ) + } + RequestTrigger::GossipUnknownParentBlob { .. } => { + let single_blob = blobs.first().cloned().unwrap(); + let child_root = single_blob.block_root(); + + let mut lookup_blobs = FixedBlobSidecarList::default(); + *lookup_blobs.index_mut(0) = Some(single_blob); + bl.search_child_block( + child_root, + ChildComponents::new(child_root, None, Some(lookup_blobs)), + &[peer_id], + &mut cx, + ); + + let block_req_id = rig.expect_lookup_request(ResponseType::Block); + let blobs_req_id = rig.expect_lookup_request(ResponseType::Blob); + rig.expect_empty_network(); // expect no block request + bl.search_parent(slot, child_root, parent_root, peer_id, &mut cx); + let parent_block_req_id = rig.expect_parent_request(ResponseType::Block); + let parent_blob_req_id = rig.expect_parent_request(ResponseType::Blob); + ( + Some(block_req_id), + Some(blobs_req_id), + Some(parent_block_req_id), + Some(parent_blob_req_id), + ) + } + }; + + Some(Self { + bl, + cx, + rig, + block, + blobs, + parent_block: parent_block_chain, + parent_blobs: parent_blobs_chain, + unknown_parent_block: None, + unknown_parent_blobs: None, + peer_id, + block_req_id, + parent_block_req_id, + blob_req_id, + parent_blob_req_id, + slot, + block_root, + }) + } + + fn parent_block_response(mut self) -> Self { + self.rig.expect_empty_network(); + let block = self.parent_block.pop_front().unwrap().clone(); + let _ = self.unknown_parent_block.insert(block.clone()); + self.bl.parent_lookup_response::>( + self.parent_block_req_id.expect("parent request id"), + self.peer_id, + Some(block), + D, + &self.cx, + ); + + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn parent_blob_response(mut self) -> Self { + let blobs = self.parent_blobs.pop_front().unwrap(); + let _ = self.unknown_parent_blobs.insert(blobs.clone()); + for blob in &blobs { + self.bl + .parent_lookup_response::>( + self.parent_blob_req_id.expect("parent blob request id"), + self.peer_id, + Some(blob.clone()), + D, + &self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + } + self.bl + .parent_lookup_response::>( + self.parent_blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &self.cx, + ); + + self + } + + fn block_response_triggering_process(self) -> Self { + let mut me = self.block_response(); 
+ me.rig.expect_block_process(ResponseType::Block); + + // The request should still be active. + assert_eq!(me.bl.single_block_lookups.len(), 1); + me + } + + fn block_response(mut self) -> Self { + // The peer provides the correct block, should not be penalized. Now the block should be sent + // for processing. + self.bl + .single_lookup_response::>( + self.block_req_id.expect("block request id"), + self.peer_id, + Some(self.block.clone()), + D, + &self.cx, + ); + self.rig.expect_empty_network(); + + // The request should still be active. + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn blobs_response(mut self) -> Self { + for blob in &self.blobs { + self.bl + .single_lookup_response::>( + self.blob_req_id.expect("blob request id"), + self.peer_id, + Some(blob.clone()), + D, + &self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + } + self.bl + .single_lookup_response::>( + self.blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn blobs_response_was_valid(mut self) -> Self { + self.rig.expect_empty_network(); + if !self.blobs.is_empty() { + self.rig.expect_block_process(ResponseType::Blob); + } + self + } + + fn expect_empty_beacon_processor(mut self) -> Self { + self.rig.expect_empty_beacon_processor(); + self + } + + fn empty_block_response(mut self) -> Self { + self.bl + .single_lookup_response::>( + self.block_req_id.expect("block request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn empty_blobs_response(mut self) -> Self { + self.bl + .single_lookup_response::>( + self.blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn empty_parent_block_response(mut self) -> Self { + self.bl.parent_lookup_response::>( + self.parent_block_req_id.expect("block request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn empty_parent_blobs_response(mut self) -> Self { + self.bl + .parent_lookup_response::>( + self.parent_blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn block_imported(mut self) -> Self { + // Missing blobs should mean the request is not removed; the outstanding blobs request + // should mean we do not send a new request.
+ self.bl + .single_block_component_processed::>( + self.block_req_id.expect("block request id").id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported( + self.block_root, + )), + &mut self.cx, + ); + self.rig.expect_empty_network(); + assert_eq!(self.bl.single_block_lookups.len(), 0); + self + } + + fn parent_block_imported(mut self) -> Self { + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), + &mut self.cx, + ); + self.rig.expect_empty_network(); + assert_eq!(self.bl.parent_lookups.len(), 0); + self + } + + fn parent_block_unknown_parent(mut self) -> Self { + let block = self.unknown_parent_block.take().unwrap(); + let block = RpcBlock::new( + Some(block.canonical_root()), + block, + self.unknown_parent_blobs.take().map(VariableList::from), + ) + .unwrap(); + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Err(BlockError::ParentUnknown(block)), + &mut self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn invalid_parent_processed(mut self) -> Self { + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + &mut self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn invalid_block_processed(mut self) -> Self { + self.bl + .single_block_component_processed::>( + self.block_req_id.expect("block request id").id, + BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn invalid_blob_processed(mut self) -> Self { + self.bl + .single_block_component_processed::>( + self.blob_req_id.expect("blob request id").id, + BlockProcessingResult::Err(BlockError::AvailabilityCheck( + AvailabilityCheckError::KzgVerificationFailed, + )), + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn missing_components_from_block_request(mut self) -> Self { + self.bl + .single_block_component_processed::>( + self.block_req_id.expect("block request id").id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + self.slot, + self.block_root, + )), + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn missing_components_from_blob_request(mut self) -> Self { + self.bl + .single_block_component_processed::>( + self.blob_req_id.expect("blob request id").id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + self.slot, + self.block_root, + )), + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn expect_penalty(mut self) -> Self { + self.rig.expect_penalty(); + self + } + fn expect_no_penalty(mut self) -> Self { + self.rig.expect_empty_network(); + self + } + fn expect_block_request(mut self) -> Self { + let id = self.rig.expect_lookup_request(ResponseType::Block); + self.block_req_id = Some(id); + self + } + fn expect_blobs_request(mut self) -> Self { + let id = self.rig.expect_lookup_request(ResponseType::Blob); + self.blob_req_id = Some(id); + self + } + fn expect_parent_block_request(mut self) -> Self { + let id = self.rig.expect_parent_request(ResponseType::Block); + self.parent_block_req_id = Some(id); + self + } + fn expect_parent_blobs_request(mut self) -> Self { + let id = self.rig.expect_parent_request(ResponseType::Blob); + self.parent_blob_req_id = Some(id); + self + } + fn expect_no_blobs_request(mut self) -> 
Self { + self.rig.expect_empty_network(); + self + } + fn expect_no_block_request(mut self) -> Self { + self.rig.expect_empty_network(); + self + } + fn invalidate_blobs_too_few(mut self) -> Self { + self.blobs.pop().expect("blobs"); + self + } + fn invalidate_blobs_too_many(mut self) -> Self { + let first_blob = self.blobs.first().expect("blob").clone(); + self.blobs.push(first_blob); + self + } + fn expect_parent_chain_process(mut self) -> Self { + self.rig.expect_parent_chain_process(); + self + } + fn expect_block_process(mut self) -> Self { + self.rig.expect_block_process(ResponseType::Block); + self + } + } + + fn get_fork_name() -> ForkName { + ForkName::from_str( + &std::env::var(beacon_chain::test_utils::FORK_NAME_ENV_VAR).unwrap_or_else(|e| { + panic!( + "{} env var must be defined when using fork_from_env: {:?}", + beacon_chain::test_utils::FORK_NAME_ENV_VAR, + e + ) + }), + ) + .unwrap() + } + + #[test] + fn single_block_and_blob_lookup_block_returned_first_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .blobs_response() + .blobs_response_was_valid() + .block_imported(); + } + + #[test] + fn single_block_and_blob_lookup_blobs_returned_first_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .blobs_response() + .blobs_response_was_valid() + .block_response_triggering_process() + .block_imported(); + } + + #[test] + fn single_block_and_blob_lookup_empty_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .empty_block_response() + .expect_penalty() + .expect_block_request() + .expect_no_blobs_request() + .empty_blobs_response() + .expect_empty_beacon_processor() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .block_response_triggering_process() + .missing_components_from_block_request(); + } + + #[test] + fn single_block_response_then_empty_blob_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .missing_components_from_block_request() + .empty_blobs_response() + .missing_components_from_blob_request() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + + #[test] + fn single_blob_response_then_empty_block_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .blobs_response() + .blobs_response_was_valid() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .missing_components_from_blob_request() + .empty_block_response() + .expect_penalty() + .expect_block_request() + .expect_no_blobs_request(); + } + + #[test] + fn single_invalid_block_response_then_blob_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .invalid_block_processed() + .expect_penalty() + .expect_block_request() + .expect_no_blobs_request() + .blobs_response() + .missing_components_from_blob_request() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_invalid_blob_response_attestation() { + let Some(tester) = 
DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .missing_components_from_block_request() + .blobs_response() + .invalid_blob_processed() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_too_few_blobs_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .missing_components_from_block_request() + .invalidate_blobs_too_few() + .blobs_response() + .missing_components_from_blob_request() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_too_many_blobs_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .invalidate_blobs_too_many() + .blobs_response() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + #[test] + fn too_few_blobs_response_then_block_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .invalidate_blobs_too_few() + .blobs_response() + .blobs_response_was_valid() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .block_response_triggering_process(); + } + + #[test] + fn too_many_blobs_response_then_block_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .invalidate_blobs_too_many() + .blobs_response() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request() + .block_response_triggering_process(); + } + + #[test] + fn parent_block_unknown_parent() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) + else { + return; + }; + + tester + .blobs_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_unknown_parent() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_invalid_parent() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) + else { + return; + }; + + tester + .blobs_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .invalid_parent_processed() + .expect_penalty() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_and_blob_lookup_parent_returned_first() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) + else { + return; + }; + + tester + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .blobs_response() + .expect_parent_chain_process(); + } + + #[test] + fn parent_block_and_blob_lookup_child_returned_first() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) + else { + return; + }; + + tester + .blobs_response() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .parent_block_response() + .parent_blob_response() + 
.expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_block_then_parent_blob() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) + else { + return; + }; + + tester + .empty_parent_block_response() + .expect_penalty() + .expect_parent_block_request() + .expect_no_blobs_request() + .parent_blob_response() + .expect_empty_beacon_processor() + .parent_block_response() + .expect_block_process() + .parent_block_imported() + .blobs_response() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_blobs_then_parent_block() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) + else { + return; + }; + + tester + .blobs_response() + .empty_parent_blobs_response() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .parent_block_response() + .expect_penalty() + .expect_parent_blobs_request() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn parent_blob_unknown_parent() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) + else { + return; + }; + + tester + .block_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_unknown_parent() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_blob_invalid_parent() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) + else { + return; + }; + + tester + .block_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .invalid_parent_processed() + .expect_penalty() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_and_blob_lookup_parent_returned_first_blob_trigger() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) + else { + return; + }; + + tester + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .block_response() + .expect_parent_chain_process(); + } + + #[test] + fn parent_block_and_blob_lookup_child_returned_first_blob_trigger() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) + else { + return; + }; + + tester + .block_response() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_block_then_parent_blob_blob_trigger() { + let Some(tester) = + DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) + else { + return; + }; + + tester + .empty_parent_block_response() + .expect_penalty() + .expect_parent_block_request() + .expect_no_blobs_request() + .parent_blob_response() + .expect_empty_beacon_processor() + .parent_block_response() + .expect_block_process() + .parent_block_imported() + .block_response() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_blobs_then_parent_block_blob_trigger() { + let Some(tester) = + 
DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 })
+        else {
+            return;
+        };
+
+        tester
+            .block_response()
+            .empty_parent_blobs_response()
+            .expect_no_penalty()
+            .expect_no_blobs_request()
+            .expect_no_block_request()
+            .parent_block_response()
+            .expect_penalty()
+            .expect_parent_blobs_request()
+            .parent_blob_response()
+            .expect_block_process()
+            .parent_block_imported()
+            .expect_parent_chain_process();
+    }
+
+    #[test]
+    fn parent_blob_unknown_parent_chain() {
+        let Some(tester) =
+            DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 2 })
+        else {
+            return;
+        };
+
+        tester
+            .block_response()
+            .expect_empty_beacon_processor()
+            .parent_block_response()
+            .parent_blob_response()
+            .expect_no_penalty()
+            .expect_block_process()
+            .parent_block_unknown_parent()
+            .expect_parent_block_request()
+            .expect_parent_blobs_request()
+            .expect_empty_beacon_processor()
+            .parent_block_response()
+            .parent_blob_response()
+            .expect_no_penalty()
+            .expect_block_process();
+    }
+}
diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs
new file mode 100644
index 000000000..f9ed45fcd
--- /dev/null
+++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs
@@ -0,0 +1,83 @@
+use beacon_chain::block_verification_types::RpcBlock;
+use ssz_types::VariableList;
+use std::{collections::VecDeque, sync::Arc};
+use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
+
+#[derive(Debug, Default)]
+pub struct BlocksAndBlobsRequestInfo<T: EthSpec> {
+    /// Blocks we have received awaiting their corresponding sidecars.
+    accumulated_blocks: VecDeque<Arc<SignedBeaconBlock<T>>>,
+    /// Sidecars we have received awaiting their corresponding block.
+    accumulated_sidecars: VecDeque<Arc<BlobSidecar<T>>>,
+    /// Whether the individual RPC request for blocks is finished or not.
+    is_blocks_stream_terminated: bool,
+    /// Whether the individual RPC request for sidecars is finished or not.
+    is_sidecars_stream_terminated: bool,
+}
+
+impl<T: EthSpec> BlocksAndBlobsRequestInfo<T> {
+    pub fn add_block_response(&mut self, block_opt: Option<Arc<SignedBeaconBlock<T>>>) {
+        match block_opt {
+            Some(block) => self.accumulated_blocks.push_back(block),
+            None => self.is_blocks_stream_terminated = true,
+        }
+    }
+
+    pub fn add_sidecar_response(&mut self, sidecar_opt: Option<Arc<BlobSidecar<T>>>) {
+        match sidecar_opt {
+            Some(sidecar) => self.accumulated_sidecars.push_back(sidecar),
+            None => self.is_sidecars_stream_terminated = true,
+        }
+    }
+
+    pub fn into_responses(self) -> Result<Vec<RpcBlock<T>>, String> {
+        let BlocksAndBlobsRequestInfo {
+            accumulated_blocks,
+            accumulated_sidecars,
+            ..
+        } = self;
+
+        // There can't be more blobs than blocks, i.e. sending any blob (empty
+        // included) for a skipped slot is not permitted.
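+        // Sidecars arrive slot-ordered, so walk both lists in lockstep: peel off
+        // every sidecar whose slot matches the current block, then place each one in
+        // a fixed-size buffer by its `index`, rejecting duplicate or out-of-range blobs.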
+        let mut responses = Vec::with_capacity(accumulated_blocks.len());
+        let mut blob_iter = accumulated_sidecars.into_iter().peekable();
+        for block in accumulated_blocks.into_iter() {
+            let mut blob_list = Vec::with_capacity(T::max_blobs_per_block());
+            while {
+                let pair_next_blob = blob_iter
+                    .peek()
+                    .map(|sidecar| sidecar.slot() == block.slot())
+                    .unwrap_or(false);
+                pair_next_blob
+            } {
+                blob_list.push(blob_iter.next().ok_or("Missing next blob".to_string())?);
+            }
+
+            let mut blobs_buffer = vec![None; T::max_blobs_per_block()];
+            for blob in blob_list {
+                let blob_index = blob.index as usize;
+                let Some(blob_opt) = blobs_buffer.get_mut(blob_index) else {
+                    return Err("Invalid blob index".to_string());
+                };
+                if blob_opt.is_some() {
+                    return Err("Repeat blob index".to_string());
+                } else {
+                    *blob_opt = Some(blob);
+                }
+            }
+            let blobs = VariableList::from(blobs_buffer.into_iter().flatten().collect::<Vec<_>>());
+            responses.push(RpcBlock::new(None, block, Some(blobs)).map_err(|e| format!("{e:?}"))?)
+        }
+
+        // If any sidecars are left over they paired with no block, so the response is invalid.
+        if blob_iter.next().is_some() {
+            return Err("Received sidecars that don't pair well".to_string());
+        }
+
+        Ok(responses)
+    }
+
+    pub fn is_finished(&self) -> bool {
+        self.is_blocks_stream_terminated && self.is_sidecars_stream_terminated
+    }
+}
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs
index b910f7b33..acb735ea4 100644
--- a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -35,25 +35,36 @@
 use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart};
 use super::block_lookups::BlockLookups;
-use super::network_context::SyncNetworkContext;
+use super::network_context::{BlockOrBlob, SyncNetworkContext};
 use super::peer_sync_info::{remote_sync_type, PeerSyncType};
 use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
 use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor};
 use crate::service::NetworkMessage;
 use crate::status::ToStatusMessage;
-use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState};
+use crate::sync::block_lookups::common::{Current, Parent};
+use crate::sync::block_lookups::{BlobRequestState, BlockRequestState};
+use crate::sync::network_context::BlocksAndBlobsByRangeRequest;
+use crate::sync::range_sync::ByRangeRequestType;
+use beacon_chain::block_verification_types::AsBlock;
+use beacon_chain::block_verification_types::RpcBlock;
+use beacon_chain::data_availability_checker::ChildComponents;
+use beacon_chain::{
+    AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState,
+};
 use futures::StreamExt;
-use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
+use lighthouse_network::rpc::RPCError;
 use lighthouse_network::types::{NetworkGlobals, SyncState};
 use lighthouse_network::SyncInfo;
 use lighthouse_network::{PeerAction, PeerId};
-use slog::{crit, debug, error, info, trace, Logger};
+use slog::{crit, debug, error, info, trace, warn, Logger};
 use std::boxed::Box;
+use std::ops::IndexMut;
 use std::ops::Sub;
 use std::sync::Arc;
 use std::time::Duration;
 use tokio::sync::mpsc;
-use types::{EthSpec, Hash256, SignedBeaconBlock, Slot};
+use types::blob_sidecar::FixedBlobSidecarList;
+use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
 
 /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync
 /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a
@@ -66,21 +77,37 @@
 pub const SLOT_IMPORT_TOLERANCE: usize = 32;
 
 pub type Id = u32;
 
+#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
+pub struct SingleLookupReqId {
+    pub id: Id,
+    pub req_counter: Id,
+}
+
 /// Id of rpc requests sent by sync to the network.
 #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
 pub enum RequestId {
     /// Request searching for a block given a hash.
-    SingleBlock { id: Id },
-    /// Request searching for a block's parent. The id is the chain
-    ParentLookup { id: Id },
+    SingleBlock { id: SingleLookupReqId },
+    /// Request searching for a set of blobs given a hash.
+    SingleBlob { id: SingleLookupReqId },
+    /// Request searching for a block's parent. The id is the chain id, shared with the
+    /// corresponding blob id.
+    ParentLookup { id: SingleLookupReqId },
+    /// Request searching for a block's parent blobs. The id is the chain id, shared with the
+    /// corresponding block id.
+    ParentLookupBlob { id: SingleLookupReqId },
     /// Request was from the backfill sync algorithm.
-    BackFillSync { id: Id },
+    BackFillBlocks { id: Id },
+    /// Backfill request that is composed of both a block range request and a blob range request.
+    BackFillBlockAndBlobs { id: Id },
     /// The request was from a chain in the range sync algorithm.
-    RangeSync { id: Id },
+    RangeBlocks { id: Id },
+    /// Range request that is composed of both a block range request and a blob range request.
+    RangeBlockAndBlobs { id: Id },
 }
 
 #[derive(Debug)]
-/// A message than can be sent to the sync manager thread.
+/// A message that can be sent to the sync manager thread.
 pub enum SyncMessage<T: EthSpec> {
     /// A useful peer has been discovered.
     AddPeer(PeerId, SyncInfo),
@@ -93,12 +120,23 @@ pub enum SyncMessage<T: EthSpec> {
         seen_timestamp: Duration,
     },
 
-    /// A block with an unknown parent has been received.
-    UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>, Hash256),
+    /// A blob has been received from the RPC.
+    RpcBlob {
+        request_id: RequestId,
+        peer_id: PeerId,
+        blob_sidecar: Option<Arc<BlobSidecar<T>>>,
+        seen_timestamp: Duration,
+    },
 
-    /// A peer has sent an object that references a block that is unknown. This triggers the
+    /// A block with an unknown parent has been received.
+    UnknownParentBlock(PeerId, RpcBlock<T>, Hash256),
+
+    /// A blob with an unknown parent has been received.
+    UnknownParentBlob(PeerId, Arc<BlobSidecar<T>>),
+
+    /// A peer has sent an attestation that references a block that is unknown. This triggers the
     /// manager to attempt to find the block matching the unknown hash.
-    UnknownBlockHash(PeerId, Hash256),
+    UnknownBlockHashFromAttestation(PeerId, Hash256),
 
     /// A peer has disconnected.
     Disconnect(PeerId),
@@ -107,6 +145,7 @@ pub enum SyncMessage<T: EthSpec> {
     RpcError {
         peer_id: PeerId,
         request_id: RequestId,
+        error: RPCError,
    },
 
     /// A batch has been processed by the block processor thread.
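The `req_counter` added to `SingleLookupReqId` lets lookup code tell a late response from a superseded request apart from the response to the current attempt. A minimal, self-contained sketch of that pattern, with illustrative names only (this is not the lookup code in this diff): each retry bumps the counter, and any response still carrying the old counter is dropped.

use std::collections::HashMap;

type Id = u32;

#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
struct SingleLookupReqId {
    id: Id,          // identifies the lookup itself
    req_counter: Id, // bumped on every (re-)request for this lookup
}

struct Lookup {
    req_counter: Id, // counter of the lookup's current in-flight attempt
}

struct Lookups {
    active: HashMap<Id, Lookup>,
}

impl Lookups {
    /// Accept a response only if it belongs to the lookup's current attempt.
    fn on_response(&self, req_id: SingleLookupReqId, payload: &str) {
        match self.active.get(&req_id.id) {
            Some(lookup) if lookup.req_counter == req_id.req_counter => {
                println!("processing {payload} for lookup {}", req_id.id);
            }
            _ => println!("dropping stale response for lookup {}", req_id.id),
        }
    }

    /// Retry: bump the counter so responses still in flight from the old
    /// attempt no longer match.
    fn retry(&mut self, id: Id) -> Option<SingleLookupReqId> {
        let lookup = self.active.get_mut(&id)?;
        lookup.req_counter += 1;
        Some(SingleLookupReqId {
            id,
            req_counter: lookup.req_counter,
        })
    }
}

fn main() {
    let mut lookups = Lookups {
        active: HashMap::from([(7, Lookup { req_counter: 1 })]),
    };
    let stale = SingleLookupReqId { id: 7, req_counter: 1 };
    let fresh = lookups.retry(7).expect("lookup exists");
    lookups.on_response(stale, "block from a slow peer"); // dropped
    lookups.on_response(fresh, "block from the retry"); // processed
}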
@@ -116,9 +155,9 @@ pub enum SyncMessage { }, /// Block processed - BlockProcessed { + BlockComponentProcessed { process_type: BlockProcessType, - result: BlockProcessResult, + result: BlockProcessingResult, }, } @@ -126,12 +165,13 @@ pub enum SyncMessage { #[derive(Debug, Clone)] pub enum BlockProcessType { SingleBlock { id: Id }, + SingleBlob { id: Id }, ParentLookup { chain_hash: Hash256 }, } #[derive(Debug)] -pub enum BlockProcessResult { - Ok, +pub enum BlockProcessingResult { + Ok(AvailabilityProcessingStatus), Err(BlockError), Ignored, } @@ -189,7 +229,7 @@ pub fn spawn( log: slog::Logger, ) { assert!( - MAX_REQUEST_BLOCKS >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, + beacon_chain.spec.max_request_blocks >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request" ); @@ -198,10 +238,18 @@ pub fn spawn( let mut sync_manager = SyncManager { chain: beacon_chain.clone(), input_channel: sync_recv, - network: SyncNetworkContext::new(network_send, beacon_processor, log.clone()), + network: SyncNetworkContext::new( + network_send, + beacon_processor.clone(), + beacon_chain.clone(), + log.clone(), + ), range_sync: RangeSync::new(beacon_chain.clone(), log.clone()), - backfill_sync: BackFillSync::new(beacon_chain, network_globals, log.clone()), - block_lookups: BlockLookups::new(log.clone()), + backfill_sync: BackFillSync::new(beacon_chain.clone(), network_globals, log.clone()), + block_lookups: BlockLookups::new( + beacon_chain.data_availability_checker.clone(), + log.clone(), + ), log: log.clone(), }; @@ -250,19 +298,50 @@ impl SyncManager { } /// Handles RPC errors related to requests that were emitted from the sync manager. - fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId) { + fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { trace!(self.log, "Sync manager received a failed RPC"); match request_id { RequestId::SingleBlock { id } => { self.block_lookups - .single_block_lookup_failed(id, &mut self.network); + .single_block_lookup_failed::>( + id, + &peer_id, + &self.network, + error, + ); + } + RequestId::SingleBlob { id } => { + self.block_lookups + .single_block_lookup_failed::>( + id, + &peer_id, + &self.network, + error, + ); } RequestId::ParentLookup { id } => { self.block_lookups - .parent_lookup_failed(id, peer_id, &mut self.network); + .parent_lookup_failed::>( + id, + peer_id, + &self.network, + error, + ); } - RequestId::BackFillSync { id } => { - if let Some(batch_id) = self.network.backfill_sync_response(id, true) { + RequestId::ParentLookupBlob { id } => { + self.block_lookups + .parent_lookup_failed::>( + id, + peer_id, + &self.network, + error, + ); + } + RequestId::BackFillBlocks { id } => { + if let Some(batch_id) = self + .network + .backfill_request_failed(id, ByRangeRequestType::Blocks) + { match self .backfill_sync .inject_error(&mut self.network, batch_id, &peer_id, id) @@ -272,8 +351,41 @@ impl SyncManager { } } } - RequestId::RangeSync { id } => { - if let Some((chain_id, batch_id)) = self.network.range_sync_response(id, true) { + + RequestId::BackFillBlockAndBlobs { id } => { + if let Some(batch_id) = self + .network + .backfill_request_failed(id, ByRangeRequestType::BlocksAndBlobs) + { + match self + .backfill_sync + .inject_error(&mut self.network, batch_id, &peer_id, id) + { + Ok(_) => {} + Err(_) => self.update_sync_state(), + } + } + } + RequestId::RangeBlocks { id } => { + if let Some((chain_id, 
batch_id)) = self + .network + .range_sync_request_failed(id, ByRangeRequestType::Blocks) + { + self.range_sync.inject_error( + &mut self.network, + peer_id, + batch_id, + chain_id, + id, + ); + self.update_sync_state() + } + } + RequestId::RangeBlockAndBlobs { id } => { + if let Some((chain_id, batch_id)) = self + .network + .range_sync_request_failed(id, ByRangeRequestType::BlocksAndBlobs) + { self.range_sync.inject_error( &mut self.network, peer_id, @@ -499,37 +611,47 @@ impl SyncManager { } => { self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); } - SyncMessage::UnknownBlock(peer_id, block, block_root) => { - // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore - if !self.network_globals().sync_state.read().is_synced() { - let head_slot = self.chain.canonical_head.cached_head().head_slot(); - let unknown_block_slot = block.slot(); - - // if the block is far in the future, ignore it. If its within the slot tolerance of - // our current head, regardless of the syncing state, fetch it. - if (head_slot >= unknown_block_slot - && head_slot.sub(unknown_block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) - || (head_slot < unknown_block_slot - && unknown_block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) - { - return; - } - } - if self.network_globals().peers.read().is_connected(&peer_id) - && self.network.is_execution_engine_online() - { - self.block_lookups - .search_parent(block_root, block, peer_id, &mut self.network); - } + SyncMessage::RpcBlob { + request_id, + peer_id, + blob_sidecar, + seen_timestamp, + } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp), + SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { + let block_slot = block.slot(); + let parent_root = block.parent_root(); + self.handle_unknown_parent( + peer_id, + block_root, + parent_root, + block_slot, + block.into(), + ); } - SyncMessage::UnknownBlockHash(peer_id, block_hash) => { + SyncMessage::UnknownParentBlob(peer_id, blob) => { + let blob_slot = blob.slot(); + let block_root = blob.block_root(); + let parent_root = blob.block_parent_root(); + let blob_index = blob.index; + if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { + warn!(self.log, "Peer sent blob with invalid index"; "index" => blob_index, "peer_id" => %peer_id); + return; + } + let mut blobs = FixedBlobSidecarList::default(); + *blobs.index_mut(blob_index as usize) = Some(blob); + self.handle_unknown_parent( + peer_id, + block_root, + parent_root, + blob_slot, + ChildComponents::new(block_root, None, Some(blobs)), + ); + } + SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_hash) => { // If we are not synced, ignore this block. 
- if self.network_globals().sync_state.read().is_synced() - && self.network_globals().peers.read().is_connected(&peer_id) - && self.network.is_execution_engine_online() - { + if self.synced_and_connected(&peer_id) { self.block_lookups - .search_block(block_hash, peer_id, &mut self.network); + .search_block(block_hash, &[peer_id], &mut self.network); } } SyncMessage::Disconnect(peer_id) => { @@ -538,15 +660,26 @@ impl SyncManager { SyncMessage::RpcError { peer_id, request_id, - } => self.inject_error(peer_id, request_id), - SyncMessage::BlockProcessed { + error, + } => self.inject_error(peer_id, request_id, error), + SyncMessage::BlockComponentProcessed { process_type, result, } => match process_type { - BlockProcessType::SingleBlock { id } => { - self.block_lookups - .single_block_processed(id, result, &mut self.network) - } + BlockProcessType::SingleBlock { id } => self + .block_lookups + .single_block_component_processed::>( + id, + result, + &mut self.network, + ), + BlockProcessType::SingleBlob { id } => self + .block_lookups + .single_block_component_processed::>( + id, + result, + &mut self.network, + ), BlockProcessType::ParentLookup { chain_hash } => self .block_lookups .parent_block_processed(chain_hash, result, &mut self.network), @@ -578,11 +711,64 @@ impl SyncManager { } ChainSegmentProcessId::ParentLookup(chain_hash) => self .block_lookups - .parent_chain_processed(chain_hash, result, &mut self.network), + .parent_chain_processed(chain_hash, result, &self.network), }, } } + fn handle_unknown_parent( + &mut self, + peer_id: PeerId, + block_root: Hash256, + parent_root: Hash256, + slot: Slot, + child_components: ChildComponents, + ) { + if self.should_search_for_block(slot, &peer_id) { + self.block_lookups.search_parent( + slot, + block_root, + parent_root, + peer_id, + &mut self.network, + ); + self.block_lookups.search_child_block( + block_root, + child_components, + &[peer_id], + &mut self.network, + ); + } + } + + fn should_search_for_block(&mut self, block_slot: Slot, peer_id: &PeerId) -> bool { + if !self.network_globals().sync_state.read().is_synced() { + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + + // if the block is far in the future, ignore it. If its within the slot tolerance of + // our current head, regardless of the syncing state, fetch it. 
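+            // e.g. with SLOT_IMPORT_TOLERANCE = 32 and our head at slot 1000, a block in
+            // slots 968..=1032 is fetched; anything further away is ignored until synced.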
+ if (head_slot >= block_slot + && head_slot.sub(block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + || (head_slot < block_slot + && block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + { + return false; + } + } + + self.network_globals().peers.read().is_connected(peer_id) + && self.network.is_execution_engine_online() + } + + fn synced(&mut self) -> bool { + self.network_globals().sync_state.read().is_synced() + && self.network.is_execution_engine_online() + } + + fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool { + self.synced() && self.network_globals().peers.read().is_connected(peer_id) + } + fn handle_new_execution_engine_state(&mut self, engine_state: EngineState) { self.network.update_execution_engine_state(engine_state); @@ -642,35 +828,46 @@ impl SyncManager { &mut self, request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + block: Option>>, seen_timestamp: Duration, ) { match request_id { - RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response( - id, - peer_id, - beacon_block, - seen_timestamp, - &mut self.network, - ), - RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response( - id, - peer_id, - beacon_block, - seen_timestamp, - &mut self.network, - ), - RequestId::BackFillSync { id } => { + RequestId::SingleBlock { id } => self + .block_lookups + .single_lookup_response::>( + id, + peer_id, + block, + seen_timestamp, + &self.network, + ), + RequestId::SingleBlob { .. } => { + crit!(self.log, "Block received during blob request"; "peer_id" => %peer_id ); + } + RequestId::ParentLookup { id } => self + .block_lookups + .parent_lookup_response::>( + id, + peer_id, + block, + seen_timestamp, + &self.network, + ), + RequestId::ParentLookupBlob { id: _ } => { + crit!(self.log, "Block received during parent blob request"; "peer_id" => %peer_id ); + } + RequestId::BackFillBlocks { id } => { + let is_stream_terminator = block.is_none(); if let Some(batch_id) = self .network - .backfill_sync_response(id, beacon_block.is_none()) + .backfill_sync_only_blocks_response(id, is_stream_terminator) { match self.backfill_sync.on_block_response( &mut self.network, batch_id, &peer_id, id, - beacon_block, + block.map(|b| RpcBlock::new_without_blobs(None, b)), ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -682,9 +879,11 @@ impl SyncManager { } } } - RequestId::RangeSync { id } => { - if let Some((chain_id, batch_id)) = - self.network.range_sync_response(id, beacon_block.is_none()) + RequestId::RangeBlocks { id } => { + let is_stream_terminator = block.is_none(); + if let Some((chain_id, batch_id)) = self + .network + .range_sync_block_only_response(id, is_stream_terminator) { self.range_sync.blocks_by_range_response( &mut self.network, @@ -692,26 +891,222 @@ impl SyncManager { chain_id, batch_id, id, - beacon_block, + block.map(|b| RpcBlock::new_without_blobs(None, b)), ); self.update_sync_state(); } } + RequestId::BackFillBlockAndBlobs { id } => { + self.backfill_block_and_blobs_response(id, peer_id, block.into()) + } + RequestId::RangeBlockAndBlobs { id } => { + self.range_block_and_blobs_response(id, peer_id, block.into()) + } + } + } + + fn rpc_blob_received( + &mut self, + request_id: RequestId, + peer_id: PeerId, + blob: Option>>, + seen_timestamp: Duration, + ) { + match request_id { + RequestId::SingleBlock { .. 
} => { + crit!(self.log, "Single blob received during block request"; "peer_id" => %peer_id ); + } + RequestId::SingleBlob { id } => { + if let Some(blob) = blob.as_ref() { + debug!(self.log, + "Peer returned blob for single lookup"; + "peer_id" => %peer_id , + "blob_id" =>?blob.id() + ); + } + self.block_lookups + .single_lookup_response::>( + id, + peer_id, + blob, + seen_timestamp, + &self.network, + ) + } + + RequestId::ParentLookup { id: _ } => { + crit!(self.log, "Single blob received during parent block request"; "peer_id" => %peer_id ); + } + RequestId::ParentLookupBlob { id } => { + if let Some(blob) = blob.as_ref() { + debug!(self.log, + "Peer returned blob for parent lookup"; + "peer_id" => %peer_id , + "blob_id" =>?blob.id() + ); + } + self.block_lookups + .parent_lookup_response::>( + id, + peer_id, + blob, + seen_timestamp, + &self.network, + ) + } + RequestId::BackFillBlocks { id: _ } => { + crit!(self.log, "Blob received during backfill block request"; "peer_id" => %peer_id ); + } + RequestId::RangeBlocks { id: _ } => { + crit!(self.log, "Blob received during range block request"; "peer_id" => %peer_id ); + } + RequestId::BackFillBlockAndBlobs { id } => { + self.backfill_block_and_blobs_response(id, peer_id, blob.into()) + } + RequestId::RangeBlockAndBlobs { id } => { + self.range_block_and_blobs_response(id, peer_id, blob.into()) + } + } + } + + /// Handles receiving a response for a range sync request that should have both blocks and + /// blobs. + fn range_block_and_blobs_response( + &mut self, + id: Id, + peer_id: PeerId, + block_or_blob: BlockOrBlob, + ) { + if let Some((chain_id, resp)) = self + .network + .range_sync_block_and_blob_response(id, block_or_blob) + { + match resp.responses { + Ok(blocks) => { + for block in blocks + .into_iter() + .map(Some) + // chain the stream terminator + .chain(vec![None]) + { + self.range_sync.blocks_by_range_response( + &mut self.network, + peer_id, + chain_id, + resp.batch_id, + id, + block, + ); + self.update_sync_state(); + } + } + Err(e) => { + // Re-insert the request so we can retry + let new_req = BlocksAndBlobsByRangeRequest { + chain_id, + batch_id: resp.batch_id, + block_blob_info: <_>::default(), + }; + self.network + .insert_range_blocks_and_blobs_request(id, new_req); + // inform range that the request needs to be treated as failed + // With time we will want to downgrade this log + warn!( + self.log, + "Blocks and blobs request for range received invalid data"; + "peer_id" => %peer_id, + "batch_id" => resp.batch_id, + "error" => e.clone() + ); + let id = RequestId::RangeBlockAndBlobs { id }; + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "block_blob_faulty_batch", + ); + self.inject_error(peer_id, id, RPCError::InvalidData(e)) + } + } + } + } + + /// Handles receiving a response for a Backfill sync request that should have both blocks and + /// blobs. 
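+    /// Mirrors the range handler above: responses accumulate until both streams
+    /// terminate, then the coupled blocks are replayed to the backfill sync (with a
+    /// `None` stream terminator chained on); on a coupling error the request is
+    /// re-inserted for retry and the peer penalised.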
+ fn backfill_block_and_blobs_response( + &mut self, + id: Id, + peer_id: PeerId, + block_or_blob: BlockOrBlob, + ) { + if let Some(resp) = self + .network + .backfill_sync_block_and_blob_response(id, block_or_blob) + { + match resp.responses { + Ok(blocks) => { + for block in blocks + .into_iter() + .map(Some) + // chain the stream terminator + .chain(vec![None]) + { + match self.backfill_sync.on_block_response( + &mut self.network, + resp.batch_id, + &peer_id, + id, + block, + ) { + Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), + Ok(ProcessResult::Successful) => {} + Err(_error) => { + // The backfill sync has failed, errors are reported + // within. + self.update_sync_state(); + } + } + } + } + Err(e) => { + // Re-insert the request so we can retry + self.network.insert_backfill_blocks_and_blobs_requests( + id, + resp.batch_id, + <_>::default(), + ); + + // inform backfill that the request needs to be treated as failed + // With time we will want to downgrade this log + warn!( + self.log, "Blocks and blobs request for backfill received invalid data"; + "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e.clone() + ); + let id = RequestId::BackFillBlockAndBlobs { id }; + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "block_blob_faulty_backfill_batch", + ); + self.inject_error(peer_id, id, RPCError::InvalidData(e)) + } + } } } } -impl From>> for BlockProcessResult { - fn from(result: Result>) -> Self { +impl From>> + for BlockProcessingResult +{ + fn from(result: Result>) -> Self { match result { - Ok(_) => BlockProcessResult::Ok, - Err(e) => e.into(), + Ok(status) => BlockProcessingResult::Ok(status), + Err(e) => BlockProcessingResult::Err(e), } } } -impl From> for BlockProcessResult { +impl From> for BlockProcessingResult { fn from(e: BlockError) -> Self { - BlockProcessResult::Err(e) + BlockProcessingResult::Err(e) } } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index dc18a5c98..7b244bcec 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -3,6 +3,7 @@ //! Stores the various syncing methods for the beacon chain. mod backfill_sync; mod block_lookups; +mod block_sidecar_coupling; pub mod manager; mod network_context; mod peer_sync_info; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index adc235130..04feb8fdc 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,21 +1,38 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. 
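+//!
+//! Since Deneb, a single sync request may fan out into a paired block request and blob
+//! request; both halves share one request id so that their responses can be coupled
+//! back together (see `BlocksAndBlobsRequestInfo`).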
+use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use super::manager::{Id, RequestId as SyncRequestId}; -use super::range_sync::{BatchId, ChainId}; +use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; -use beacon_chain::{BeaconChainTypes, EngineState}; +use crate::sync::block_lookups::common::LookupType; +use crate::sync::manager::SingleLookupReqId; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; use fnv::FnvHashMap; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; use slog::{debug, trace, warn}; +use std::collections::hash_map::Entry; use std::sync::Arc; use tokio::sync::mpsc; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; + +pub struct BlocksAndBlobsByRangeResponse { + pub batch_id: BatchId, + pub responses: Result>, String>, +} + +pub struct BlocksAndBlobsByRangeRequest { + pub chain_id: ChainId, + pub batch_id: BatchId, + pub block_blob_info: BlocksAndBlobsRequestInfo, +} /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. - pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. network_send: mpsc::UnboundedSender>, @@ -29,6 +46,13 @@ pub struct SyncNetworkContext { /// BlocksByRange requests made by backfill syncing. backfill_requests: FnvHashMap, + /// BlocksByRange requests paired with BlobsByRange requests made by the range. + range_blocks_and_blobs_requests: FnvHashMap>, + + /// BlocksByRange requests paired with BlobsByRange requests made by the backfill sync. + backfill_blocks_and_blobs_requests: + FnvHashMap)>, + /// Whether the ee is online. If it's not, we don't allow access to the /// `beacon_processor_send`. execution_engine_state: EngineState, @@ -36,23 +60,47 @@ pub struct SyncNetworkContext { /// Sends work to the beacon processor via a channel. network_beacon_processor: Arc>, + pub chain: Arc>, + /// Logger for the `SyncNetworkContext`. - log: slog::Logger, + pub log: slog::Logger, +} + +/// Small enumeration to make dealing with block and blob requests easier. 
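+/// The `From` impls below convert either RPC response into this enum, so block and
+/// blob responses can be funnelled through a single handler.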
+pub enum BlockOrBlob { + Block(Option>>), + Blob(Option>>), +} + +impl From>>> for BlockOrBlob { + fn from(block: Option>>) -> Self { + BlockOrBlob::Block(block) + } +} + +impl From>>> for BlockOrBlob { + fn from(blob: Option>>) -> Self { + BlockOrBlob::Blob(blob) + } } impl SyncNetworkContext { pub fn new( network_send: mpsc::UnboundedSender>, network_beacon_processor: Arc>, + chain: Arc>, log: slog::Logger, ) -> Self { - Self { + SyncNetworkContext { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start request_id: 1, range_requests: FnvHashMap::default(), backfill_requests: FnvHashMap::default(), + range_blocks_and_blobs_requests: FnvHashMap::default(), + backfill_blocks_and_blobs_requests: FnvHashMap::default(), network_beacon_processor, + chain, log, } } @@ -71,11 +119,7 @@ impl SyncNetworkContext { .unwrap_or_default() } - pub fn status_peers( - &mut self, - chain: &C, - peers: impl Iterator, - ) { + pub fn status_peers(&self, chain: &C, peers: impl Iterator) { let status_message = chain.status_message(); for peer_id in peers { debug!( @@ -103,123 +147,363 @@ impl SyncNetworkContext { pub fn blocks_by_range_request( &mut self, peer_id: PeerId, + batch_type: ByRangeRequestType, request: BlocksByRangeRequest, chain_id: ChainId, batch_id: BatchId, ) -> Result { - trace!( - self.log, - "Sending BlocksByRange Request"; - "method" => "BlocksByRange", - "count" => request.count(), - "peer" => %peer_id, - ); - let request = Request::BlocksByRange(request); - let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::RangeSync { id }); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - })?; - self.range_requests.insert(id, (chain_id, batch_id)); - Ok(id) + match batch_type { + ByRangeRequestType::Blocks => { + trace!( + self.log, + "Sending BlocksByRange request"; + "method" => "BlocksByRange", + "count" => request.count(), + "peer" => %peer_id, + ); + let request = Request::BlocksByRange(request); + let id = self.next_id(); + let request_id = RequestId::Sync(SyncRequestId::RangeBlocks { id }); + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request, + request_id, + })?; + self.range_requests.insert(id, (chain_id, batch_id)); + Ok(id) + } + ByRangeRequestType::BlocksAndBlobs => { + debug!( + self.log, + "Sending BlocksByRange and BlobsByRange requests"; + "method" => "Mixed by range request", + "count" => request.count(), + "peer" => %peer_id, + ); + + // create the shared request id. This is fine since the rpc handles substream ids. + let id = self.next_id(); + let request_id = RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }); + + // Create the blob request based on the blob request. + let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }); + let blocks_request = Request::BlocksByRange(request); + + // Send both requests. Make sure both can be sent. 
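+                // Both messages carry the same `request_id`; the sync manager later uses
+                // it to route each block/blob response into the shared
+                // `BlocksAndBlobsRequestInfo` accumulator.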
+ self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: blocks_request, + request_id, + })?; + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: blobs_request, + request_id, + })?; + let block_blob_info = BlocksAndBlobsRequestInfo::default(); + self.range_blocks_and_blobs_requests.insert( + id, + BlocksAndBlobsByRangeRequest { + chain_id, + batch_id, + block_blob_info, + }, + ); + Ok(id) + } + } } /// A blocks by range request sent by the backfill sync algorithm pub fn backfill_blocks_by_range_request( &mut self, peer_id: PeerId, + batch_type: ByRangeRequestType, request: BlocksByRangeRequest, batch_id: BatchId, ) -> Result { - trace!( - self.log, - "Sending backfill BlocksByRange Request"; - "method" => "BlocksByRange", - "count" => request.count(), - "peer" => %peer_id, - ); - let request = Request::BlocksByRange(request); - let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::BackFillSync { id }); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - })?; - self.backfill_requests.insert(id, batch_id); - Ok(id) + match batch_type { + ByRangeRequestType::Blocks => { + trace!( + self.log, + "Sending backfill BlocksByRange request"; + "method" => "BlocksByRange", + "count" => request.count(), + "peer" => %peer_id, + ); + let request = Request::BlocksByRange(request); + let id = self.next_id(); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlocks { id }); + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request, + request_id, + })?; + self.backfill_requests.insert(id, batch_id); + Ok(id) + } + ByRangeRequestType::BlocksAndBlobs => { + debug!( + self.log, + "Sending backfill BlocksByRange and BlobsByRange requests"; + "method" => "Mixed by range request", + "count" => request.count(), + "peer" => %peer_id, + ); + + // create the shared request id. This is fine since the rpc handles substream ids. + let id = self.next_id(); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlockAndBlobs { id }); + + // Create the blob request based on the blob request. + let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }); + let blocks_request = Request::BlocksByRange(request); + + // Send both requests. Make sure both can be sent. + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: blocks_request, + request_id, + })?; + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: blobs_request, + request_id, + })?; + let block_blob_info = BlocksAndBlobsRequestInfo::default(); + self.backfill_blocks_and_blobs_requests + .insert(id, (batch_id, block_blob_info)); + Ok(id) + } + } } - /// Received a blocks by range response. - pub fn range_sync_response( + /// Response for a request that is only for blocks. + pub fn range_sync_block_only_response( &mut self, request_id: Id, - remove: bool, + is_stream_terminator: bool, ) -> Option<(ChainId, BatchId)> { - if remove { + if is_stream_terminator { self.range_requests.remove(&request_id) } else { - self.range_requests.get(&request_id).cloned() + self.range_requests.get(&request_id).copied() } } - /// Received a blocks by range response. - pub fn backfill_sync_response(&mut self, request_id: Id, remove: bool) -> Option { - if remove { + /// Received a blocks by range response for a request that couples blocks and blobs. 
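+    /// Feeds the block or blob into the accumulator for this request id and, once both
+    /// streams have terminated, returns the coupled responses for the batch.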
+ pub fn range_sync_block_and_blob_response( + &mut self, + request_id: Id, + block_or_blob: BlockOrBlob, + ) -> Option<(ChainId, BlocksAndBlobsByRangeResponse)> { + match self.range_blocks_and_blobs_requests.entry(request_id) { + Entry::Occupied(mut entry) => { + let req = entry.get_mut(); + let info = &mut req.block_blob_info; + match block_or_blob { + BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + } + if info.is_finished() { + // If the request is finished, dequeue everything + let BlocksAndBlobsByRangeRequest { + chain_id, + batch_id, + block_blob_info, + } = entry.remove(); + Some(( + chain_id, + BlocksAndBlobsByRangeResponse { + batch_id, + responses: block_blob_info.into_responses(), + }, + )) + } else { + None + } + } + Entry::Vacant(_) => None, + } + } + + pub fn range_sync_request_failed( + &mut self, + request_id: Id, + batch_type: ByRangeRequestType, + ) -> Option<(ChainId, BatchId)> { + let req = match batch_type { + ByRangeRequestType::BlocksAndBlobs => self + .range_blocks_and_blobs_requests + .remove(&request_id) + .map(|req| (req.chain_id, req.batch_id)), + ByRangeRequestType::Blocks => self.range_requests.remove(&request_id), + }; + if let Some(req) = req { + debug!( + self.log, + "Range sync request failed"; + "request_id" => request_id, + "batch_type" => ?batch_type, + "chain_id" => ?req.0, + "batch_id" => ?req.1 + ); + Some(req) + } else { + debug!(self.log, "Range sync request failed"; "request_id" => request_id, "batch_type" => ?batch_type); + None + } + } + + pub fn backfill_request_failed( + &mut self, + request_id: Id, + batch_type: ByRangeRequestType, + ) -> Option { + let batch_id = match batch_type { + ByRangeRequestType::BlocksAndBlobs => self + .backfill_blocks_and_blobs_requests + .remove(&request_id) + .map(|(batch_id, _info)| batch_id), + ByRangeRequestType::Blocks => self.backfill_requests.remove(&request_id), + }; + if let Some(batch_id) = batch_id { + debug!( + self.log, + "Backfill sync request failed"; + "request_id" => request_id, + "batch_type" => ?batch_type, + "batch_id" => ?batch_id + ); + Some(batch_id) + } else { + debug!(self.log, "Backfill sync request failed"; "request_id" => request_id, "batch_type" => ?batch_type); + None + } + } + + /// Response for a request that is only for blocks. + pub fn backfill_sync_only_blocks_response( + &mut self, + request_id: Id, + is_stream_terminator: bool, + ) -> Option { + if is_stream_terminator { self.backfill_requests.remove(&request_id) } else { - self.backfill_requests.get(&request_id).cloned() + self.backfill_requests.get(&request_id).copied() } } - /// Sends a blocks by root request for a single block lookup. - pub fn single_block_lookup_request( + /// Received a blocks by range or blobs by range response for a request that couples blocks ' + /// and blobs. 
+ pub fn backfill_sync_block_and_blob_response( &mut self, - peer_id: PeerId, - request: BlocksByRootRequest, - ) -> Result { - trace!( - self.log, - "Sending BlocksByRoot Request"; - "method" => "BlocksByRoot", - "count" => request.block_roots().len(), - "peer" => %peer_id - ); - let request = Request::BlocksByRoot(request); - let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id }); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - })?; - Ok(id) + request_id: Id, + block_or_blob: BlockOrBlob, + ) -> Option> { + match self.backfill_blocks_and_blobs_requests.entry(request_id) { + Entry::Occupied(mut entry) => { + let (_, info) = entry.get_mut(); + match block_or_blob { + BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + } + if info.is_finished() { + // If the request is finished, dequeue everything + let (batch_id, info) = entry.remove(); + + let responses = info.into_responses(); + Some(BlocksAndBlobsByRangeResponse { + batch_id, + responses, + }) + } else { + None + } + } + Entry::Vacant(_) => None, + } } - /// Sends a blocks by root request for a parent request. - pub fn parent_lookup_request( - &mut self, + pub fn block_lookup_request( + &self, + id: SingleLookupReqId, peer_id: PeerId, request: BlocksByRootRequest, - ) -> Result { - trace!( + lookup_type: LookupType, + ) -> Result<(), &'static str> { + let sync_id = match lookup_type { + LookupType::Current => SyncRequestId::SingleBlock { id }, + LookupType::Parent => SyncRequestId::ParentLookup { id }, + }; + let request_id = RequestId::Sync(sync_id); + + debug!( self.log, "Sending BlocksByRoot Request"; "method" => "BlocksByRoot", - "count" => request.block_roots().len(), - "peer" => %peer_id + "block_roots" => ?request.block_roots().to_vec(), + "peer" => %peer_id, + "lookup_type" => ?lookup_type ); - let request = Request::BlocksByRoot(request); - let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id }); + self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request, + request: Request::BlocksByRoot(request), request_id, })?; - Ok(id) + Ok(()) + } + + pub fn blob_lookup_request( + &self, + id: SingleLookupReqId, + blob_peer_id: PeerId, + blob_request: BlobsByRootRequest, + lookup_type: LookupType, + ) -> Result<(), &'static str> { + let sync_id = match lookup_type { + LookupType::Current => SyncRequestId::SingleBlob { id }, + LookupType::Parent => SyncRequestId::ParentLookupBlob { id }, + }; + let request_id = RequestId::Sync(sync_id); + + if let Some(block_root) = blob_request + .blob_ids + .as_slice() + .first() + .map(|id| id.block_root) + { + let indices = blob_request + .blob_ids + .as_slice() + .iter() + .map(|id| id.index) + .collect::>(); + debug!( + self.log, + "Sending BlobsByRoot Request"; + "method" => "BlobsByRoot", + "block_root" => ?block_root, + "blob_indices" => ?indices, + "peer" => %blob_peer_id, + "lookup_type" => ?lookup_type + ); + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id: blob_peer_id, + request: Request::BlobsByRoot(blob_request), + request_id, + })?; + } + Ok(()) } pub fn is_execution_engine_online(&self) -> bool { @@ -246,7 +530,7 @@ impl SyncNetworkContext { } /// Reports to the scoring algorithm the behaviour of a peer. 
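+    /// Takes `&self` because reporting only sends a message down the network channel.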
- pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction, msg: &'static str) { + pub fn report_peer(&self, peer_id: PeerId, action: PeerAction, msg: &'static str) { debug!(self.log, "Sync reporting peer"; "peer_id" => %peer_id, "action" => %action); self.network_send .send(NetworkMessage::ReportPeer { @@ -261,7 +545,7 @@ impl SyncNetworkContext { } /// Subscribes to core topics. - pub fn subscribe_core_topics(&mut self) { + pub fn subscribe_core_topics(&self) { self.network_send .send(NetworkMessage::SubscribeCoreTopics) .unwrap_or_else(|e| { @@ -270,7 +554,7 @@ impl SyncNetworkContext { } /// Sends an arbitrary network message. - fn send_network_msg(&mut self, msg: NetworkMessage) -> Result<(), &'static str> { + fn send_network_msg(&self, msg: NetworkMessage) -> Result<(), &'static str> { self.network_send.send(msg).map_err(|_| { debug!(self.log, "Could not send message to the network service"); "Network channel send Failed" @@ -286,9 +570,49 @@ impl SyncNetworkContext { &self.network_beacon_processor } - fn next_id(&mut self) -> Id { + pub fn next_id(&mut self) -> Id { let id = self.request_id; self.request_id += 1; id } + + /// Check whether a batch for this epoch (and only this epoch) should request just blocks or + /// blocks and blobs. + pub fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType { + // Induces a compile time panic if this doesn't hold true. + #[allow(clippy::assertions_on_constants)] + const _: () = assert!( + super::backfill_sync::BACKFILL_EPOCHS_PER_BATCH == 1 + && super::range_sync::EPOCHS_PER_BATCH == 1, + "To deal with alignment with deneb boundaries, batches need to be of just one epoch" + ); + + if let Some(data_availability_boundary) = self.chain.data_availability_boundary() { + if epoch >= data_availability_boundary { + ByRangeRequestType::BlocksAndBlobs + } else { + ByRangeRequestType::Blocks + } + } else { + ByRangeRequestType::Blocks + } + } + + pub fn insert_range_blocks_and_blobs_request( + &mut self, + id: Id, + request: BlocksAndBlobsByRangeRequest, + ) { + self.range_blocks_and_blobs_requests.insert(id, request); + } + + pub fn insert_backfill_blocks_and_blobs_requests( + &mut self, + id: Id, + batch_id: BatchId, + request: BlocksAndBlobsRequestInfo, + ) { + self.backfill_blocks_and_blobs_requests + .insert(id, (batch_id, request)); + } } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 723ea9b59..f5c320cb8 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,11 +1,12 @@ use crate::sync::manager::Id; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; -use std::sync::Arc; -use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; +use strum::Display; +use types::{Epoch, EthSpec, Slot}; /// The number of times to retry a batch before it is considered failed. const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; @@ -14,6 +15,14 @@ const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; /// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty. const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 3; +/// Type of expected batch. 
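+/// Batches covering epochs at or after the data availability boundary must request
+/// blocks and blobs together; earlier epochs request blocks only (see
+/// `SyncNetworkContext::batch_type`).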
+#[derive(Debug, Copy, Clone, Display)] +#[strum(serialize_all = "snake_case")] +pub enum ByRangeRequestType { + BlocksAndBlobs, + Blocks, +} + /// Allows customisation of the above constants used in other sync methods such as BackFillSync. pub trait BatchConfig { /// The maximum batch download attempts. @@ -47,7 +56,7 @@ pub trait BatchConfig { /// Note that simpler hashing functions considered in the past (hash of first block, hash of last /// block, number of received blocks) are not good enough to differentiate attempts. For this /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync. - fn batch_attempt_hash(blocks: &[Arc>]) -> u64; + fn batch_attempt_hash(blocks: &[RpcBlock]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -59,7 +68,7 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { + fn batch_attempt_hash(blocks: &[RpcBlock]) -> u64 { let mut hasher = std::collections::hash_map::DefaultHasher::new(); blocks.hash(&mut hasher); hasher.finish() @@ -96,6 +105,8 @@ pub struct BatchInfo { failed_download_attempts: Vec, /// State of the batch. state: BatchState, + /// Whether this batch contains all blocks or all blocks and blobs. + batch_type: ByRangeRequestType, /// Pin the generic marker: std::marker::PhantomData, } @@ -105,9 +116,9 @@ pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. AwaitingDownload, /// The batch is being downloaded. - Downloading(PeerId, Vec>>, Id), + Downloading(PeerId, Vec>, Id), /// The batch has been completely downloaded and is ready for processing. - AwaitingProcessing(PeerId, Vec>>), + AwaitingProcessing(PeerId, Vec>), /// The batch is being processed. Processing(Attempt), /// The batch was successfully processed and is waiting to be validated. @@ -139,8 +150,13 @@ impl BatchInfo { /// Epoch boundary | | /// ... | 30 | 31 | 32 | 33 | 34 | ... | 61 | 62 | 63 | 64 | 65 | /// Batch 1 | Batch 2 | Batch 3 - pub fn new(start_epoch: &Epoch, num_of_epochs: u64) -> Self { - let start_slot = start_epoch.start_slot(T::slots_per_epoch()) + 1; + /// + /// NOTE: Removed the shift by one for deneb because otherwise the last batch before the blob + /// fork boundary will be of mixed type (all blocks and one last blockblob), and I don't want to + /// deal with this for now. + /// This means finalization might be slower in deneb + pub fn new(start_epoch: &Epoch, num_of_epochs: u64, batch_type: ByRangeRequestType) -> Self { + let start_slot = start_epoch.start_slot(T::slots_per_epoch()); let end_slot = start_slot + num_of_epochs * T::slots_per_epoch(); BatchInfo { start_slot, @@ -149,6 +165,7 @@ impl BatchInfo { failed_download_attempts: Vec::new(), non_faulty_processing_attempts: 0, state: BatchState::AwaitingDownload, + batch_type, marker: std::marker::PhantomData, } } @@ -201,10 +218,13 @@ impl BatchInfo { } /// Returns a BlocksByRange request associated with the batch. - pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { - BlocksByRangeRequest::new( - self.start_slot.into(), - self.end_slot.sub(self.start_slot).into(), + pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) { + ( + BlocksByRangeRequest::new( + self.start_slot.into(), + self.end_slot.sub(self.start_slot).into(), + ), + self.batch_type, ) } @@ -231,7 +251,7 @@ impl BatchInfo { } /// Adds a block to a downloading batch. 
- pub fn add_block(&mut self, block: Arc>) -> Result<(), WrongState> { + pub fn add_block(&mut self, block: RpcBlock) -> Result<(), WrongState> { match self.state.poison() { BatchState::Downloading(peer, mut blocks, req_id) => { blocks.push(block); @@ -363,7 +383,7 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result>>, WrongState> { + pub fn start_processing(&mut self) -> Result>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); @@ -461,10 +481,7 @@ pub struct Attempt { } impl Attempt { - fn new( - peer_id: PeerId, - blocks: &[Arc>], - ) -> Self { + fn new(peer_id: PeerId, blocks: &[RpcBlock]) -> Self { let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } @@ -498,6 +515,7 @@ impl slog::KV for BatchInfo { serializer.emit_usize("processed", self.failed_processing_attempts.len())?; serializer.emit_u8("processed_no_penalty", self.non_faulty_processing_attempts)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; + serializer.emit_arguments("batch_ty", &format_args!("{}", self.batch_type))?; slog::Result::Ok(()) } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index af547885d..5a77340e3 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -3,6 +3,7 @@ use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::{ manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, }; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; @@ -10,8 +11,7 @@ use rand::seq::SliceRandom; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; -use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of /// blocks per batch are requested _at most_. A batch may request less blocks to account for @@ -19,7 +19,7 @@ use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which /// case the responder will fill the response up to the max request size, assuming they have the /// bandwidth to do so. -pub const EPOCHS_PER_BATCH: u64 = 2; +pub const EPOCHS_PER_BATCH: u64 = 1; /// The maximum number of batches to queue before requesting more. 
const BATCH_BUFFER_SIZE: u8 = 5; @@ -221,7 +221,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>>, + beacon_block: Option>, ) -> ProcessingResult { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -294,19 +294,15 @@ impl SyncingChain { return Ok(KeepChain); } - let beacon_processor = match network.beacon_processor_if_enabled() { - Some(beacon_processor) => beacon_processor, - None => return Ok(KeepChain), + let Some(beacon_processor) = network.beacon_processor_if_enabled() else { + return Ok(KeepChain); }; - let batch = match self.batches.get_mut(&batch_id) { - Some(batch) => batch, - None => { - return Err(RemoveChain::WrongChainState(format!( - "Trying to process a batch that does not exist: {}", - batch_id - ))); - } + let Some(batch) = self.batches.get_mut(&batch_id) else { + return Err(RemoveChain::WrongChainState(format!( + "Trying to process a batch that does not exist: {}", + batch_id + ))); }; // NOTE: We send empty batches to the processor in order to trigger the block processor @@ -598,6 +594,7 @@ impl SyncingChain { /// /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. + #[allow(clippy::modulo_one)] fn advance_chain(&mut self, network: &mut SyncNetworkContext, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch <= self.start_epoch { @@ -826,9 +823,24 @@ impl SyncingChain { // sending an error /timeout) if the peer is removed from the chain for other // reasons. Check that this block belongs to the expected peer if !batch.is_expecting_block(peer_id, &request_id) { + debug!( + self.log, + "Batch not expecting block"; + "batch_epoch" => batch_id, + "batch_state" => ?batch.state(), + "peer_id" => %peer_id, + "request_id" => %request_id + ); return Ok(KeepChain); } - debug!(self.log, "Batch failed. RPC Error"; "batch_epoch" => batch_id); + debug!( + self.log, + "Batch failed. RPC Error"; + "batch_epoch" => batch_id, + "batch_state" => ?batch.state(), + "peer_id" => %peer_id, + "request_id" => %request_id + ); if let Some(active_requests) = self.peers.get_mut(peer_id) { active_requests.remove(&batch_id); } @@ -840,6 +852,13 @@ impl SyncingChain { } self.retry_batch_download(network, batch_id) } else { + debug!( + self.log, + "Batch not found"; + "batch_epoch" => batch_id, + "peer_id" => %peer_id, + "request_id" => %request_id + ); // this could be an error for an old batch, removed when the chain advances Ok(KeepChain) } @@ -851,9 +870,8 @@ impl SyncingChain { network: &mut SyncNetworkContext, batch_id: BatchId, ) -> ProcessingResult { - let batch = match self.batches.get_mut(&batch_id) { - Some(batch) => batch, - None => return Ok(KeepChain), + let Some(batch) = self.batches.get_mut(&batch_id) else { + return Ok(KeepChain); }; // Find a peer to request the batch @@ -867,7 +885,7 @@ impl SyncingChain { .collect::>(); // Sort peers prioritizing unrelated peers with less active requests. 
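+            // The sort key is a tuple, so `sort_unstable` orders lexicographically:
+            // peers that have not already failed this batch sort first, with ties
+            // broken by the number of active requests.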
priorized_peers.sort_unstable(); - priorized_peers.get(0).map(|&(_, _, peer)| peer) + priorized_peers.first().map(|&(_, _, peer)| peer) }; if let Some(peer) = new_peer { @@ -886,8 +904,8 @@ impl SyncingChain { peer: PeerId, ) -> ProcessingResult { if let Some(batch) = self.batches.get_mut(&batch_id) { - let request = batch.to_blocks_by_range_request(); - match network.blocks_by_range_request(peer, request, self.id, batch_id) { + let (request, batch_type) = batch.to_blocks_by_range_request(); + match network.blocks_by_range_request(peer, batch_type, request, self.id, batch_id) { Ok(request_id) => { // inform the batch about the new request batch.start_downloading_from_peer(peer, request_id)?; @@ -991,7 +1009,8 @@ impl SyncingChain { if let Some(epoch) = self.optimistic_start { if let Entry::Vacant(entry) = self.batches.entry(epoch) { if let Some(peer) = idle_peers.pop() { - let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH); + let batch_type = network.batch_type(epoch); + let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH, batch_type); entry.insert(optimistic_batch); self.send_batch(network, epoch, peer)?; } @@ -1000,7 +1019,7 @@ impl SyncingChain { } while let Some(peer) = idle_peers.pop() { - if let Some(batch_id) = self.include_next_batch() { + if let Some(batch_id) = self.include_next_batch(network) { // send the batch self.send_batch(network, batch_id, peer)?; } else { @@ -1014,7 +1033,7 @@ impl SyncingChain { /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. - fn include_next_batch(&mut self) -> Option { + fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { // don't request batches beyond the target head slot if self .to_be_downloaded @@ -1048,10 +1067,11 @@ impl SyncingChain { Entry::Occupied(_) => { // this batch doesn't need downloading, let this same function decide the next batch self.to_be_downloaded += EPOCHS_PER_BATCH; - self.include_next_batch() + self.include_next_batch(network) } Entry::Vacant(entry) => { - entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH)); + let batch_type = network.batch_type(batch_id); + entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH, batch_type)); self.to_be_downloaded += EPOCHS_PER_BATCH; Some(batch_id) } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 65ddcefe8..364514a35 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -280,7 +280,7 @@ impl ChainCollection { old_id = Some(Some(syncing_id)); } else { // chains have the same number of peers, pick the currently syncing - // chain to avoid unnecesary switchings and try to advance it + // chain to avoid unnecessary switchings and try to advance it new_id = syncing_id; old_id = Some(None); } diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index f4db32bc9..d0f2f9217 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -8,7 +8,10 @@ mod chain_collection; mod range; mod sync_type; -pub use batch::{BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState}; +pub use batch::{ + BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, + ByRangeRequestType, +}; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use 
range::RangeSync; pub use sync_type::RangeSyncType; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 05ad5204b..e42fd936e 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -47,6 +47,7 @@ use crate::status::ToStatusMessage; use crate::sync::manager::Id; use crate::sync::network_context::SyncNetworkContext; use crate::sync::BatchProcessResult; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::PeerId; @@ -55,7 +56,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; @@ -141,13 +142,20 @@ where debug!(self.log, "Finalization sync peer joined"; "peer_id" => %peer_id); self.awaiting_head_peers.remove(&peer_id); + // Because of our change in finalized sync batch size from 2 to 1 and our transition + // to using exact epoch boundaries for batches (rather than one slot past the epoch + // boundary), we need to sync finalized sync to 2 epochs + 1 slot past our peer's + // finalized slot in order to finalize the chain locally. + let target_head_slot = + remote_finalized_slot + (2 * T::EthSpec::slots_per_epoch()) + 1; + // Note: We keep current head chains. These can continue syncing whilst we complete // this new finalized chain. self.chains.add_peer_or_create_chain( local_info.finalized_epoch, remote_info.finalized_root, - remote_finalized_slot, + target_head_slot, peer_id, RangeSyncType::Finalized, network, @@ -202,7 +210,7 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - beacon_block: Option>>, + beacon_block: Option>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { @@ -376,22 +384,21 @@ mod tests { use crate::NetworkMessage; use super::*; + use crate::sync::network_context::BlockOrBlob; use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::parking_lot::RwLock; + use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::EngineState; use beacon_processor::WorkEvent as BeaconWorkEvent; - use lighthouse_network::rpc::BlocksByRangeRequest; - use lighthouse_network::Request; use lighthouse_network::{rpc::StatusMessage, NetworkGlobals}; use slog::{o, Drain}; - use tokio::sync::mpsc; - - use slot_clock::ManualSlotClock; + use slot_clock::TestingSlotClock; use std::collections::HashSet; use std::sync::Arc; use store::MemoryStore; - use types::{Hash256, MinimalEthSpec as E}; + use tokio::sync::mpsc; + use types::{ForkName, Hash256, MinimalEthSpec as E}; #[derive(Debug)] struct FakeStorage { @@ -438,7 +445,7 @@ mod tests { } type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; + Witness, E, MemoryStore, MemoryStore>; fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { let decorator = slog_term::TermDecorator::new().build(); @@ -457,7 +464,7 @@ mod tests { log: slog::Logger, /// To check what does sync send to the beacon processor. beacon_processor_rx: mpsc::Receiver>, - /// To set up different scenarios where sync is told about known/unkown blocks. 
+ /// To set up different scenarios where sync is told about known/unknown blocks. chain: Arc, /// Needed by range to handle communication with the network. cx: SyncNetworkContext, @@ -507,18 +514,39 @@ mod tests { /// Reads an BlocksByRange request to a given peer from the network receiver channel. #[track_caller] - fn grab_request(&mut self, expected_peer: &PeerId) -> (RequestId, BlocksByRangeRequest) { - if let Ok(NetworkMessage::SendRequest { + fn grab_request( + &mut self, + expected_peer: &PeerId, + fork_name: ForkName, + ) -> (RequestId, Option) { + let block_req_id = if let Ok(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRange(request), + request: _, request_id, }) = self.network_rx.try_recv() { assert_eq!(&peer_id, expected_peer); - (request_id, request) + request_id } else { panic!("Should have sent a batch request to the peer") - } + }; + let blob_req_id = match fork_name { + ForkName::Deneb => { + if let Ok(NetworkMessage::SendRequest { + peer_id, + request: _, + request_id, + }) = self.network_rx.try_recv() + { + assert_eq!(&peer_id, expected_peer); + Some(request_id) + } else { + panic!("Should have sent a batch request to the peer") + } + } + _ => None, + }; + (block_req_id, blob_req_id) } /// Produce a head peer @@ -592,10 +620,19 @@ mod tests { } fn range(log_enabled: bool) -> (TestRig, RangeSync) { - let chain = Arc::new(FakeStorage::default()); let log = build_log(slog::Level::Trace, log_enabled); + // Initialise a new beacon chain + let harness = BeaconChainHarness::>::builder(E) + .default_spec() + .logger(log.clone()) + .deterministic_keypairs(1) + .fresh_ephemeral_store() + .build(); + let chain = harness.chain; + + let fake_store = Arc::new(FakeStorage::default()); let range_sync = RangeSync::::new( - chain.clone(), + fake_store.clone(), log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); @@ -605,12 +642,13 @@ mod tests { let cx = SyncNetworkContext::new( network_tx, Arc::new(network_beacon_processor), + chain, log.new(o!("component" => "network_context")), ); let test_rig = TestRig { log, beacon_processor_rx, - chain, + chain: fake_store, cx, network_rx, globals, @@ -628,8 +666,14 @@ mod tests { range.add_peer(&mut rig.cx, local_info, head_peer, remote_info); range.assert_state(RangeSyncType::Head); + let fork = rig + .cx + .chain + .spec + .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); + // Sync should have requested a batch, grab the request. - let _request = rig.grab_request(&head_peer); + let _ = rig.grab_request(&head_peer, fork); // Now get a peer with an advanced finalized epoch. let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); @@ -637,7 +681,7 @@ mod tests { range.assert_state(RangeSyncType::Finalized); // Sync should have requested a batch, grab the request - let _second_request = rig.grab_request(&finalized_peer); + let _ = rig.grab_request(&finalized_peer, fork); // Fail the head chain by disconnecting the peer. range.remove_peer(&mut rig.cx, &head_peer); @@ -655,8 +699,14 @@ mod tests { range.add_peer(&mut rig.cx, local_info, head_peer, head_info); range.assert_state(RangeSyncType::Head); + let fork = rig + .cx + .chain + .spec + .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); + // Sync should have requested a batch, grab the request. - let _request = rig.grab_request(&head_peer); + let _ = rig.grab_request(&head_peer, fork); // Now get a peer with an advanced finalized epoch. 
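The target head slot introduced in range.rs above compensates for the switch to one-epoch batches: finalized sync now extends two epochs plus one slot past the peer's finalized slot so the chain can finalize locally. A worked example of the arithmetic, assuming mainnet's 32 slots per epoch:

    fn target_head_slot(remote_finalized_slot: u64, slots_per_epoch: u64) -> u64 {
        remote_finalized_slot + (2 * slots_per_epoch) + 1
    }

    fn main() {
        // A peer finalized at slot 6400 (epoch 200) yields a sync target of
        // 6400 + 2 * 32 + 1 = 6465.
        assert_eq!(target_head_slot(6400, 32), 6465);
    }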
let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); @@ -665,7 +715,7 @@ mod tests { range.assert_state(RangeSyncType::Finalized); // Sync should have requested a batch, grab the request - let _second_request = rig.grab_request(&finalized_peer); + let _ = rig.grab_request(&finalized_peer, fork); // Now the chain knows both chains target roots. rig.chain.remember_block(head_peer_root); @@ -679,15 +729,39 @@ mod tests { #[test] fn pause_and_resume_on_ee_offline() { let (mut rig, mut range) = range(true); + let fork = rig + .cx + .chain + .spec + .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); // add some peers let (peer1, local_info, head_info) = rig.head_peer(); range.add_peer(&mut rig.cx, local_info, peer1, head_info); - let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { - (rig.cx.range_sync_response(id, true).unwrap(), id) + let (block_req, blob_req_opt) = rig.grab_request(&peer1, fork); + + let (chain1, batch1, id1) = if blob_req_opt.is_some() { + match block_req { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => { + let _ = rig + .cx + .range_sync_block_and_blob_response(id, BlockOrBlob::Block(None)); + let (chain1, response) = rig + .cx + .range_sync_block_and_blob_response(id, BlockOrBlob::Blob(None)) + .unwrap(); + (chain1, response.batch_id, id) + } + other => panic!("unexpected request {:?}", other), + } + } else { + match block_req { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => { + let (chain, batch) = rig.cx.range_sync_block_only_response(id, true).unwrap(); + (chain, batch, id) + } + other => panic!("unexpected request {:?}", other), } - other => panic!("unexpected request {:?}", other), }; // make the ee offline @@ -702,11 +776,30 @@ mod tests { // while the ee is offline, more peers might arrive. Add a new finalized peer. 
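From Deneb onwards each range batch issues two requests, one for blocks and one for blobs, and the sync context recombines the two response streams before the batch reaches the processor. A simplified sketch of that pairing; `BlockOrBlob` mirrors the enum used in the tests above (where `None` marks end-of-stream), while the bookkeeping around it is illustrative only:

    #[derive(Default, Debug)]
    struct BlocksAndBlobs {
        blocks_done: bool,
        blobs_done: bool,
    }

    enum BlockOrBlob {
        Block(Option<u64>), // None marks end-of-stream
        Blob(Option<u64>),
    }

    impl BlocksAndBlobs {
        /// Returns true once both streams have terminated and the batch can
        /// be handed to the processor.
        fn add(&mut self, item: BlockOrBlob) -> bool {
            match item {
                BlockOrBlob::Block(None) => self.blocks_done = true,
                BlockOrBlob::Blob(None) => self.blobs_done = true,
                _ => {} // the real implementation accumulates the payloads
            }
            self.blocks_done && self.blobs_done
        }
    }

    fn main() {
        let mut batch = BlocksAndBlobs::default();
        assert!(!batch.add(BlockOrBlob::Block(None)));
        assert!(batch.add(BlockOrBlob::Blob(None)));
    }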
let (peer2, local_info, finalized_info) = rig.finalized_peer(); range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); - let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { - (rig.cx.range_sync_response(id, true).unwrap(), id) + let (block_req, blob_req_opt) = rig.grab_request(&peer2, fork); + + let (chain2, batch2, id2) = if blob_req_opt.is_some() { + match block_req { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => { + let _ = rig + .cx + .range_sync_block_and_blob_response(id, BlockOrBlob::Block(None)); + let (chain2, response) = rig + .cx + .range_sync_block_and_blob_response(id, BlockOrBlob::Blob(None)) + .unwrap(); + (chain2, response.batch_id, id) + } + other => panic!("unexpected request {:?}", other), + } + } else { + match block_req { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => { + let (chain, batch) = rig.cx.range_sync_block_only_response(id, true).unwrap(); + (chain, batch, id) + } + other => panic!("unexpected request {:?}", other), } - other => panic!("unexpected request {:?}", other), }; // send the response to the request diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index afdbd7257..36595994f 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -16,7 +16,6 @@ ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } rayon = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" store = { workspace = true } bitvec = { workspace = true } rand = { workspace = true } @@ -25,3 +24,6 @@ rand = { workspace = true } beacon_chain = { workspace = true } tokio = { workspace = true } maplit = { workspace = true } + +[features] +portable = ["beacon_chain/portable"] \ No newline at end of file diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index fbbd5d7dd..97c291aa8 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -30,7 +30,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { if let BeaconState::Base(ref base_state) = state { Self::new_for_base(att, state, base_state, total_active_balance, spec) } else { - Self::new_for_altair(att, state, reward_cache, total_active_balance, spec) + Self::new_for_altair_deneb(att, state, reward_cache, total_active_balance, spec) } } @@ -69,7 +69,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { } /// Initialise an attestation cover object for Altair or later. - pub fn new_for_altair( + pub fn new_for_altair_deneb( att: AttestationRef<'a, T>, state: &BeaconState, reward_cache: &'a RewardCache, diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs index b65975787..f0dc6536a 100644 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ b/beacon_node/operation_pool/src/attestation_id.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; /// Serialized `AttestationData` augmented with a domain to encode the fork info. 
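The recurring `serde_derive` → `serde` import swap in these crates relies on serde's `derive` feature (serde = { version = "1", features = ["derive"] } in Cargo.toml), which re-exports the same derive macros. A minimal illustration; `serde_json` is assumed here only to exercise the derives:

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct AttestationKey {
        domain: u64,
    }

    fn main() {
        let key = AttestationKey { domain: 7 };
        let json = serde_json::to_string(&key).unwrap();
        assert_eq!(serde_json::from_str::<AttestationKey>(&json).unwrap(), key);
    }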
diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 0fb9bafd8..dac5e25b3 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -151,14 +151,8 @@ impl AttestationMap { indexed, } = SplitAttestation::new(attestation, attesting_indices); - let attestation_map = self - .checkpoint_map - .entry(checkpoint) - .or_insert_with(AttestationDataMap::default); - let attestations = attestation_map - .attestations - .entry(data) - .or_insert_with(Vec::new); + let attestation_map = self.checkpoint_map.entry(checkpoint).or_default(); + let attestations = attestation_map.attestations.entry(data).or_default(); // Greedily aggregate the attestation with all existing attestations. // NOTE: this is sub-optimal and in future we will remove this in favour of max-clique diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 24c0623f5..7e1ddb1fd 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1852,7 +1852,21 @@ mod release_tests { // Sign an exit with the Altair domain and a phase0 epoch. This is a weird type of exit // that is valid because after the Bellatrix fork we'll use the Altair fork domain to verify // all prior epochs. - let exit2 = harness.make_voluntary_exit(2, Epoch::new(0)); + let unsigned_exit = VoluntaryExit { + epoch: Epoch::new(0), + validator_index: 2, + }; + let exit2 = SignedVoluntaryExit { + message: unsigned_exit.clone(), + signature: harness.validator_keypairs[2] + .sk + .sign(unsigned_exit.signing_root(spec.compute_domain( + Domain::VoluntaryExit, + harness.spec.altair_fork_version, + harness.chain.genesis_validators_root, + ))), + }; + let verified_exit2 = exit2 .clone() .validate(&bellatrix_head.beacon_state, &harness.chain.spec) diff --git a/beacon_node/operation_pool/src/sync_aggregate_id.rs b/beacon_node/operation_pool/src/sync_aggregate_id.rs index 401e0c5f8..40d6e3649 100644 --- a/beacon_node/operation_pool/src/sync_aggregate_id.rs +++ b/beacon_node/operation_pool/src/sync_aggregate_id.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use types::{Hash256, Slot}; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 15f8f5655..002bb344a 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -29,6 +29,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Data directory for the freezer database.") .takes_value(true) ) + .arg( + Arg::with_name("blobs-dir") + .long("blobs-dir") + .value_name("DIR") + .help("Data directory for the blobs database.") + .takes_value(true) + ) /* * Network parameters. */ @@ -381,12 +388,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5052).") .takes_value(true), ) - .arg( - Arg::with_name("http-disable-legacy-spec") - .long("http-disable-legacy-spec") - .requires("enable_http") - .hidden(true) - ) .arg( Arg::with_name("http-spec-fork") .long("http-spec-fork") @@ -562,24 +563,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("If present, uses an eth1 backend that generates static dummy data.\ Identical to the method used at the 2019 Canada interop.") ) - .arg( - Arg::with_name("eth1-endpoint") - .long("eth1-endpoint") - .value_name("HTTP-ENDPOINT") - .help("Deprecated. 
Use --eth1-endpoints.") - .takes_value(true) - ) - .arg( - Arg::with_name("eth1-endpoints") - .long("eth1-endpoints") - .value_name("HTTP-ENDPOINTS") - .conflicts_with("eth1-endpoint") - .help("One http endpoint for a web3 connection to an execution node. \ - Note: This flag is now only useful for testing, use `--execution-endpoint` \ - flag to connect to an execution node on mainnet and testnets. - Defaults to http://127.0.0.1:8545.") - .takes_value(true) - ) .arg( Arg::with_name("eth1-purge-cache") .long("eth1-purge-cache") @@ -642,14 +625,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { /* * Execution Layer Integration */ - .arg( - Arg::with_name("merge") - .long("merge") - .help("Deprecated. The feature activates automatically when --execution-endpoint \ - is supplied.") - .takes_value(false) - .hidden(true) - ) .arg( Arg::with_name("execution-endpoint") .long("execution-endpoint") @@ -730,6 +705,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("1") .takes_value(true) ) + /* Deneb settings */ + .arg( + Arg::with_name("trusted-setup-file-override") + .long("trusted-setup-file-override") + .value_name("FILE") + .help("Path to a json file containing the trusted setup params. \ + NOTE: This will override the trusted setup that is generated \ + from the mainnet kzg ceremony. Use with caution") + .takes_value(true) + ) /* * Database purging and compaction. */ @@ -760,6 +745,34 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("true") ) + .arg( + Arg::with_name("prune-blobs") + .long("prune-blobs") + .value_name("BOOLEAN") + .help("Prune blobs from Lighthouse's database when they are older than the \ + data availability boundary relative to the current epoch.") + .takes_value(true) + .default_value("true") + ) + .arg( + Arg::with_name("epochs-per-blob-prune") + .long("epochs-per-blob-prune") + .value_name("EPOCHS") + .help("The epoch interval with which to prune blobs from Lighthouse's \ + database when they are older than the data availability boundary \ + relative to the current epoch.") + .takes_value(true) + .default_value("1") + ) + .arg( + Arg::with_name("blob-prune-margin-epochs") + .long("blob-prune-margin-epochs") + .value_name("EPOCHS") + .help("The margin for blob pruning in epochs. The oldest blobs are pruned \ + up until data_availability_boundary - blob_prune_margin_epochs.") + .takes_value(true) + .default_value("0") + ) /* * Misc. @@ -942,6 +955,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("180") ) + .arg( + Arg::with_name("allow-insecure-genesis-sync") + .long("allow-insecure-genesis-sync") + .help("Enable syncing from genesis, which is generally insecure and incompatible with data availability checks. \ + Checkpoint syncing is the preferred method for syncing a node. \ + Only use this flag when testing. DO NOT use on mainnet!") + .conflicts_with("checkpoint-sync-url") + .conflicts_with("checkpoint-state") + .takes_value(false) + ) .arg( Arg::with_name("reconstruct-historic-states") .long("reconstruct-historic-states") @@ -1118,15 +1141,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("builder-profit-threshold") .long("builder-profit-threshold") .value_name("WEI_VALUE") - .help("The minimum reward in wei provided to the proposer by a block builder for \ - an external payload to be considered for inclusion in a proposal. If this \ - threshold is not met, the local EE's payload will be used. This is currently \ - *NOT* in comparison to the value of the local EE's payload.
It simply checks \ - whether the total proposer reward from an external payload is equal to or \ - greater than this value. In the future, a comparison to a local payload is \ - likely to be added. Example: Use 250000000000000000 to set the threshold to \ - 0.25 ETH.") - .default_value("0") + .help("This flag is deprecated and has no effect.") .takes_value(true) ) .arg( @@ -1138,22 +1153,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("builder") .takes_value(true) ) - .arg( - Arg::with_name("count-unrealized") - .long("count-unrealized") - .hidden(true) - .help("This flag is deprecated and has no effect.") - .takes_value(true) - .default_value("true") - ) - .arg( - Arg::with_name("count-unrealized-full") - .long("count-unrealized-full") - .hidden(true) - .help("This flag is deprecated and has no effect.") - .takes_value(true) - .default_value("false") - ) .arg( Arg::with_name("reset-payload-statuses") .long("reset-payload-statuses") @@ -1165,7 +1164,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("disable-deposit-contract-sync") .long("disable-deposit-contract-sync") - .help("Explictly disables syncing of deposit logs from the execution node. \ + .help("Explicitly disables syncing of deposit logs from the execution node. \ This overrides any previous option that depends on it. \ Useful if you intend to run a non-validating beacon node.") .takes_value(false) @@ -1194,11 +1193,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("always-prefer-builder-payload") .long("always-prefer-builder-payload") - .help("If set, the beacon node always uses the payload from the builder instead of the local payload.") - // The builder profit threshold flag is used to provide preference - // to local payloads, therefore it fundamentally conflicts with - // always using the builder. - .conflicts_with("builder-profit-threshold") + .help("This flag is deprecated and has no effect.") ) .arg( Arg::with_name("invalid-gossip-verified-blocks-path") @@ -1213,12 +1208,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("progressive-balances") .long("progressive-balances") .value_name("MODE") - .help("Options to enable or disable the progressive balances cache for \ - unrealized FFG progression calculation. The default `checked` mode compares \ - the progressive balances from the cache against results from the existing \ - method. If there is a mismatch, it falls back to the existing method. The \ - optimized mode (`fast`) is faster but is still experimental, and is \ - not recommended for mainnet usage at this time.") + .help("Control the progressive balances cache mode. The default `fast` mode uses \ + the cache to speed up fork choice. A more conservative `checked` mode \ + compares the cache's results against results without the cache. If \ + there is a mismatch, it falls back to the cache-free result. Using the \ + default `fast` mode is recommended unless advised otherwise by the \ + Lighthouse team.") .takes_value(true) .possible_values(ProgressiveBalancesMode::VARIANTS) ) @@ -1273,5 +1268,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("64") .takes_value(true) ) + .arg( + Arg::with_name("disable-duplicate-warn-logs") + .long("disable-duplicate-warn-logs") + .help("Disable warning logs for duplicate gossip messages. The WARN level log is \ + useful for detecting a duplicate validator key running elsewhere. 
However, this may \ + result in excessive warning logs if the validator is broadcasting messages to \ + multiple beacon nodes via the validator client --broadcast flag. In this case, \ + disabling these warn logs may be useful.") + .takes_value(false) + ) .group(ArgGroup::with_name("enable_http").args(&["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 4ab92a7fd..c940049c5 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -2,6 +2,7 @@ use beacon_chain::chain_config::{ DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; +use beacon_chain::TrustedSetup; use clap::ArgMatches; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use clap_utils::parse_required; @@ -21,6 +22,7 @@ use std::fmt::Debug; use std::fs; use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; +use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; @@ -60,6 +62,13 @@ pub fn get_config( fs::remove_dir_all(freezer_db) .map_err(|err| format!("Failed to remove freezer_db: {}", err))?; } + + // Remove the blobs db. + let blobs_db = client_config.get_blobs_db_path(); + if blobs_db.exists() { + fs::remove_dir_all(blobs_db) + .map_err(|err| format!("Failed to remove blobs_db: {}", err))?; + } } // Create `datadir` and any non-existing parent directories. @@ -118,13 +127,6 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present("http-disable-legacy-spec") { - warn!( - log, - "The flag --http-disable-legacy-spec is deprecated and will be removed" - ); - } - if let Some(fork_name) = clap_utils::parse_optional(cli_args, "http-spec-fork")? { client_config.http_api.spec_fork_name = Some(fork_name); } @@ -156,6 +158,9 @@ pub fn get_config( client_config.http_api.duplicate_block_status_code = parse_required(cli_args, "http-duplicate-block-status")?; + + client_config.http_api.enable_light_client_server = + cli_args.is_present("light-client-server"); } if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { @@ -238,25 +243,6 @@ pub fn get_config( client_config.sync_eth1_chain = true; } - // Defines the URL to reach the eth1 node. 
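The `--purge-db` handling above now deletes the blobs database alongside the hot and freezer databases. The pattern is a guarded recursive remove that maps the IO error into the crate's `String` errors; a standalone sketch with an illustrative path:

    use std::fs;
    use std::path::Path;

    fn remove_db_dir(path: &Path) -> Result<(), String> {
        if path.exists() {
            fs::remove_dir_all(path)
                .map_err(|e| format!("Failed to remove {}: {}", path.display(), e))?;
        }
        Ok(())
    }

    fn main() {
        // Removing a directory that does not exist is a no-op under this guard.
        remove_db_dir(Path::new("/tmp/nonexistent-blobs-db")).unwrap();
    }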
- if let Some(endpoint) = cli_args.value_of("eth1-endpoint") { - warn!( - log, - "The --eth1-endpoint flag is deprecated"; - "msg" => "please use --eth1-endpoints instead" - ); - client_config.sync_eth1_chain = true; - - let endpoint = SensitiveUrl::parse(endpoint) - .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?; - client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint); - } else if let Some(endpoint) = cli_args.value_of("eth1-endpoints") { - client_config.sync_eth1_chain = true; - let endpoint = SensitiveUrl::parse(endpoint) - .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; - client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint); - } - if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { client_config.eth1.blocks_per_log_query = val .parse() @@ -273,20 +259,6 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if cli_args.is_present("merge") { - if cli_args.is_present("execution-endpoint") { - warn!( - log, - "The --merge flag is deprecated"; - "info" => "the --execution-endpoint flag automatically enables this feature" - ) - } else { - return Err("The --merge flag is deprecated. \ - Supply a value to --execution-endpoint instead." - .into()); - } - } - if let Some(endpoints) = cli_args.value_of("execution-endpoint") { let mut el_config = execution_layer::Config::default(); @@ -326,7 +298,7 @@ pub fn get_config( .write_all(jwt_secret_key.as_bytes()) .map_err(|e| { format!( - "Error occured while writing to jwt_secret_key file: {:?}", + "Error occurred while writing to jwt_secret_key file: {:?}", e ) })?; @@ -344,6 +316,21 @@ pub fn get_config( clap_utils::parse_optional(cli_args, "builder-user-agent")?; } + if cli_args.is_present("builder-profit-threshold") { + warn!( + log, + "Ignoring --builder-profit-threshold"; + "info" => "this flag is deprecated and will be removed" + ); + } + if cli_args.is_present("always-prefer-builder-payload") { + warn!( + log, + "Ignoring --always-prefer-builder-payload"; + "info" => "this flag is deprecated and will be removed" + ); + } + // Set config values from parse values. el_config.secret_files = vec![secret_file.clone()]; el_config.execution_endpoints = vec![execution_endpoint.clone()]; @@ -352,25 +339,10 @@ pub fn get_config( el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; el_config.default_datadir = client_config.data_dir().clone(); - el_config.builder_profit_threshold = - clap_utils::parse_required(cli_args, "builder-profit-threshold")?; - el_config.always_prefer_builder_payload = - cli_args.is_present("always-prefer-builder-payload"); - let execution_timeout_multiplier = clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); - // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and - // use `--execution-endpoint` instead. Also, log a deprecation warning. - if cli_args.is_present("eth1-endpoints") || cli_args.is_present("eth1-endpoint") { - warn!( - log, - "Ignoring --eth1-endpoints flag"; - "info" => "the value for --execution-endpoint will be used instead. 
\ - --eth1-endpoints has been deprecated for post-merge configurations" - ); - } client_config.eth1.endpoint = Eth1Endpoint::Auth { endpoint: execution_endpoint, jwt_path: secret_file, @@ -382,10 +354,32 @@ pub fn get_config( client_config.execution_layer = Some(el_config); } + // 4844 params + client_config.trusted_setup = context + .eth2_network_config + .as_ref() + .and_then(|config| config.kzg_trusted_setup.as_ref()) + .map(|trusted_setup_bytes| serde_json::from_slice(trusted_setup_bytes)) + .transpose() + .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; + + // Override default trusted setup file if required + if let Some(trusted_setup_file_path) = cli_args.value_of("trusted-setup-file-override") { + let file = std::fs::File::open(trusted_setup_file_path) + .map_err(|e| format!("Failed to open trusted setup file: {}", e))?; + let trusted_setup: TrustedSetup = serde_json::from_reader(file) + .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; + client_config.trusted_setup = Some(trusted_setup); + } + if let Some(freezer_dir) = cli_args.value_of("freezer-dir") { client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); } + if let Some(blobs_db_dir) = cli_args.value_of("blobs-dir") { + client_config.blobs_db_path = Some(PathBuf::from(blobs_db_dir)); + } + let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; client_config.store.slots_per_restore_point = sprp; client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; @@ -419,6 +413,22 @@ pub fn get_config( client_config.chain.epochs_per_migration = epochs_per_migration; } + if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? { + client_config.store.prune_blobs = prune_blobs; + } + + if let Some(epochs_per_blob_prune) = + clap_utils::parse_optional(cli_args, "epochs-per-blob-prune")? + { + client_config.store.epochs_per_blob_prune = epochs_per_blob_prune; + } + + if let Some(blob_prune_margin_epochs) = + clap_utils::parse_optional(cli_args, "blob-prune-margin-epochs")? + { + client_config.store.blob_prune_margin_epochs = blob_prune_margin_epochs; + } + /* * Zero-ports * @@ -494,6 +504,8 @@ pub fn get_config( None }; + client_config.allow_insecure_genesis_sync = cli_args.is_present("allow-insecure-genesis-sync"); + client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. if let (Some(initial_state_path), Some(initial_block_path)) = ( @@ -678,7 +690,7 @@ pub fn get_config( } if cli_args.is_present("validator-monitor-auto") { - client_config.validator_monitor_auto = true; + client_config.validator_monitor.auto_register = true; } if let Some(pubkeys) = cli_args.value_of("validator-monitor-pubkeys") { @@ -688,7 +700,8 @@ pub fn get_config( .collect::, _>>() .map_err(|e| format!("Invalid --validator-monitor-pubkeys value: {:?}", e))?; client_config - .validator_monitor_pubkeys + .validator_monitor + .validators .extend_from_slice(&pubkeys); } @@ -706,14 +719,17 @@ pub fn get_config( .collect::, _>>() .map_err(|e| format!("Invalid --validator-monitor-file contents: {:?}", e))?; client_config - .validator_monitor_pubkeys + .validator_monitor + .validators .extend_from_slice(&pubkeys); } if let Some(count) = clap_utils::parse_optional(cli_args, "validator-monitor-individual-tracking-threshold")? 
{ - client_config.validator_monitor_individual_tracking_threshold = count; + client_config + .validator_monitor + .individual_tracking_threshold = count; } if cli_args.is_present("disable-lock-timeouts") { @@ -771,22 +787,6 @@ pub fn get_config( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } - if !clap_utils::parse_required::(cli_args, "count-unrealized")? { - warn!( - log, - "The flag --count-unrealized is deprecated and will be removed"; - "info" => "any use of the flag will have no effect" - ); - } - - if clap_utils::parse_required::(cli_args, "count-unrealized-full")? { - warn!( - log, - "The flag --count-unrealized-full is deprecated and will be removed"; - "info" => "setting it to `true` has no effect" - ); - } - client_config.chain.always_reset_payload_statuses = cli_args.is_present("reset-payload-statuses"); @@ -809,7 +809,7 @@ pub fn get_config( // Graphical user interface config. if cli_args.is_present("gui") { client_config.http_api.enabled = true; - client_config.validator_monitor_auto = true; + client_config.validator_monitor.auto_register = true; } // Optimistic finalized sync. @@ -980,13 +980,13 @@ pub fn parse_listening_addresses( .then(unused_port::unused_udp6_port) .transpose()? .or(maybe_disc_port) - .unwrap_or(port); + .unwrap_or(tcp_port); let quic_port = use_zero_ports .then(unused_port::unused_udp6_port) .transpose()? .or(maybe_quic_port) - .unwrap_or(port + 1); + .unwrap_or(if tcp_port == 0 { 0 } else { tcp_port + 1 }); ListenAddress::V6(lighthouse_network::ListenAddr { addr: ipv6, @@ -1009,14 +1009,14 @@ pub fn parse_listening_addresses( .then(unused_port::unused_udp4_port) .transpose()? .or(maybe_disc_port) - .unwrap_or(port); + .unwrap_or(tcp_port); // use zero ports if required. If not, use the specific quic port. If none given, use // the tcp port + 1. let quic_port = use_zero_ports .then(unused_port::unused_udp4_port) .transpose()? .or(maybe_quic_port) - .unwrap_or(port + 1); + .unwrap_or(if tcp_port == 0 { 0 } else { tcp_port + 1 }); ListenAddress::V4(lighthouse_network::ListenAddr { addr: ipv4, @@ -1039,7 +1039,11 @@ pub fn parse_listening_addresses( .then(unused_port::unused_udp4_port) .transpose()? .or(maybe_quic_port) - .unwrap_or(port + 1); + .unwrap_or(if ipv4_tcp_port == 0 { + 0 + } else { + ipv4_tcp_port + 1 + }); // Defaults to 9090 when required let ipv6_tcp_port = use_zero_ports @@ -1055,7 +1059,11 @@ pub fn parse_listening_addresses( .then(unused_port::unused_udp6_port) .transpose()? 
.or(maybe_quic6_port) - .unwrap_or(ipv6_tcp_port + 1); + .unwrap_or(if ipv6_tcp_port == 0 { + 0 + } else { + ipv6_tcp_port + 1 + }); ListenAddress::DualStack( lighthouse_network::ListenAddr { @@ -1178,23 +1186,23 @@ pub fn set_network_config( if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { config.enr_udp4_port = Some( enr_udp_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR discovery port: {}", enr_udp_port_str))?, ); } if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic-port") { config.enr_quic4_port = Some( enr_quic_port_str - .parse::() - .map_err(|_| format!("Invalid quic port: {}", enr_quic_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR quic port: {}", enr_quic_port_str))?, ); } if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { config.enr_tcp4_port = Some( enr_tcp_port_str - .parse::() + .parse::() .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, ); } @@ -1202,23 +1210,23 @@ pub fn set_network_config( if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") { config.enr_udp6_port = Some( enr_udp_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR discovery port: {}", enr_udp_port_str))?, ); } if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic6-port") { config.enr_quic6_port = Some( enr_quic_port_str - .parse::() - .map_err(|_| format!("Invalid quic port: {}", enr_quic_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR quic port: {}", enr_quic_port_str))?, ); } if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") { config.enr_tcp6_port = Some( enr_tcp_port_str - .parse::() + .parse::() .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, ); } @@ -1226,25 +1234,38 @@ pub fn set_network_config( if cli_args.is_present("enr-match") { // Match the IP and UDP port in the ENR. - // Set the ENR address to localhost if the address is unspecified. if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() { + // ensure the port is valid to be advertised + let disc_port = ipv4_addr + .disc_port + .try_into() + .map_err(|_| "enr-match can only be used with non-zero listening ports")?; + + // Set the ENR address to localhost if the address is unspecified. let ipv4_enr_addr = if ipv4_addr.addr == Ipv4Addr::UNSPECIFIED { Ipv4Addr::LOCALHOST } else { ipv4_addr.addr }; config.enr_address.0 = Some(ipv4_enr_addr); - config.enr_udp4_port = Some(ipv4_addr.disc_port); + config.enr_udp4_port = Some(disc_port); } if let Some(ipv6_addr) = config.listen_addrs().v6().cloned() { + // ensure the port is valid to be advertised + let disc_port = ipv6_addr + .disc_port + .try_into() + .map_err(|_| "enr-match can only be used with non-zero listening ports")?; + + // Set the ENR address to localhost if the address is unspecified. let ipv6_enr_addr = if ipv6_addr.addr == Ipv6Addr::UNSPECIFIED { Ipv6Addr::LOCALHOST } else { ipv6_addr.addr }; config.enr_address.1 = Some(ipv6_enr_addr); - config.enr_udp6_port = Some(ipv6_addr.disc_port); + config.enr_udp6_port = Some(disc_port); } } @@ -1400,6 +1421,9 @@ pub fn set_network_config( Some(config_str.parse()?) 
} }; + + config.disable_duplicate_warn_logs = cli_args.is_present("disable-duplicate-warn-logs"); + Ok(()) } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 3bef69ce8..ee782c650 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -64,6 +64,7 @@ impl ProductionBeaconNode { let _datadir = client_config.create_data_dir()?; let db_path = client_config.create_db_path()?; let freezer_db_path = client_config.create_freezer_db_path()?; + let blobs_db_path = client_config.create_blobs_db_path()?; let executor = context.executor.clone(); if let Some(legacy_dir) = client_config.get_existing_legacy_data_dir() { @@ -85,14 +86,20 @@ impl ProductionBeaconNode { .chain_spec(spec) .beacon_processor(client_config.beacon_processor.clone()) .http_api_config(client_config.http_api.clone()) - .disk_store(&db_path, &freezer_db_path, store_config, log.clone())?; + .disk_store( + &db_path, + &freezer_db_path, + &blobs_db_path, + store_config, + log.clone(), + )?; let builder = if let Some(mut slasher_config) = client_config.slasher.clone() { match slasher_config.override_backend() { DatabaseBackendOverride::Success(old_backend) => { info!( log, - "Slasher backend overriden"; + "Slasher backend overridden"; "reason" => "database exists", "configured_backend" => %old_backend, "override_backend" => %slasher_config.backend, diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 32c386829..7bf1ef76b 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -19,7 +19,6 @@ types = { workspace = true } state_processing = { workspace = true } slog = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } lru = { workspace = true } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 581003b4f..681d424e2 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,13 +1,17 @@ use crate::{DBColumn, Error, StoreItem}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use std::num::NonZeroUsize; +use types::non_zero_usize::new_non_zero_usize; use types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; -pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; -pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1; +pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(5); +pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); +pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; +pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; /// Database configuration parameters. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -17,15 +21,22 @@ pub struct StoreConfig { /// Flag indicating whether the `slots_per_restore_point` was set explicitly by the user. pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. - pub block_cache_size: usize, + pub block_cache_size: NonZeroUsize, /// Maximum number of states from freezer database to store in the in-memory state cache. - pub historic_state_cache_size: usize, + pub historic_state_cache_size: NonZeroUsize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning. 
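Taken together, the new store parameters mean blob pruning runs every `epochs_per_blob_prune` epochs and removes blobs older than the data availability boundary minus `blob_prune_margin_epochs`. A worked example of the boundary arithmetic (the function name is illustrative):

    fn blob_prune_boundary(data_availability_boundary: u64, margin_epochs: u64) -> u64 {
        data_availability_boundary.saturating_sub(margin_epochs)
    }

    fn main() {
        // With the defaults (margin 0), everything older than a DA boundary
        // at epoch 1000 is prunable.
        assert_eq!(blob_prune_boundary(1000, 0), 1000);
        // A margin of 2 epochs keeps two extra epochs of blobs on disk.
        assert_eq!(blob_prune_boundary(1000, 2), 998);
    }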
pub compact_on_prune: bool, /// Whether to prune payloads on initialization and finalization. pub prune_payloads: bool, + /// Whether to prune blobs older than the blob data availability boundary. + pub prune_blobs: bool, + /// Frequency of blob pruning in epochs. Default: 1 (every epoch). + pub epochs_per_blob_prune: u64, + /// The margin for blob pruning in epochs. The oldest blobs are pruned up until + /// data_availability_boundary - blob_prune_margin_epochs. Default: 0. + pub blob_prune_margin_epochs: u64, } /// Variant of `StoreConfig` that gets written to disk. Contains immutable configuration params. @@ -50,6 +61,9 @@ impl Default for StoreConfig { compact_on_init: false, compact_on_prune: true, prune_payloads: true, + prune_blobs: true, + epochs_per_blob_prune: DEFAULT_EPOCHS_PER_BLOB_PRUNE, + blob_prune_margin_epochs: DEFAULT_BLOB_PUNE_MARGIN_EPOCHS, } } } diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index fcc40706b..96e02b80f 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -25,6 +25,8 @@ pub enum Error { SchemaMigrationError(String), /// The store's `anchor_info` was mutated concurrently, the latest modification wasn't applied. AnchorInfoConcurrentMutation, + /// The store's `blob_info` was mutated concurrently, the latest modification wasn't applied. + BlobInfoConcurrentMutation, /// The block or state is unavailable due to weak subjectivity sync. HistoryUnavailable, /// State reconstruction cannot commence because not all historic blocks are known. @@ -43,6 +45,8 @@ pub enum Error { BlockReplayError(BlockReplayError), AddPayloadLogicError, SlotClockUnavailableForMigration, + InvalidKey, + InvalidBytes, UnableToDowngrade, InconsistentFork(InconsistentFork), } diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 125b73a45..1ccf1da1b 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -176,7 +176,7 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> store: &'a HotColdDB, start_slot: Slot, end_slot: Option, - get_state: impl FnOnce() -> (BeaconState, Hash256), + get_state: impl FnOnce() -> Result<(BeaconState, Hash256)>, spec: &ChainSpec, ) -> Result { use HybridForwardsIterator::*; @@ -200,7 +200,7 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_limit) { None } else { - Some(Box::new(get_state())) + Some(Box::new(get_state()?)) }; PreFinalization { iter, @@ -209,7 +209,7 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> } } else { PostFinalizationLazy { - continuation_data: Some(Box::new(get_state())), + continuation_data: Some(Box::new(get_state()?)), store, start_slot, } diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index 329133632..c70ef8986 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -31,7 +31,7 @@ where "Garbage collecting {} temporary states", delete_ops.len() / 2 ); - self.do_atomically(delete_ops)?; + self.do_atomically_with_block_and_blobs_cache(delete_ops)?; } Ok(()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 87f8e0ffc..63cd8e67d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -12,9 +12,9 @@ use crate::leveldb_store::BytesKey; use crate::leveldb_store::LevelDB; use 
crate::memory_store::MemoryStore; use crate::metadata::{ - AnchorInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY, - COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY, - SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, + AnchorInfo, BlobInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY, + BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, + PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::metrics; use crate::{ @@ -25,7 +25,7 @@ use itertools::process_results; use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -35,9 +35,11 @@ use state_processing::{ use std::cmp::min; use std::convert::TryInto; use std::marker::PhantomData; +use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; +use types::blob_sidecar::BlobSidecarList; use types::*; /// On-disk database that stores finalized states efficiently. @@ -53,15 +55,19 @@ pub struct HotColdDB, Cold: ItemStore> { pub(crate) split: RwLock, /// The starting slots for the range of blocks & states stored in the database. anchor_info: RwLock>, + /// The starting slots for the range of blobs stored in the database. + blob_info: RwLock, pub(crate) config: StoreConfig, /// Cold database containing compact historical data. pub cold_db: Cold, + /// Database containing blobs. If None, store falls back to use `cold_db`. + pub blobs_db: Cold, /// Hot database containing duplicated but quick-to-access recent data. /// /// The hot database also contains all blocks. pub hot_db: Hot, - /// LRU cache of deserialized blocks. Updated whenever a block is loaded. - block_cache: Mutex>>, + /// LRU cache of deserialized blocks and blobs. Updated whenever a block or blob is loaded. + block_cache: Mutex>, /// LRU cache of replayed states. state_cache: Mutex>>, /// Chain spec. 
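The `block_cache` field above now holds a combined block/blob cache (defined in the next hunk): two LRU maps keyed by block root, so a block and its blob sidecars are cached and evicted independently. A reduced sketch using the same `lru` crate, with stand-in key and value types:

    use lru::LruCache;
    use std::num::NonZeroUsize;

    struct BlockCache {
        blocks: LruCache<u64, String>, // stand-in for Hash256 -> SignedBeaconBlock
        blobs: LruCache<u64, Vec<u8>>, // stand-in for the blob sidecar list
    }

    impl BlockCache {
        fn new(size: NonZeroUsize) -> Self {
            Self {
                blocks: LruCache::new(size),
                blobs: LruCache::new(size),
            }
        }

        /// Dropping a block also drops its blobs, as in the store's combined delete.
        fn delete(&mut self, root: &u64) {
            let _ = self.blocks.pop(root);
            let _ = self.blobs.pop(root);
        }
    }

    fn main() {
        let mut cache = BlockCache::new(NonZeroUsize::new(4).unwrap());
        cache.blocks.put(1, "block".to_string());
        cache.blobs.put(1, vec![0]);
        cache.delete(&1);
        assert!(cache.blocks.get(&1).is_none());
    }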
@@ -72,6 +78,43 @@ pub struct HotColdDB, Cold: ItemStore> { _phantom: PhantomData, } +#[derive(Debug)] +struct BlockCache { + block_cache: LruCache>, + blob_cache: LruCache>, +} + +impl BlockCache { + pub fn new(size: NonZeroUsize) -> Self { + Self { + block_cache: LruCache::new(size), + blob_cache: LruCache::new(size), + } + } + pub fn put_block(&mut self, block_root: Hash256, block: SignedBeaconBlock) { + self.block_cache.put(block_root, block); + } + pub fn put_blobs(&mut self, block_root: Hash256, blobs: BlobSidecarList) { + self.blob_cache.put(block_root, blobs); + } + pub fn get_block<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a SignedBeaconBlock> { + self.block_cache.get(block_root) + } + pub fn get_blobs<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a BlobSidecarList> { + self.blob_cache.get(block_root) + } + pub fn delete_block(&mut self, block_root: &Hash256) { + let _ = self.block_cache.pop(block_root); + } + pub fn delete_blobs(&mut self, block_root: &Hash256) { + let _ = self.blob_cache.pop(block_root); + } + pub fn delete(&mut self, block_root: &Hash256) { + let _ = self.block_cache.pop(block_root); + let _ = self.blob_cache.pop(block_root); + } +} + #[derive(Debug, PartialEq)] pub enum HotColdDBError { UnsupportedSchemaVersion { @@ -95,6 +138,7 @@ pub enum HotColdDBError { MissingExecutionPayload(Hash256), MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, + BlobsPreviouslyInDefaultStore, HotStateSummaryError(BeaconStateError), RestorePointDecodeError(ssz::DecodeError), BlockReplayBeaconError(BeaconStateError), @@ -106,6 +150,8 @@ pub enum HotColdDBError { slots_per_historical_root: u64, slots_per_epoch: u64, }, + ZeroEpochsPerBlobPrune, + BlobPruneLogicError, RestorePointBlockHashError(BeaconStateError), IterationError { unexpected_key: BytesKey, @@ -115,6 +161,7 @@ pub enum HotColdDBError { request_slot: Slot, block_root: Hash256, }, + Rollback, } impl HotColdDB, MemoryStore> { @@ -123,14 +170,16 @@ impl HotColdDB, MemoryStore> { spec: ChainSpec, log: Logger, ) -> Result, MemoryStore>, Error> { - Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; + Self::verify_config(&config)?; let db = HotColdDB { split: RwLock::new(Split::default()), anchor_info: RwLock::new(None), + blob_info: RwLock::new(BlobInfo::default()), cold_db: MemoryStore::open(), + blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), - block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, @@ -152,6 +201,7 @@ impl HotColdDB, LevelDB> { pub fn open( hot_path: &Path, cold_path: &Path, + blobs_db_path: &Path, migrate_schema: impl FnOnce(Arc, SchemaVersion, SchemaVersion) -> Result<(), Error>, config: StoreConfig, spec: ChainSpec, @@ -162,9 +212,11 @@ impl HotColdDB, LevelDB> { let mut db = HotColdDB { split: RwLock::new(Split::default()), anchor_info: RwLock::new(None), + blob_info: RwLock::new(BlobInfo::default()), cold_db: LevelDB::open(cold_path)?, + blobs_db: LevelDB::open(blobs_db_path)?, hot_db: LevelDB::open(hot_path)?, - block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, @@ -207,6 +259,44 @@ impl HotColdDB, LevelDB> { ); } + // Open separate blobs directory if configured and same configuration was used on 
previous + // run. + let blob_info = db.load_blob_info()?; + let deneb_fork_slot = db + .spec + .deneb_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let new_blob_info = match &blob_info { + Some(blob_info) => { + // If the oldest block slot is already set do not allow the blob DB path to be + // changed (require manual migration). + if blob_info.oldest_blob_slot.is_some() && !blob_info.blobs_db { + return Err(HotColdDBError::BlobsPreviouslyInDefaultStore.into()); + } + // Set the oldest blob slot to the Deneb fork slot if it is not yet set. + // Always initialize `blobs_db` to true, we no longer support storing the blobs + // in the freezer DB, because the UX is strictly worse for relocating the DB. + let oldest_blob_slot = blob_info.oldest_blob_slot.or(deneb_fork_slot); + BlobInfo { + oldest_blob_slot, + blobs_db: true, + } + } + // First start. + None => BlobInfo { + // Set the oldest blob slot to the Deneb fork slot if it is not yet set. + oldest_blob_slot: deneb_fork_slot, + blobs_db: true, + }, + }; + db.compare_and_set_blob_info_with_write(<_>::default(), new_blob_info.clone())?; + info!( + db.log, + "Blob DB initialized"; + "path" => ?blobs_db_path, + "oldest_blob_slot" => ?new_blob_info.oldest_blob_slot, + ); + // Ensure that the schema version of the on-disk database matches the software. // If the version is mismatched, an automatic migration will be attempted. let db = Arc::new(db); @@ -275,7 +365,7 @@ impl, Cold: ItemStore> HotColdDB let block = self.block_as_kv_store_ops(block_root, block, &mut ops)?; self.hot_db.do_atomically(ops)?; // Update cache. - self.block_cache.lock().put(*block_root, block); + self.block_cache.lock().put_block(*block_root, block); Ok(()) } @@ -327,15 +417,14 @@ impl, Cold: ItemStore> HotColdDB metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT); // Check the cache. - if let Some(block) = self.block_cache.lock().get(block_root) { + if let Some(block) = self.block_cache.lock().get_block(block_root) { metrics::inc_counter(&metrics::BEACON_BLOCK_CACHE_HIT_COUNT); return Ok(Some(DatabaseBlock::Full(block.clone()))); } // Load the blinded block. - let blinded_block = match self.get_blinded_block(block_root)? { - Some(block) => block, - None => return Ok(None), + let Some(blinded_block) = self.get_blinded_block(block_root)? else { + return Ok(None); }; // If the block is after the split point then we should have the full execution payload @@ -352,7 +441,9 @@ impl, Cold: ItemStore> HotColdDB let full_block = self.make_full_block(block_root, blinded_block)?; // Add to cache. - self.block_cache.lock().put(*block_root, full_block.clone()); + self.block_cache + .lock() + .put_block(*block_root, full_block.clone()); DatabaseBlock::Full(full_block) } else if !self.config.prune_payloads { @@ -473,6 +564,12 @@ impl, Cold: ItemStore> HotColdDB .map(|payload| payload.is_some()) } + /// Check if the blobs for a block exists on disk. + pub fn blobs_exist(&self, block_root: &Hash256) -> Result { + self.blobs_db + .key_exists(DBColumn::BeaconBlob.into(), block_root.as_bytes()) + } + /// Determine whether a block exists in the database. pub fn block_exists(&self, block_root: &Hash256) -> Result { self.hot_db @@ -481,11 +578,33 @@ impl, Cold: ItemStore> HotColdDB /// Delete a block from the store and the block cache. 
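The blob-info initialisation above reduces to: on a fresh database the oldest blob slot defaults to the Deneb fork slot (if one is scheduled), an existing value survives restarts, and `blobs_db` is always forced to true. A condensed sketch that omits the error path for databases that previously stored blobs in the freezer:

    #[derive(Clone, Debug, PartialEq)]
    struct BlobInfo {
        oldest_blob_slot: Option<u64>,
        blobs_db: bool,
    }

    fn init_blob_info(on_disk: Option<BlobInfo>, deneb_fork_slot: Option<u64>) -> BlobInfo {
        match on_disk {
            // Keep an existing oldest slot; otherwise fall back to the fork slot.
            Some(info) => BlobInfo {
                oldest_blob_slot: info.oldest_blob_slot.or(deneb_fork_slot),
                blobs_db: true,
            },
            // First start.
            None => BlobInfo {
                oldest_blob_slot: deneb_fork_slot,
                blobs_db: true,
            },
        }
    }

    fn main() {
        assert_eq!(init_blob_info(None, Some(1000)).oldest_blob_slot, Some(1000));
        let existing = BlobInfo { oldest_blob_slot: Some(42), blobs_db: true };
        assert_eq!(init_blob_info(Some(existing), Some(1000)).oldest_blob_slot, Some(42));
        assert!(init_blob_info(None, None).blobs_db);
    }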
pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { - self.block_cache.lock().pop(block_root); + self.block_cache.lock().delete(block_root); self.hot_db .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())?; self.hot_db - .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes()) + .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes())?; + self.blobs_db + .key_delete(DBColumn::BeaconBlob.into(), block_root.as_bytes()) + } + + pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobSidecarList) -> Result<(), Error> { + self.blobs_db.put_bytes( + DBColumn::BeaconBlob.into(), + block_root.as_bytes(), + &blobs.as_ssz_bytes(), + )?; + self.block_cache.lock().put_blobs(*block_root, blobs); + Ok(()) + } + + pub fn blobs_as_kv_store_ops( + &self, + key: &Hash256, + blobs: BlobSidecarList, + ops: &mut Vec, + ) { + let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_bytes()); + ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes())); } pub fn put_state_summary( @@ -662,7 +781,7 @@ impl, Cold: ItemStore> HotColdDB self, start_slot, None, - || (end_state, end_block_root), + || Ok((end_state, end_block_root)), spec, ) } @@ -671,7 +790,7 @@ impl, Cold: ItemStore> HotColdDB &self, start_slot: Slot, end_slot: Slot, - get_state: impl FnOnce() -> (BeaconState, Hash256), + get_state: impl FnOnce() -> Result<(BeaconState, Hash256), Error>, spec: &ChainSpec, ) -> Result, Error> { HybridForwardsBlockRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec) @@ -688,7 +807,7 @@ impl, Cold: ItemStore> HotColdDB self, start_slot, None, - || (end_state, end_state_root), + || Ok((end_state, end_state_root)), spec, ) } @@ -697,7 +816,7 @@ impl, Cold: ItemStore> HotColdDB &self, start_slot: Slot, end_slot: Slot, - get_state: impl FnOnce() -> (BeaconState, Hash256), + get_state: impl FnOnce() -> Result<(BeaconState, Hash256), Error>, spec: &ChainSpec, ) -> Result, Error> { HybridForwardsStateRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec) @@ -773,6 +892,10 @@ impl, Cold: ItemStore> HotColdDB self.store_hot_state(&state_root, state, &mut key_value_batch)?; } + StoreOp::PutBlobs(block_root, blobs) => { + self.blobs_as_kv_store_ops(&block_root, blobs, &mut key_value_batch); + } + StoreOp::PutStateSummary(state_root, summary) => { key_value_batch.push(summary.as_kv_store_op(state_root)); } @@ -792,6 +915,11 @@ impl, Cold: ItemStore> HotColdDB key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } + StoreOp::DeleteBlobs(block_root) => { + let key = get_key_for_col(DBColumn::BeaconBlob.into(), block_root.as_bytes()); + key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + } + StoreOp::DeleteState(state_root, slot) => { let state_summary_key = get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_bytes()); @@ -817,17 +945,82 @@ impl, Cold: ItemStore> HotColdDB Ok(key_value_batch) } - pub fn do_atomically(&self, batch: Vec>) -> Result<(), Error> { - // Update the block cache whilst holding a lock, to ensure that the cache updates atomically - // with the database. 
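+    /// Commit a batch of store operations, keeping the block and blob caches in sync with the
+    /// hot and blobs databases. Blob ops are committed first; if the hot DB write then fails,
+    /// the blob ops are reversed so the two databases stay consistent.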
+ pub fn do_atomically_with_block_and_blobs_cache( + &self, + batch: Vec>, + ) -> Result<(), Error> { + let mut blobs_to_delete = Vec::new(); + let (blobs_ops, hot_db_ops): (Vec>, Vec>) = + batch.into_iter().partition(|store_op| match store_op { + StoreOp::PutBlobs(_, _) => true, + StoreOp::DeleteBlobs(block_root) => { + match self.get_blobs(block_root) { + Ok(Some(blob_sidecar_list)) => { + blobs_to_delete.push((*block_root, blob_sidecar_list)); + } + Err(e) => { + error!( + self.log, "Error getting blobs"; + "block_root" => %block_root, + "error" => ?e + ); + } + _ => (), + } + true + } + StoreOp::PutBlock(_, _) | StoreOp::DeleteBlock(_) => false, + _ => false, + }); + + // Update database whilst holding a lock on cache, to ensure that the cache updates + // atomically with the database. let mut guard = self.block_cache.lock(); - for op in &batch { + let blob_cache_ops = blobs_ops.clone(); + // Try to execute blobs store ops. + self.blobs_db + .do_atomically(self.convert_to_kv_batch(blobs_ops)?)?; + + let hot_db_cache_ops = hot_db_ops.clone(); + // Try to execute hot db store ops. + let tx_res = match self.convert_to_kv_batch(hot_db_ops) { + Ok(kv_store_ops) => self.hot_db.do_atomically(kv_store_ops), + Err(e) => Err(e), + }; + // Rollback on failure + if let Err(e) = tx_res { + error!( + self.log, + "Database write failed"; + "error" => ?e, + "action" => "reverting blob DB changes" + ); + let mut blob_cache_ops = blob_cache_ops; + for op in blob_cache_ops.iter_mut() { + let reverse_op = match op { + StoreOp::PutBlobs(block_root, _) => StoreOp::DeleteBlobs(*block_root), + StoreOp::DeleteBlobs(_) => match blobs_to_delete.pop() { + Some((block_root, blobs)) => StoreOp::PutBlobs(block_root, blobs), + None => return Err(HotColdDBError::Rollback.into()), + }, + _ => return Err(HotColdDBError::Rollback.into()), + }; + *op = reverse_op; + } + self.blobs_db + .do_atomically(self.convert_to_kv_batch(blob_cache_ops)?)?; + return Err(e); + } + + for op in hot_db_cache_ops { match op { StoreOp::PutBlock(block_root, block) => { - guard.put(*block_root, (**block).clone()); + guard.put_block(block_root, (*block).clone()); } + StoreOp::PutBlobs(_, _) => (), + StoreOp::PutState(_, _) => (), StoreOp::PutStateSummary(_, _) => (), @@ -837,9 +1030,11 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteStateTemporaryFlag(_) => (), StoreOp::DeleteBlock(block_root) => { - guard.pop(block_root); + guard.delete_block(&block_root); } + StoreOp::DeleteBlobs(_) => (), + StoreOp::DeleteState(_, _) => (), StoreOp::DeleteExecutionPayload(_) => (), @@ -848,8 +1043,20 @@ impl, Cold: ItemStore> HotColdDB } } - self.hot_db - .do_atomically(self.convert_to_kv_batch(batch)?)?; + for op in blob_cache_ops { + match op { + StoreOp::PutBlobs(block_root, blobs) => { + guard.put_blobs(block_root, blobs); + } + + StoreOp::DeleteBlobs(block_root) => { + guard.delete_blobs(&block_root); + } + + _ => (), + } + } + drop(guard); Ok(()) @@ -1090,7 +1297,7 @@ impl, Cold: ItemStore> HotColdDB let state_root_iter = self.forwards_state_roots_iterator_until( low_slot, slot, - || (high_restore_point, Hash256::zero()), + || Ok((high_restore_point, Hash256::zero())), &self.spec, )?; @@ -1218,6 +1425,29 @@ impl, Cold: ItemStore> HotColdDB }) } + /// Fetch blobs for a given block from the store. + pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { + // Check the cache. 
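+        // The blob and block caches live behind a single lock; see `BlockCache`.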
+ if let Some(blobs) = self.block_cache.lock().get_blobs(block_root) { + metrics::inc_counter(&metrics::BEACON_BLOBS_CACHE_HIT_COUNT); + return Ok(Some(blobs.clone())); + } + + match self + .blobs_db + .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? + { + Some(ref blobs_bytes) => { + let blobs = BlobSidecarList::from_ssz_bytes(blobs_bytes)?; + self.block_cache + .lock() + .put_blobs(*block_root, blobs.clone()); + Ok(Some(blobs)) + } + None => Ok(None), + } + } + /// Get a reference to the `ChainSpec` used by the database. pub fn get_chain_spec(&self) -> &ChainSpec { &self.spec @@ -1251,10 +1481,17 @@ impl, Cold: ItemStore> HotColdDB let split_slot = self.get_split_slot(); let anchor = self.get_anchor_info(); - // There are no restore points stored if the state upper limit lies in the hot database. - // It hasn't been reached yet, and may never be. - if anchor.map_or(false, |a| a.state_upper_limit >= split_slot) { + // There are no restore points stored if the state upper limit lies in the hot database, + // and the lower limit is zero. It hasn't been reached yet, and may never be. + if anchor.as_ref().map_or(false, |a| { + a.state_upper_limit >= split_slot && a.state_lower_limit == 0 + }) { None + } else if let Some(lower_limit) = anchor + .map(|a| a.state_lower_limit) + .filter(|limit| *limit > 0) + { + Some(lower_limit) } else { Some( (split_slot - 1) / self.config.slots_per_restore_point @@ -1388,6 +1625,70 @@ impl, Cold: ItemStore> HotColdDB .map(|a| a.anchor_slot) } + /// Initialize the `BlobInfo` when starting from genesis or a checkpoint. + pub fn init_blob_info(&self, anchor_slot: Slot) -> Result { + let oldest_blob_slot = self.spec.deneb_fork_epoch.map(|fork_epoch| { + std::cmp::max(anchor_slot, fork_epoch.start_slot(E::slots_per_epoch())) + }); + let blob_info = BlobInfo { + oldest_blob_slot, + blobs_db: true, + }; + self.compare_and_set_blob_info(self.get_blob_info(), blob_info) + } + + /// Get a clone of the store's blob info. + /// + /// To do mutations, use `compare_and_set_blob_info`. + pub fn get_blob_info(&self) -> BlobInfo { + self.blob_info.read_recursive().clone() + } + + /// Atomically update the blob info from `prev_value` to `new_value`. + /// + /// Return a `KeyValueStoreOp` which should be written to disk, possibly atomically with other + /// values. + /// + /// Return an `BlobInfoConcurrentMutation` error if the `prev_value` provided + /// is not correct. + pub fn compare_and_set_blob_info( + &self, + prev_value: BlobInfo, + new_value: BlobInfo, + ) -> Result { + let mut blob_info = self.blob_info.write(); + if *blob_info == prev_value { + let kv_op = self.store_blob_info_in_batch(&new_value); + *blob_info = new_value; + Ok(kv_op) + } else { + Err(Error::BlobInfoConcurrentMutation) + } + } + + /// As for `compare_and_set_blob_info`, but also writes the blob info to disk immediately. + pub fn compare_and_set_blob_info_with_write( + &self, + prev_value: BlobInfo, + new_value: BlobInfo, + ) -> Result<(), Error> { + let kv_store_op = self.compare_and_set_blob_info(prev_value, new_value)?; + self.hot_db.do_atomically(vec![kv_store_op]) + } + + /// Load the blob info from disk, but do not set `self.blob_info`. + fn load_blob_info(&self) -> Result, Error> { + self.hot_db.get(&BLOB_INFO_KEY) + } + + /// Store the given `blob_info` to disk. + /// + /// The argument is intended to be `self.blob_info`, but is passed manually to avoid issues + /// with recursive locking. 
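+    ///
+    /// The returned op is typically produced via `compare_and_set_blob_info` and committed
+    /// atomically alongside other writes.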
+ fn store_blob_info_in_batch(&self, blob_info: &BlobInfo) -> KeyValueStoreOp { + blob_info.as_kv_store_op(BLOB_INFO_KEY) + } + /// Return the slot-window describing the available historic states. /// /// Returns `(lower_limit, upper_limit)`. @@ -1523,6 +1824,12 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.get(state_root) } + /// Verify that a parsed config is valid. + fn verify_config(config: &StoreConfig) -> Result<(), HotColdDBError> { + Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; + Self::verify_epochs_per_blob_prune(config.epochs_per_blob_prune) + } + /// Check that the restore point frequency is valid. /// /// Specifically, check that it is: @@ -1553,6 +1860,16 @@ impl, Cold: ItemStore> HotColdDB } } + // Check that epochs_per_blob_prune is at least 1 epoch to avoid attempting to prune the same + // epochs over and over again. + fn verify_epochs_per_blob_prune(epochs_per_blob_prune: u64) -> Result<(), HotColdDBError> { + if epochs_per_blob_prune > 0 { + Ok(()) + } else { + Err(HotColdDBError::ZeroEpochsPerBlobPrune) + } + } + /// Run a compaction pass to free up space used by deleted states. pub fn compact(&self) -> Result<(), Error> { self.hot_db.compact()?; @@ -1716,7 +2033,7 @@ impl, Cold: ItemStore> HotColdDB } } let payloads_pruned = ops.len(); - self.do_atomically(ops)?; + self.do_atomically_with_block_and_blobs_cache(ops)?; info!( self.log, "Execution payload pruning complete"; @@ -1724,6 +2041,343 @@ impl, Cold: ItemStore> HotColdDB ); Ok(()) } + + /// Try to prune blobs, approximating the current epoch from the split slot. + pub fn try_prune_most_blobs(&self, force: bool) -> Result<(), Error> { + let Some(deneb_fork_epoch) = self.spec.deneb_fork_epoch else { + debug!(self.log, "Deneb fork is disabled"); + return Ok(()); + }; + // The current epoch is >= split_epoch + 2. It could be greater if the database is + // configured to delay updating the split or finalization has ceased. In this instance we + // choose to also delay the pruning of blobs (we never prune without finalization anyway). + let min_current_epoch = self.get_split_slot().epoch(E::slots_per_epoch()) + 2; + let min_data_availability_boundary = std::cmp::max( + deneb_fork_epoch, + min_current_epoch.saturating_sub(self.spec.min_epochs_for_blob_sidecars_requests), + ); + + self.try_prune_blobs(force, min_data_availability_boundary) + } + + /// Try to prune blobs older than the data availability boundary. + /// + /// Blobs from the epoch `data_availability_boundary - blob_prune_margin_epochs` are retained. + /// This epoch is an _exclusive_ endpoint for the pruning process. + /// + /// This function only supports pruning blobs older than the split point, which is older than + /// (or equal to) finalization. Pruning blobs newer than finalization is not supported. + /// + /// This function also assumes that the split is stationary while it runs. It should only be + /// run from the migrator thread (where `migrate_database` runs) or the database manager. 
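+    ///
+    /// For example, with `data_availability_boundary = 100`, `blob_prune_margin_epochs = 0` and
+    /// the split in epoch 120, blobs up to and including epoch 99 are deleted and
+    /// `oldest_blob_slot` advances to the first slot of epoch 100.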
+ pub fn try_prune_blobs( + &self, + force: bool, + data_availability_boundary: Epoch, + ) -> Result<(), Error> { + if self.spec.deneb_fork_epoch.is_none() { + debug!(self.log, "Deneb fork is disabled"); + return Ok(()); + } + + let pruning_enabled = self.get_config().prune_blobs; + let margin_epochs = self.get_config().blob_prune_margin_epochs; + let epochs_per_blob_prune = self.get_config().epochs_per_blob_prune; + + if !force && !pruning_enabled { + debug!( + self.log, + "Blob pruning is disabled"; + "prune_blobs" => pruning_enabled + ); + return Ok(()); + } + + let blob_info = self.get_blob_info(); + let Some(oldest_blob_slot) = blob_info.oldest_blob_slot else { + error!(self.log, "Slot of oldest blob is not known"); + return Err(HotColdDBError::BlobPruneLogicError.into()); + }; + + // Start pruning from the epoch of the oldest blob stored. + // The start epoch is inclusive (blobs in this epoch will be pruned). + let start_epoch = oldest_blob_slot.epoch(E::slots_per_epoch()); + + // Prune blobs up until the `data_availability_boundary - margin` or the split + // slot's epoch, whichever is older. We can't prune blobs newer than the split. + // The end epoch is also inclusive (blobs in this epoch will be pruned). + let split = self.get_split_info(); + let end_epoch = std::cmp::min( + data_availability_boundary - margin_epochs - 1, + split.slot.epoch(E::slots_per_epoch()) - 1, + ); + let end_slot = end_epoch.end_slot(E::slots_per_epoch()); + + let can_prune = end_epoch != 0 && start_epoch <= end_epoch; + let should_prune = start_epoch + epochs_per_blob_prune <= end_epoch + 1; + + if !force && !should_prune || !can_prune { + debug!( + self.log, + "Blobs are pruned"; + "oldest_blob_slot" => oldest_blob_slot, + "data_availability_boundary" => data_availability_boundary, + "split_slot" => split.slot, + "end_epoch" => end_epoch, + "start_epoch" => start_epoch, + ); + return Ok(()); + } + + // Sanity checks. + if let Some(anchor) = self.get_anchor_info() { + if oldest_blob_slot < anchor.oldest_block_slot { + error!( + self.log, + "Oldest blob is older than oldest block"; + "oldest_blob_slot" => oldest_blob_slot, + "oldest_block_slot" => anchor.oldest_block_slot + ); + return Err(HotColdDBError::BlobPruneLogicError.into()); + } + } + + // Iterate block roots forwards from the oldest blob slot. + debug!( + self.log, + "Pruning blobs"; + "start_epoch" => start_epoch, + "end_epoch" => end_epoch, + "data_availability_boundary" => data_availability_boundary, + ); + + let mut ops = vec![]; + let mut last_pruned_block_root = None; + + for res in self.forwards_block_roots_iterator_until( + oldest_blob_slot, + end_slot, + || { + let (_, split_state) = self + .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + ))?; + + Ok((split_state, split.block_root)) + }, + &self.spec, + )? { + let (block_root, slot) = match res { + Ok(tuple) => tuple, + Err(e) => { + warn!( + self.log, + "Stopping blob pruning early"; + "error" => ?e, + ); + break; + } + }; + + if Some(block_root) != last_pruned_block_root && self.blobs_exist(&block_root)? 
{ + trace!( + self.log, + "Pruning blobs of block"; + "slot" => slot, + "block_root" => ?block_root, + ); + last_pruned_block_root = Some(block_root); + ops.push(StoreOp::DeleteBlobs(block_root)); + } + + if slot >= end_slot { + break; + } + } + let blob_lists_pruned = ops.len(); + let new_blob_info = BlobInfo { + oldest_blob_slot: Some(end_slot + 1), + blobs_db: blob_info.blobs_db, + }; + let update_blob_info = self.compare_and_set_blob_info(blob_info, new_blob_info)?; + ops.push(StoreOp::KeyValueOp(update_blob_info)); + + self.do_atomically_with_block_and_blobs_cache(ops)?; + debug!( + self.log, + "Blob pruning complete"; + "blob_lists_pruned" => blob_lists_pruned, + ); + + Ok(()) + } + + /// This function fills in missing block roots between last restore point slot and split + /// slot, if any. + pub fn heal_freezer_block_roots_at_split(&self) -> Result<(), Error> { + let split = self.get_split_info(); + let last_restore_point_slot = (split.slot - 1) / self.config.slots_per_restore_point + * self.config.slots_per_restore_point; + + // Load split state (which has access to block roots). + let (_, split_state) = self + .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + ))?; + + let mut batch = vec![]; + let mut chunk_writer = ChunkWriter::::new( + &self.cold_db, + last_restore_point_slot.as_usize(), + )?; + + for slot in (last_restore_point_slot.as_u64()..split.slot.as_u64()).map(Slot::new) { + let block_root = *split_state.get_block_root(slot)?; + chunk_writer.set(slot.as_usize(), block_root, &mut batch)?; + } + chunk_writer.write(&mut batch)?; + self.cold_db.do_atomically(batch)?; + + Ok(()) + } + + pub fn heal_freezer_block_roots_at_genesis(&self) -> Result<(), Error> { + let oldest_block_slot = self.get_oldest_block_slot(); + let split_slot = self.get_split_slot(); + + // Check if backfill has been completed AND the freezer db has data in it + if oldest_block_slot != 0 || split_slot == 0 { + return Ok(()); + } + + let mut block_root_iter = self.forwards_block_roots_iterator_until( + Slot::new(0), + split_slot - 1, + || { + Err(Error::DBError { + message: "Should not require end state".to_string(), + }) + }, + &self.spec, + )?; + + let (genesis_block_root, _) = block_root_iter.next().ok_or_else(|| Error::DBError { + message: "Genesis block root missing".to_string(), + })??; + + let slots_to_fix = itertools::process_results(block_root_iter, |iter| { + iter.take_while(|(block_root, _)| block_root.is_zero()) + .map(|(_, slot)| slot) + .collect::>() + })?; + + let Some(first_slot) = slots_to_fix.first() else { + return Ok(()); + }; + + let mut chunk_writer = + ChunkWriter::::new(&self.cold_db, first_slot.as_usize())?; + let mut ops = vec![]; + for slot in slots_to_fix { + chunk_writer.set(slot.as_usize(), genesis_block_root, &mut ops)?; + } + + chunk_writer.write(&mut ops)?; + self.cold_db.do_atomically(ops)?; + + Ok(()) + } + + /// Delete *all* states from the freezer database and update the anchor accordingly. + /// + /// WARNING: this method deletes the genesis state and replaces it with the provided + /// `genesis_state`. This is to support its use in schema migrations where the storage scheme of + /// the genesis state may be modified. It is the responsibility of the caller to ensure that the + /// genesis state is correct, else a corrupt database will be created. 
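+    ///
+    /// The anchor is rewritten (with `state_upper_limit` set to `STATE_UPPER_LIMIT_NO_RETAIN`)
+    /// before any freezer data is deleted, so the cold DB deletions can safely be re-run if the
+    /// process is interrupted.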
+ pub fn prune_historic_states( + &self, + genesis_state_root: Hash256, + genesis_state: &BeaconState, + ) -> Result<(), Error> { + // Make sure there are no missing block roots before pruning + self.heal_freezer_block_roots_at_split()?; + + // Update the anchor to use the dummy state upper limit and disable historic state storage. + let old_anchor = self.get_anchor_info(); + let new_anchor = if let Some(old_anchor) = old_anchor.clone() { + AnchorInfo { + state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, + state_lower_limit: Slot::new(0), + ..old_anchor.clone() + } + } else { + AnchorInfo { + anchor_slot: Slot::new(0), + oldest_block_slot: Slot::new(0), + oldest_block_parent: Hash256::zero(), + state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, + state_lower_limit: Slot::new(0), + } + }; + + // Commit the anchor change immediately: if the cold database ops fail they can always be + // retried, and we can't do them atomically with this change anyway. + self.compare_and_set_anchor_info_with_write(old_anchor, Some(new_anchor))?; + + // Stage freezer data for deletion. Do not bother loading and deserializing values as this + // wastes time and is less schema-agnostic. My hope is that this method will be useful for + // migrating to the tree-states schema (delete everything in the freezer then start afresh). + let mut cold_ops = vec![]; + + let columns = [ + DBColumn::BeaconState, + DBColumn::BeaconStateSummary, + DBColumn::BeaconRestorePoint, + DBColumn::BeaconStateRoots, + DBColumn::BeaconHistoricalRoots, + DBColumn::BeaconRandaoMixes, + DBColumn::BeaconHistoricalSummaries, + ]; + + for column in columns { + for res in self.cold_db.iter_column_keys::>(column) { + let key = res?; + cold_ops.push(KeyValueStoreOp::DeleteKey(get_key_for_col( + column.as_str(), + &key, + ))); + } + } + + // XXX: We need to commit the mass deletion here *before* re-storing the genesis state, as + // the current schema performs reads as part of `store_cold_state`. This can be deleted + // once the target schema is tree-states. If the process is killed before the genesis state + // is written this can be fixed by re-running. + info!( + self.log, + "Deleting historic states"; + "num_kv" => cold_ops.len(), + ); + self.cold_db.do_atomically(std::mem::take(&mut cold_ops))?; + + // If we just deleted the genesis state, re-store it using the *current* schema, which + // may be different from the schema of the genesis state we just deleted. + if self.get_split_slot() > 0 { + info!( + self.log, + "Re-storing genesis state"; + "state_root" => ?genesis_state_root, + ); + self.store_cold_state(&genesis_state_root, genesis_state, &mut cold_ops)?; + self.cold_db.do_atomically(cold_ops)?; + } + + Ok(()) + } } /// Advance the split point of the store, moving new finalized states to the freezer. @@ -1831,7 +2485,7 @@ pub fn migrate_database, Cold: ItemStore>( store.cold_db.do_atomically(cold_db_ops)?; // Warning: Critical section. We have to take care not to put any of the two databases in an - // inconsistent state if the OS process dies at any point during the freezeing + // inconsistent state if the OS process dies at any point during the freezing // procedure.
// // Since it is pretty much impossible to be atomic across more than one database, we trade @@ -1847,7 +2501,7 @@ pub fn migrate_database, Cold: ItemStore>( let mut split_guard = store.split.write(); let latest_split_slot = split_guard.slot; - // Detect a sitation where the split point is (erroneously) changed from more than one + // Detect a situation where the split point is (erroneously) changed from more than one // place in code. if latest_split_slot != current_split_slot { error!( @@ -1880,7 +2534,7 @@ pub fn migrate_database, Cold: ItemStore>( } // Delete the states from the hot database if we got this far. - store.do_atomically(hot_db_ops)?; + store.do_atomically_with_block_and_blobs_cache(hot_db_ops)?; debug!( store.log, @@ -1892,7 +2546,7 @@ pub fn migrate_database, Cold: ItemStore>( } /// Struct for storing the split slot and state root in the database. -#[derive(Debug, Clone, Copy, PartialEq, Default, Encode, Decode, Deserialize, Serialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Encode, Decode, Deserialize, Serialize)] pub struct Split { pub slot: Slot, pub state_root: Hash256, diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index b5753f379..6445dad38 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -1,6 +1,9 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; -use types::{EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; +use types::{ + BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadMerge, +}; macro_rules! impl_store_item { ($ty_name:ident) => { @@ -21,6 +24,8 @@ macro_rules! impl_store_item { } impl_store_item!(ExecutionPayloadMerge); impl_store_item!(ExecutionPayloadCapella); +impl_store_item!(ExecutionPayloadDeneb); +impl_store_item!(BlobSidecarList); /// This fork-agnostic implementation should be only used for writing. /// @@ -36,9 +41,13 @@ impl StoreItem for ExecutionPayload { } fn from_store_bytes(bytes: &[u8]) -> Result { - ExecutionPayloadCapella::from_ssz_bytes(bytes) - .map(Self::Capella) - .or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge)) + ExecutionPayloadDeneb::from_ssz_bytes(bytes) + .map(Self::Deneb) + .or_else(|_| { + ExecutionPayloadCapella::from_ssz_bytes(bytes) + .map(Self::Capella) + .or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge)) + }) .map_err(Into::into) } } diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 7aac9f72d..62619dd2c 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -1,7 +1,6 @@ use super::*; use crate::hot_cold_store::HotColdDBError; use crate::metrics; -use db_key::Key; use leveldb::compaction::Compaction; use leveldb::database::batch::{Batch, Writebatch}; use leveldb::database::kv::KV; @@ -170,16 +169,15 @@ impl KeyValueStore for LevelDB { for (start_key, end_key) in [ endpoints(DBColumn::BeaconStateTemporary), endpoints(DBColumn::BeaconState), + endpoints(DBColumn::BeaconStateSummary), ] { self.db.compact(&start_key, &end_key); } Ok(()) } - /// Iterate through all keys and values in a particular column. 
- fn iter_column(&self, column: DBColumn) -> ColumnIter { - let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes())); + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + let start_key = BytesKey::from_vec(get_key_for_col(column.into(), from)); let iter = self.db.iter(self.read_options()); iter.seek(&start_key); @@ -187,21 +185,50 @@ impl KeyValueStore for LevelDB { Box::new( iter.take_while(move |(key, _)| key.matches_column(column)) .map(move |(bytes_key, value)| { - let key = - bytes_key - .remove_column(column) - .ok_or(HotColdDBError::IterationError { - unexpected_key: bytes_key, - })?; - Ok((key, value)) + let key = bytes_key.remove_column_variable(column).ok_or_else(|| { + HotColdDBError::IterationError { + unexpected_key: bytes_key.clone(), + } + })?; + Ok((K::from_bytes(key)?, value)) + }), + ) + } + + fn iter_raw_entries(&self, column: DBColumn, prefix: &[u8]) -> RawEntryIter { + let start_key = BytesKey::from_vec(get_key_for_col(column.into(), prefix)); + + let iter = self.db.iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |(key, _)| key.key.starts_with(start_key.key.as_slice())) + .map(move |(bytes_key, value)| { + let subkey = &bytes_key.key[column.as_bytes().len()..]; + Ok((Vec::from(subkey), value)) + }), + ) + } + + fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter { + let start_key = BytesKey::from_vec(get_key_for_col(column.into(), prefix)); + + let iter = self.db.keys_iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |key| key.key.starts_with(start_key.key.as_slice())) + .map(move |bytes_key| { + let subkey = &bytes_key.key[column.as_bytes().len()..]; + Ok(Vec::from(subkey)) }), ) } /// Iterate through all keys and values in a particular column. - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes())); + BytesKey::from_vec(get_key_for_col(column.into(), &vec![0; column.key_size()])); let iter = self.db.keys_iter(self.read_options()); iter.seek(&start_key); @@ -209,13 +236,12 @@ impl KeyValueStore for LevelDB { Box::new( iter.take_while(move |key| key.matches_column(column)) .map(move |bytes_key| { - let key = - bytes_key - .remove_column(column) - .ok_or(HotColdDBError::IterationError { - unexpected_key: bytes_key, - })?; - Ok(key) + let key = bytes_key.remove_column_variable(column).ok_or_else(|| { + HotColdDBError::IterationError { + unexpected_key: bytes_key.clone(), + } + })?; + K::from_bytes(key) }), ) } @@ -224,12 +250,12 @@ impl KeyValueStore for LevelDB { impl ItemStore for LevelDB {} /// Used for keying leveldb. -#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct BytesKey { key: Vec, } -impl Key for BytesKey { +impl db_key::Key for BytesKey { fn from_u8(key: &[u8]) -> Self { Self { key: key.to_vec() } } @@ -245,12 +271,20 @@ impl BytesKey { self.key.starts_with(column.as_bytes()) } - /// Remove the column from a key, returning its `Hash256` portion. + /// Remove the column from a 32 byte key, yielding the `Hash256` key. pub fn remove_column(&self, column: DBColumn) -> Option { + let key = self.remove_column_variable(column)?; + (column.key_size() == 32).then(|| Hash256::from_slice(key)) + } + + /// Remove the column from a key. 
+ /// + /// Will return `None` if the value doesn't match the column or has the wrong length. + pub fn remove_column_variable(&self, column: DBColumn) -> Option<&[u8]> { if self.matches_column(column) { let subkey = &self.key[column.as_bytes().len()..]; - if subkey.len() == 32 { - return Some(Hash256::from_slice(subkey)); + if subkey.len() == column.key_size() { + return Some(subkey); } } None diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index ee01fa1ae..eacd28d2d 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -34,6 +34,7 @@ pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; pub use self::partial_beacon_state::PartialBeaconState; +pub use crate::metadata::BlobInfo; pub use errors::Error; pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; @@ -43,8 +44,11 @@ use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; -pub type ColumnIter<'a> = Box), Error>> + 'a>; -pub type ColumnKeyIter<'a> = Box> + 'a>; +pub type ColumnIter<'a, K> = Box), Error>> + 'a>; +pub type ColumnKeyIter<'a, K> = Box> + 'a>; + +pub type RawEntryIter<'a> = Box, Vec), Error>> + 'a>; +pub type RawKeyIter<'a> = Box, Error>> + 'a>; pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Retrieve some bytes in `column` with `key`. @@ -80,15 +84,42 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { fn compact(&self) -> Result<(), Error>; /// Iterate through all keys and values in a particular column. - fn iter_column(&self, _column: DBColumn) -> ColumnIter { - // Default impl for non LevelDB databases + fn iter_column(&self, column: DBColumn) -> ColumnIter { + self.iter_column_from(column, &vec![0; column.key_size()]) + } + + /// Iterate through all keys and values in a column from a given starting point. + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter; + + fn iter_raw_entries(&self, _column: DBColumn, _prefix: &[u8]) -> RawEntryIter { + Box::new(std::iter::empty()) + } + + fn iter_raw_keys(&self, _column: DBColumn, _prefix: &[u8]) -> RawKeyIter { Box::new(std::iter::empty()) } /// Iterate through all keys in a particular column. - fn iter_column_keys(&self, _column: DBColumn) -> ColumnKeyIter { - // Default impl for non LevelDB databases - Box::new(std::iter::empty()) + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter; +} + +pub trait Key: Sized + 'static { + fn from_bytes(key: &[u8]) -> Result; +} + +impl Key for Hash256 { + fn from_bytes(key: &[u8]) -> Result { + if key.len() == 32 { + Ok(Hash256::from_slice(key)) + } else { + Err(Error::InvalidKey) + } + } +} + +impl Key for Vec { + fn from_bytes(key: &[u8]) -> Result { + Ok(key.to_vec()) } } @@ -99,6 +130,7 @@ pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec { } #[must_use] +#[derive(Clone)] pub enum KeyValueStoreOp { PutKeyValue(Vec, Vec), DeleteKey(Vec), @@ -152,13 +184,16 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Reified key-value storage operation. Helps in modifying the storage atomically. 
/// See also https://github.com/sigp/lighthouse/issues/692 +#[derive(Clone)] pub enum StoreOp<'a, E: EthSpec> { PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), + PutBlobs(Hash256, BlobSidecarList), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), DeleteBlock(Hash256), + DeleteBlobs(Hash256), DeleteState(Hash256, Option), DeleteExecutionPayload(Hash256), KeyValueOp(KeyValueStoreOp), @@ -172,6 +207,8 @@ pub enum DBColumn { BeaconMeta, #[strum(serialize = "blk")] BeaconBlock, + #[strum(serialize = "blb")] + BeaconBlob, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). #[strum(serialize = "ste")] BeaconState, @@ -214,6 +251,8 @@ pub enum DBColumn { OptimisticTransitionBlock, #[strum(serialize = "bhs")] BeaconHistoricalSummaries, + #[strum(serialize = "olc")] + OverflowLRUCache, } /// A block from the database, which might have an execution payload or not. @@ -230,6 +269,35 @@ impl DBColumn { pub fn as_bytes(self) -> &'static [u8] { self.as_str().as_bytes() } + + /// Most database keys are 32 bytes, but some freezer DB keys are 8 bytes. + /// + /// This function returns the number of bytes used by keys in a given column. + pub fn key_size(self) -> usize { + match self { + Self::OverflowLRUCache => 33, // See `OverflowKey` encode impl. + Self::BeaconMeta + | Self::BeaconBlock + | Self::BeaconState + | Self::BeaconBlob + | Self::BeaconStateSummary + | Self::BeaconStateTemporary + | Self::ExecPayload + | Self::BeaconChain + | Self::OpPool + | Self::Eth1Cache + | Self::ForkChoice + | Self::PubkeyCache + | Self::BeaconRestorePoint + | Self::DhtEnrs + | Self::OptimisticTransitionBlock => 32, + Self::BeaconBlockRoots + | Self::BeaconStateRoots + | Self::BeaconHistoricalRoots + | Self::BeaconHistoricalSummaries + | Self::BeaconRandaoMixes => 8, + } + } } /// An item that may stored in a `Store` by serializing and deserializing from bytes. diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 1473f59a4..c2e494dce 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,17 +1,17 @@ -use super::{Error, ItemStore, KeyValueStore, KeyValueStoreOp}; -use crate::{ColumnIter, DBColumn}; +use crate::{ + get_key_for_col, leveldb_store::BytesKey, ColumnIter, ColumnKeyIter, DBColumn, Error, + ItemStore, Key, KeyValueStore, KeyValueStoreOp, +}; use parking_lot::{Mutex, MutexGuard, RwLock}; -use std::collections::{HashMap, HashSet}; +use std::collections::BTreeMap; use std::marker::PhantomData; use types::*; -type DBHashMap = HashMap, Vec>; -type DBKeyMap = HashMap, HashSet>>; +type DBMap = BTreeMap>; -/// A thread-safe `HashMap` wrapper. +/// A thread-safe `BTreeMap` wrapper. pub struct MemoryStore { - db: RwLock, - col_keys: RwLock, + db: RwLock, transaction_mutex: Mutex<()>, _phantom: PhantomData, } @@ -20,36 +20,24 @@ impl MemoryStore { /// Create a new, empty database. pub fn open() -> Self { Self { - db: RwLock::new(HashMap::new()), - col_keys: RwLock::new(HashMap::new()), + db: RwLock::new(BTreeMap::new()), transaction_mutex: Mutex::new(()), _phantom: PhantomData, } } - - fn get_key_for_col(col: &str, key: &[u8]) -> Vec { - let mut col = col.as_bytes().to_vec(); - col.append(&mut key.to_vec()); - col - } } impl KeyValueStore for MemoryStore { /// Get the value of some key from the database. Returns `None` if the key does not exist. 
fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = BytesKey::from_vec(get_key_for_col(col, key)); Ok(self.db.read().get(&column_key).cloned()) } /// Puts a key in the database. fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = BytesKey::from_vec(get_key_for_col(col, key)); self.db.write().insert(column_key, val.to_vec()); - self.col_keys - .write() - .entry(col.as_bytes().to_vec()) - .or_insert_with(HashSet::new) - .insert(key.to_vec()); Ok(()) } @@ -64,18 +52,14 @@ impl KeyValueStore for MemoryStore { /// Return true if some key exists in some column. fn key_exists(&self, col: &str, key: &[u8]) -> Result { - let column_key = Self::get_key_for_col(col, key); + let column_key = BytesKey::from_vec(get_key_for_col(col, key)); Ok(self.db.read().contains_key(&column_key)) } /// Delete some key from the database. fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = BytesKey::from_vec(get_key_for_col(col, key)); self.db.write().remove(&column_key); - self.col_keys - .write() - .get_mut(&col.as_bytes().to_vec()) - .map(|set| set.remove(key)); Ok(()) } @@ -83,35 +67,41 @@ impl KeyValueStore for MemoryStore { for op in batch { match op { KeyValueStoreOp::PutKeyValue(key, value) => { - self.db.write().insert(key, value); + self.db.write().insert(BytesKey::from_vec(key), value); } - KeyValueStoreOp::DeleteKey(hash) => { - self.db.write().remove(&hash); + KeyValueStoreOp::DeleteKey(key) => { + self.db.write().remove(&BytesKey::from_vec(key)); } } } Ok(()) } - // pub type ColumnIter<'a> = Box), Error>> + 'a>; - fn iter_column(&self, column: DBColumn) -> ColumnIter { + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + // We use this awkward pattern because we can't lock the `self.db` field *and* maintain a + // reference to the lock guard across calls to `.next()`. This would require a + // struct with a field (the iterator) which references another field (the lock guard). + let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), from)); let col = column.as_str(); - if let Some(keys) = self - .col_keys + let keys = self + .db .read() - .get(col.as_bytes()) - .map(|set| set.iter().cloned().collect::>()) - { - Box::new(keys.into_iter().filter_map(move |key| { - let hash = Hash256::from_slice(&key); - self.get_bytes(col, &key) - .transpose() - .map(|res| res.map(|bytes| (hash, bytes))) - })) - } else { - Box::new(std::iter::empty()) - } + .range(start_key..)
+ .take_while(|(k, _)| k.remove_column_variable(column).is_some()) + .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) + .collect::>(); + Box::new(keys.into_iter().filter_map(move |key| { + self.get_bytes(col, &key).transpose().map(|res| { + let k = K::from_bytes(&key)?; + let v = res?; + Ok((k, v)) + }) + })) + } + + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + Box::new(self.iter_column(column).map(|res| res.map(|(k, _)| k))) } fn begin_rw_transaction(&self) -> MutexGuard<()> { diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index ccfddcf8f..1675051bd 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -1,10 +1,10 @@ use crate::{DBColumn, Error, StoreItem}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(17); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(19); // All the keys that get stored under the `BeaconMeta` column. // @@ -15,6 +15,7 @@ pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2); pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4); pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); +pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); /// State upper limit value used to indicate that a node is not storing historic states. pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); @@ -122,3 +123,32 @@ impl StoreItem for AnchorInfo { Ok(Self::from_ssz_bytes(bytes)?) } } + +/// Database parameters relevant to blob sync. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] +pub struct BlobInfo { + /// The slot after which blobs are or *will be* available (>=). + /// + /// If this slot is in the future, then it is the first slot of the Deneb fork, from which blobs + /// will be available. + /// + /// If the `oldest_blob_slot` is `None` then this means that the Deneb fork epoch is not yet + /// known. + pub oldest_blob_slot: Option, + /// A separate blobs database is in use (deprecated, always `true`). + pub blobs_db: bool, +} + +impl StoreItem for BlobInfo { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } +} diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 72c5e6196..2d901fdd9 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -101,6 +101,10 @@ lazy_static! 
{ "store_beacon_block_cache_hit_total", "Number of hits to the store's block cache" ); + pub static ref BEACON_BLOBS_CACHE_HIT_COUNT: Result = try_create_int_counter( + "store_beacon_blobs_cache_hit_total", + "Number of hits to the store's blob cache" + ); pub static ref BEACON_BLOCK_READ_TIMES: Result = try_create_histogram( "store_beacon_block_read_overhead_seconds", "Overhead on reading a beacon block from the DB (e.g., decoding)" diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 9f2532d0a..1fb5751a0 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -15,7 +15,7 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. #[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -67,9 +67,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub current_epoch_participation: VariableList, // Finality @@ -79,13 +79,13 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub next_sync_committee: Arc>, // Execution @@ -99,15 +99,20 @@ where partial_getter(rename = "latest_execution_payload_header_capella") )] pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct( + only(Deneb), + partial_getter(rename = "latest_execution_payload_header_deneb") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderDeneb, // Capella - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub next_withdrawal_index: u64, - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub next_withdrawal_validator_index: u64, #[ssz(skip_serializing, skip_deserializing)] - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub historical_summaries: Option>, } @@ -222,6 +227,23 @@ impl PartialBeaconState { ], [historical_summaries] ), + BeaconState::Deneb(s) => impl_from_state_forgetful!( + s, + outer, + Deneb, + PartialBeaconStateDeneb, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] + ), } } @@ -451,6 +473,22 @@ impl TryInto> for PartialBeaconState { ], [historical_summaries] ), + PartialBeaconState::Deneb(inner) => impl_try_into_beacon_state!( + inner, + Deneb, + BeaconStateDeneb, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + 
next_withdrawal_validator_index + ], + [historical_summaries] + ), }; Ok(state) } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index bac5d3cc8..8fe13777a 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -17,9 +17,7 @@ where Cold: ItemStore, { pub fn reconstruct_historic_states(self: &Arc) -> Result<(), Error> { - let mut anchor = if let Some(anchor) = self.get_anchor_info() { - anchor - } else { + let Some(mut anchor) = self.get_anchor_info() else { // Nothing to do, history is complete. return Ok(()); }; diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index 944846c86..7c2db6960 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -16,12 +16,10 @@ pub fn spawn_timer( let log = executor.log().clone(); let timer_future = async move { loop { - let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { - Some(duration) => duration, - None => { - warn!(log, "Unable to determine duration to next slot"); - return; - } + let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() + else { + warn!(log, "Unable to determine duration to next slot"); + return; }; sleep(duration_to_next_slot).await; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 3f58d8aa4..e32365910 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -53,6 +53,13 @@ * [MEV](./builders.md) * [Merge Migration](./merge-migration.md) * [Late Block Re-orgs](./late-block-re-orgs.md) +* [Built-In Documentation](./help_general.md) + * [Beacon Node](./help_bn.md) + * [Validator Client](./help_vc.md) + * [Validator Manager](./help_vm.md) + * [Create](./help_vm_create.md) + * [Import](./help_vm_import.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 11a006493..519ce5705 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -126,6 +126,22 @@ curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H " ``` You can replace `1` in the above command with the validator index that you would like to query. Other API query can be done similarly by changing the link according to the Beacon API. +### Events API +The [events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream) provides information such as the payload attributes that are of interest to block builders and relays. To query the payload attributes, it is necessary to run Lighthouse beacon node with the flag `--always-prepare-payload`. It is also recommended to add the flag `--prepare-payload-lookahead 8000` which configures the payload attributes to be sent at 4s into each slot (or 8s from the start of the next slot). 
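+For example, the beacon node might be started as follows (a sketch; combine these with your usual flags):
+
+```bash
+lighthouse bn --always-prepare-payload --prepare-payload-lookahead 8000
+```
+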
An example of the command is: + +```bash +curl -X 'GET' \ +'http://localhost:5052/eth/v1/events?topics=payload_attributes' \ +-H 'accept: text/event-stream' +``` + +An example of response is: + +```json +data:{"version":"capella","data":{"proposal_slot":"11047","proposer_index":"336057","parent_block_root":"0x26f8999d270dd4677c2a1c815361707157a531f6c599f78fa942c98b545e1799","parent_block_number":"9259","parent_block_hash":"0x7fb788cd7afa814e578afa00a3edd250cdd4c8e35c22badd327d981b5bda33d2","payload_attributes":{"timestamp":"1696034964","prev_randao":"0xeee34d7a3f6b99ade6c6a881046c9c0e96baab2ed9469102d46eb8d6e4fde14c","suggested_fee_recipient":"0x0000000000000000000000000000000000000001","withdrawals":[{"index":"40705","validator_index":"360712","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1202941"},{"index":"40706","validator_index":"360713","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1201138"},{"index":"40707","validator_index":"360714","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1215255"},{"index":"40708","validator_index":"360715","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1161977"},{"index":"40709","validator_index":"360716","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1257278"},{"index":"40710","validator_index":"360717","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1247740"},{"index":"40711","validator_index":"360718","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1204337"},{"index":"40712","validator_index":"360719","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1183575"},{"index":"40713","validator_index":"360720","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1157785"},{"index":"40714","validator_index":"360721","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1143371"},{"index":"40715","validator_index":"360722","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1234787"},{"index":"40716","validator_index":"360723","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1286673"},{"index":"40717","validator_index":"360724","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1419241"},{"index":"40718","validator_index":"360725","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1231015"},{"index":"40719","validator_index":"360726","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1304321"},{"index":"40720","validator_index":"360727","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1236543"}]}}} +``` + + ## Serving the HTTP API over TLS > **Warning**: This feature is currently experimental. diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 7626d6401..32c967c9e 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -125,7 +125,7 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap ### `/lighthouse/ui/validator_metrics` -Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. +Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. 
This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. ```bash curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq ``` @@ -356,7 +356,7 @@ health of the execution node that the beacon node is connected to. - `latest_cached_block_number` & `latest_cached_block_timestamp`: the block number and timestamp of the latest block we have in our block cache. - For correct execution client voting this timestamp should be later than the -`voting_target_timestamp`. +`voting_target_timestamp`. - `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. - `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the @@ -480,9 +480,9 @@ curl -X GET "http://localhost:5052/lighthouse/beacon/states/0/ssz" | jq ### `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. Returns a list -of objects, each including the validator index, epoch, and `is_live` status of a requested validator. +of objects, each including the validator index, epoch, and `is_live` status of a requested validator. -This endpoint is used in doppelganger detection, and can only provide accurate information for the current, previous, or next epoch. +This endpoint is used in doppelganger detection, and can only provide accurate information for the current, previous, or next epoch. > Note that for this API, if you insert an arbitrary epoch other than the previous, current or next epoch of the network, it will return `"code:400"` and `BAD_REQUEST`. @@ -547,26 +547,6 @@ reconstruction has yet to be completed. For more information on the specific meanings of these fields see the docs on [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states). -### `/lighthouse/database/reconstruct` - -Instruct Lighthouse to begin reconstructing historic states, see -[Reconstructing States](./checkpoint-sync.md#reconstructing-states). This is an alternative -to the `--reconstruct-historic-states` flag. - -``` -curl -X POST "http://localhost:5052/lighthouse/database/reconstruct" | jq -``` - -```json -"success" -``` - -The endpoint will return immediately. See the beacon node logs for an indication of progress. - -### `/lighthouse/database/historical_blocks` - -Manually provide `SignedBeaconBlock`s to backfill the database. This is intended -for use by Lighthouse developers during testing only. ### `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. 
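+It can be queried like the other endpoints:
+
+```bash
+curl -X GET "http://localhost:5052/lighthouse/merge_readiness" | jq
+```
+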
Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty. An example is shown below: @@ -719,7 +699,7 @@ The first few lines of the response would look like: ] } } -] +] ``` Caveats: @@ -816,4 +796,4 @@ An open port will return: ```json { "data": true -} \ No newline at end of file +} diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index ee0cfd200..d3b2edafe 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -243,6 +243,7 @@ Example Response Body "INACTIVITY_SCORE_RECOVERY_RATE": "16", "EJECTION_BALANCE": "16000000000", "MIN_PER_EPOCH_CHURN_LIMIT": "4", + "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT": "8", "CHURN_LIMIT_QUOTIENT": "65536", "PROPOSER_SCORE_BOOST": "40", "DEPOSIT_CHAIN_ID": "5", @@ -426,7 +427,7 @@ Example Response Body ## `PATCH /lighthouse/validators/:voting_pubkey` -Update some values for the validator with `voting_pubkey`. Possible fields: `enabled`, `gas_limit`, `builder_proposals`, +Update some values for the validator with `voting_pubkey`. Possible fields: `enabled`, `gas_limit`, `builder_proposals`, `builder_boost_factor`, `prefer_builder_proposals` and `graffiti`. The following example updates a validator from `enabled: true` to `enabled: false`. ### HTTP Specification diff --git a/book/src/builders.md b/book/src/builders.md index 2be4841dd..014e43211 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -21,9 +21,7 @@ The beacon node and validator client each require a new flag for lighthouse to b ``` lighthouse bn --builder https://mainnet-builder.test ``` -The `--builder` flag will cause the beacon node to query the provided URL during block production for a block -payload with stubbed-out transactions. If this request fails, Lighthouse will fall back to the local -execution engine and produce a block using transactions gathered and verified locally. +The `--builder` flag will cause the beacon node to simultaneously query the provided URL and the local execution engine during block production for a block payload with stubbed-out transactions. If either fails, the successful result will be used; if both succeed, the more profitable result will be used. The beacon node will *only* query for this type of block (a "blinded" block) when a validator specifically requests it. Otherwise, it will continue to serve full blocks as normal. In order to configure the validator client to query for @@ -33,6 +31,18 @@ blinded blocks, you should use the following flag: ``` lighthouse vc --builder-proposals ``` With the `--builder-proposals` flag, the validator client will ask for blinded blocks for all validators it manages. + +``` +lighthouse vc --prefer-builder-proposals +``` +With the `--prefer-builder-proposals` flag, the validator client will always prefer blinded blocks, regardless of the payload value, for all validators it manages. + +``` +lighthouse vc --builder-boost-factor +``` +With the `--builder-boost-factor` flag, a percentage multiplier is applied to the builder's payload value when choosing between a +builder payload header and payload from the paired execution node.
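+
+For example, a value of `50` would weight builder payloads at half their reported value, biasing proposals towards the local execution engine:
+
+```
+lighthouse vc --builder-boost-factor 50
+```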
+ In order to configure whether a validator queries for blinded blocks check out [this section.](#validator-client-configuration) ## Multiple builders @@ -48,9 +58,9 @@ relays, run one of the following services and configure lighthouse to use it wit In the validator client you can configure gas limit and fee recipient on a per-validator basis. If no gas limit is configured, Lighthouse will use a default gas limit of 30,000,000, which is the current default value used in execution engines. You can also enable or disable use of external builders on a per-validator basis rather than using -`--builder-proposals`, which enables external builders for all validators. In order to manage these configurations -per-validator, you can either make updates to the `validator_definitions.yml` file or you can use the HTTP requests -described below. +`--builder-proposals`, `--builder-boost-factor` or `--prefer-builder-proposals`, which apply builder related preferences for all validators. +In order to manage these configurations per-validator, you can either make updates to the `validator_definitions.yml` file +or you can use the HTTP requests described below. Both the gas limit and fee recipient will be passed along as suggestions to connected builders. If there is a discrepancy in either, it will *not* keep you from proposing a block with the builder. This is because the bounds on gas limit are @@ -178,31 +188,6 @@ By default, Lighthouse is strict with these conditions, but we encourage users t - `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder API will always be used for payload construction, regardless of recent chain conditions. -## Builder Profit Threshold - -If you are generally uneasy with the risks associated with outsourced payload production (liveness/censorship) but would -consider using it for the chance of out-sized rewards, this flag may be useful: - -`--builder-profit-threshold ` - -The number provided indicates the minimum reward that an external payload must provide the proposer for it to be considered -for inclusion in a proposal. For example, if you'd only like to use an external payload for a reward of >= 0.25 ETH, you -would provide your beacon node with `--builder-profit-threshold 250000000000000000`. If it's your turn to propose and the -most valuable payload offered by builders is only 0.1 ETH, the local execution engine's payload will be used. - -Since the [Capella](https://ethereum.org/en/history/#capella) upgrade, a comparison of the external payload and local payload will be made according to the [engine_getPayloadV2](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_getpayloadv2) API. The logic is as follows: - -``` -if local payload value >= builder payload value: - use local payload -else if builder payload value >= builder_profit_threshold or builder_profit_threshold == 0: - use builder payload -else: - use local payload -``` - -If you would like to always use the builder payload, you can add the flag `--always-prefer-builder-payload` to the beacon node. - ## Checking your builder config You can check that your builder is configured correctly by looking for these log messages. @@ -258,6 +243,9 @@ used in place of one from the builder: INFO Reconstructing a full block using a local payload ``` +## Information for block builders and relays +Block builders and relays can query beacon node events from the [Events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream). 
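+For instance, a sketch of subscribing to the `payload_attributes` topic as a server-sent event stream (the port `5052` here assumes the default HTTP API address):
+
+```bash
+curl -N "http://localhost:5052/eth/v1/events?topics=payload_attributes" -H "accept: text/event-stream"
+```
+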
An example of querying the payload attributes in the Events API is outlined in [Beacon node API - Events API](./api-bn.md#events-api) + [mev-rs]: https://github.com/ralexstokes/mev-rs [mev-boost]: https://github.com/flashbots/mev-boost [gas-limit-api]: https://ethereum.github.io/keymanager-APIs/#/Gas%20Limit diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 9b60ca2e1..a4d28452d 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -16,27 +16,25 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| -| v2.0.0 | Oct 2021 | v5 | no | -| v2.1.0 | Jan 2022 | v8 | no | -| v2.2.0 | Apr 2022 | v8 | no | -| v2.3.0 | May 2022 | v9 | yes from <= v3.3.0 | -| v2.4.0 | Jul 2022 | v9 | yes from <= v3.3.0 | -| v2.5.0 | Aug 2022 | v11 | yes | -| v3.0.0 | Aug 2022 | v11 | yes | -| v3.1.0 | Sep 2022 | v12 | yes | -| v3.2.0 | Oct 2022 | v12 | yes | -| v3.3.0 | Nov 2022 | v13 | yes | -| v3.4.0 | Jan 2023 | v13 | yes | -| v3.5.0 | Feb 2023 | v15 | yes before Capella | -| v4.0.1 | Mar 2023 | v16 | yes before Capella | +| v4.6.0 | Dec 2023 | v19 | yes before Deneb | +| v4.6.0-rc.0 | Dec 2023 | v18 | yes before Deneb | +| v4.5.0 | Sep 2023 | v17 | yes | +| v4.4.0 | Aug 2023 | v17 | yes | +| v4.3.0 | Jul 2023 | v17 | yes | | v4.2.0 | May 2023 | v17 | yes | +| v4.1.0 | Apr 2023 | v16 | no | +| v4.0.1 | Mar 2023 | v16 | no | -> **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release -> (e.g. v2.3.0). +> **Note**: All point releases (e.g. v4.4.1) are schema-compatible with the prior minor release +> (e.g. v4.4.0). > **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We -usually do this after a major version has been out for a while and everyone has upgraded. In this -case the above table will continue to record the deprecated schema changes for reference. +usually do this after a major version has been out for a while and everyone has upgraded. Deprecated +schema versions for previous releases are archived under +[Full list of schema versions](#full-list-of-schema-versions). If you get stuck and are unable +to upgrade a **testnet** node to the latest version, sometimes it is possible to upgrade via an +intermediate version (e.g. upgrade from v3.5.0 to v4.6.0 via v4.0.1). This is never necessary +on mainnet. ## How to apply a database downgrade @@ -44,9 +42,7 @@ To apply a downgrade you need to use the `lighthouse db migrate` command with th 1. Make sure you have a copy of the latest version of Lighthouse. This will be the version that knows about the latest schema change, and has the ability to revert it. -2. Work out the schema version you would like to downgrade to by checking the table above, or the - Lighthouse release notes. E.g. if you want to downgrade from v2.3.0, which upgraded the version - from v8 to v9, then you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. +2. Work out the schema version you would like to downgrade to by checking the table above, or the [Full list of schema versions](#full-list-of-schema-versions) below. E.g. if you want to downgrade from v4.2.0, which upgraded the version from v16 to v17, then you'll want to downgrade to v16 in order to run v4.0.1. 3. **Ensure that downgrading is feasible**. Not all schema upgrades can be reverted, and some of them are time-sensitive. 
The release notes will state whether a downgrade is available and whether any caveats apply to it.
@@ -59,14 +55,13 @@ To apply a downgrade you need to use the `lighthouse db migrate` command with th
 sudo -u "$LH_USER" lighthouse db migrate --to "$VERSION" --datadir "$LH_DATADIR" --network "$NET"
 ```

-For example if you want to downgrade to Lighthouse v2.1 or v2.2 from v2.3 and you followed Somer
-Esat's guide, you would run:
+For example, if you want to downgrade to Lighthouse v4.0.1 from v4.2.0 and you followed Somer Esat's guide, you would run:

 ```
-sudo -u lighthousebeacon lighthouse db migrate --to 8 --datadir /var/lib/lighthouse --network mainnet
+sudo -u lighthousebeacon lighthouse db migrate --to 16 --datadir /var/lib/lighthouse --network mainnet
 ```

-Where `lighthouse` is Lighthouse v2.3.0+. After the downgrade succeeds you can then replace your
+Where `lighthouse` is Lighthouse v4.2.0+. After the downgrade succeeds you can then replace your
 global `lighthouse` binary with the older version and start your node again.

 ## How to apply a database upgrade
@@ -158,3 +153,63 @@ lighthouse db version --network mainnet
 ```

 [run-correctly]: #how-to-run-lighthouse-db-correctly
+
+## How to prune historic states
+
+Pruning historic states helps in managing the disk space used by the Lighthouse beacon node by removing old beacon
+states from the freezer database. This can be especially useful when the database has accumulated a significant amount
+of historic data. This command is intended for nodes synced before 4.4.1, as newly synced nodes no longer store
+historic states by default.
+
+Here are the steps to prune historic states:
+
+1. Before running the prune command, make sure that the Lighthouse beacon node is not running. If you are using systemd, you might stop the Lighthouse beacon node with a command like:
+
+   ```bash
+   sudo systemctl stop lighthousebeacon
+   ```
+
+2. Use the `prune-states` command to prune the historic states. You can do a test run without the `--confirm` flag to check that the database can be pruned:
+
+   ```bash
+   sudo -u "$LH_USER" lighthouse db prune-states --datadir "$LH_DATADIR" --network "$NET"
+   ```
+
+3. If you are ready to prune the states irreversibly, add the `--confirm` flag to commit the changes:
+
+   ```bash
+   sudo -u "$LH_USER" lighthouse db prune-states --confirm --datadir "$LH_DATADIR" --network "$NET"
+   ```
+
+   The `--confirm` flag ensures that you are aware the action is irreversible, and historic states will be permanently removed.
+
+4. After successfully pruning the historic states, you can restart the Lighthouse beacon node:
+
+   ```bash
+   sudo systemctl start lighthousebeacon
+   ```
+
+## Full list of schema versions
+
+| Lighthouse version | Release date | Schema version | Downgrade available?                |
+|--------------------|--------------|----------------|-------------------------------------|
+| v4.6.0             | Dec 2023     | v19            | yes before Deneb                    |
+| v4.6.0-rc.0        | Dec 2023     | v18            | yes before Deneb                    |
+| v4.5.0             | Sep 2023     | v17            | yes                                 |
+| v4.4.0             | Aug 2023     | v17            | yes                                 |
+| v4.3.0             | Jul 2023     | v17            | yes                                 |
+| v4.2.0             | May 2023     | v17            | yes                                 |
+| v4.1.0             | Apr 2023     | v16            | yes before Capella using <= v4.5.0  |
+| v4.0.1             | Mar 2023     | v16            | yes before Capella using <= v4.5.0  |
+| v3.5.0             | Feb 2023     | v15            | yes before Capella using <= v4.5.0  |
+| v3.4.0             | Jan 2023     | v13            | yes using <= v4.5.0                 |
+| v3.3.0             | Nov 2022     | v13            | yes using <= v4.5.0                 |
+| v3.2.0             | Oct 2022     | v12            | yes using <= v4.5.0                 |
+| v3.1.0             | Sep 2022     | v12            | yes using <= v4.5.0                 |
+| v3.0.0             | Aug 2022     | v11            | yes using <= v4.5.0                 |
+| v2.5.0             | Aug 2022     | v11            | yes using <= v4.5.0                 |
+| v2.4.0             | Jul 2022     | v9             | yes using <= v3.3.0                 |
+| v2.3.0             | May 2022     | v9             | yes using <= v3.3.0                 |
+| v2.2.0             | Apr 2022     | v8             | no                                  |
+| v2.1.0             | Jan 2022     | v8             | no                                  |
+| v2.0.0             | Oct 2021     | v5             | no                                  |
diff --git a/book/src/developers.md b/book/src/developers.md
index 2ba09bd34..ab12bed5b 100644
--- a/book/src/developers.md
+++ b/book/src/developers.md
@@ -48,4 +48,5 @@ custom RPC error messages.

 | Code | Message | Description |
 | ---- | ---- | ---- |
-| 139 | Rate Limited | The peer has been rate limited so we return this error as a response |
\ No newline at end of file
+| 139 | Rate Limited | The peer has been rate limited so we return this error as a response |
+| 140 | Blobs Not Found For Block | We do not possess the blobs for the requested block |
diff --git a/book/src/docker.md b/book/src/docker.md
index d67b084da..defa89517 100644
--- a/book/src/docker.md
+++ b/book/src/docker.md
@@ -82,7 +82,7 @@ The `modernity` is:

 The `features` is:

-* `-dev` for a development build with `minimal-spec` preset enabled.
+* `-dev` for a development build with `minimal` preset enabled (`spec-minimal` feature).

 * empty for a standard build with no custom feature enabled.
diff --git a/book/src/help_bn.md b/book/src/help_bn.md
new file mode 100644
index 000000000..dff2ab687
--- /dev/null
+++ b/book/src/help_bn.md
@@ -0,0 +1,510 @@
+# Beacon Node
+
+```
+Sigma Prime
+The primary component which connects to the Ethereum 2.0 P2P network and downloads, verifies and stores blocks. Provides
+a HTTP API for querying the beacon chain and publishing messages to the network.
+
+USAGE:
+    lighthouse beacon_node [FLAGS] [OPTIONS]
+
+FLAGS:
+        --allow-insecure-genesis-sync        Enable syncing from genesis, which is generally insecure and incompatible
+                                             with data availability checks. Checkpoint syncing is the preferred method
+                                             for syncing a node. Only use this flag when testing. DO NOT use on
+                                             mainnet!
+        --always-prefer-builder-payload      This flag is deprecated and has no effect.
+        --always-prepare-payload             Send payload attributes with every fork choice update. This is intended
+                                             for use by block builders, relays and developers. You should set a fee
+                                             recipient on this BN and also consider adjusting the --prepare-payload-
+                                             lookahead flag.
+        --builder-fallback-disable-checks    This flag disables all checks related to chain health. This means the
+                                             builder API will always be used for payload construction, regardless of
+                                             recent chain conditions.
+        --compact-db                         If present, apply compaction to the database on start-up. Use with
+                                             caution. It is generally not recommended unless auto-compaction is
+                                             disabled.
+        --disable-backfill-rate-limiting     Disable the backfill sync rate-limiting.
+                                             This allows users to just sync
+                                             the entire chain as fast as possible, however it can result in resource
+                                             contention which degrades staking performance. Stakers should generally
+                                             choose to avoid this flag since backfill sync is not required for
+                                             staking.
+        --disable-deposit-contract-sync      Explicitly disables syncing of deposit logs from the execution node. This
+                                             overrides any previous option that depends on it. Useful if you intend to
+                                             run a non-validating beacon node.
+        --disable-duplicate-warn-logs        Disable warning logs for duplicate gossip messages. The WARN level log is
+                                             useful for detecting a duplicate validator key running elsewhere.
+                                             However, this may result in excessive warning logs if the validator is
+                                             broadcasting messages to multiple beacon nodes via the validator client
+                                             --broadcast flag. In this case, disabling these warn logs may be useful.
+    -x, --disable-enr-auto-update            Discovery automatically updates the node's local ENR with an external IP
+                                             address and port as seen by other peers on the network. This disables
+                                             this feature, fixing the ENR's IP/PORT to those specified on boot.
+        --disable-lock-timeouts              Disable the timeouts applied to some internal locks by default. This can
+                                             lead to less spurious failures on slow hardware but is considered
+                                             experimental as it may obscure performance issues.
+        --disable-log-timestamp              If present, do not include timestamps in logging output.
+        --disable-malloc-tuning              If present, do not configure the system allocator. Providing this flag
+                                             will generally increase memory usage, it should only be provided when
+                                             debugging specific memory allocation issues.
+        --disable-optimistic-finalized-sync  Force Lighthouse to verify every execution block hash with the execution
+                                             client during finalized sync. By default block hashes will be checked in
+                                             Lighthouse and only passed to the EL if initial verification fails.
+        --disable-packet-filter              Disables the discovery packet filter. Useful for testing in smaller
+                                             networks.
+        --disable-proposer-reorgs            Do not attempt to reorg late blocks from other validators when proposing.
+        --disable-quic                       Disables the quic transport. The node will rely solely on the TCP
+                                             transport for libp2p connections.
+        --disable-upnp                       Disables UPnP support. Setting this will prevent Lighthouse from
+                                             attempting to automatically establish external port mappings.
+        --dummy-eth1                         If present, uses an eth1 backend that generates static dummy data.
+                                             Identical to the method used at the 2019 Canada interop.
+        --enable-private-discovery           Lighthouse by default does not discover private IP addresses. Set this
+                                             flag to enable connection attempts to local addresses.
+    -e, --enr-match                          Sets the local ENR IP address and port to match those set for lighthouse.
+                                             Specifically, the IP address will be the value of --listen-address and
+                                             the UDP port will be --discovery-port.
+        --eth1                               If present, the node will connect to an eth1 node. This is required for
+                                             block production, you must use this flag if you wish to serve a
+                                             validator.
+        --eth1-purge-cache                   Purges the eth1 block and deposit caches.
+        --genesis-backfill                   Attempts to download blocks all the way back to genesis when checkpoint
+                                             syncing.
+        --gui                                Enable the graphical user interface and all its requirements. This
+                                             enables --http and --validator-monitor-auto and enables SSE logging.
+    -h, --help                               Prints help information
+        --http                               Enable the RESTful HTTP API server. Disabled by default.
+        --http-allow-sync-stalled            Forces the HTTP to indicate that the node is synced when sync is actually
+                                             stalled.
+                                             This is useful for very small testnets. TESTING ONLY. DO NOT USE
+                                             ON MAINNET.
+        --http-enable-tls                    Serves the RESTful HTTP API server over TLS. This feature is currently
+                                             experimental.
+        --import-all-attestations            Import and aggregate all attestations, regardless of validator
+                                             subscriptions. This will only import attestations from already-subscribed
+                                             subnets, use with --subscribe-all-subnets to ensure all attestations are
+                                             received for import.
+        --light-client-server                Act as a full node supporting light clients on the p2p network
+                                             [experimental]
+        --log-color                          Force outputting colors when emitting logs to the terminal.
+        --logfile-compress                   If present, compress old log files. This can help reduce the space needed
+                                             to store old logs.
+        --logfile-no-restricted-perms        If present, log files will be generated as world-readable meaning they
+                                             can be read by any user on the machine. Note that logs can often contain
+                                             sensitive information about your validator and so this flag should be
+                                             used with caution. For Windows users, the log file permissions will be
+                                             inherited from the parent folder.
+        --metrics                            Enable the Prometheus metrics HTTP server. Disabled by default.
+        --private                            Prevents sending various client identification information.
+        --proposer-only                      Sets this beacon node to be a block proposer only node. This will run the
+                                             beacon node in a minimal configuration that is sufficient for block
+                                             publishing only. This flag should be used for a beacon node being
+                                             referenced by a validator client using the --proposer-node flag. This
+                                             configuration is for enabling more secure setups.
+        --purge-db                           If present, the chain database will be deleted. Use with caution.
+        --reconstruct-historic-states        After a checkpoint sync, reconstruct historic states in the database.
+                                             This requires syncing all the way back to genesis.
+        --reset-payload-statuses             When present, Lighthouse will forget the payload statuses of any already-
+                                             imported blocks. This can assist in the recovery from a consensus
+                                             failure caused by the execution layer.
+        --shutdown-after-sync                Shutdown beacon node as soon as sync is completed. Backfill sync will not
+                                             be performed before shutdown.
+        --slasher                            Run a slasher alongside the beacon node. It is currently only recommended
+                                             for expert users because of the immaturity of the slasher UX and the
+                                             extra resources required.
+        --staking                            Standard option for a staking beacon node. This will enable the HTTP
+                                             server on localhost:5052 and import deposit logs from the execution node.
+                                             This is equivalent to `--http` on merge-ready networks, or `--http
+                                             --eth1` pre-merge
+        --subscribe-all-subnets              Subscribe to all subnets regardless of validator count. This will also
+                                             advertise the beacon node as being long-lived subscribed to all subnets.
+        --validator-monitor-auto             Enables the automatic detection and monitoring of validators connected to
+                                             the HTTP API and using the subnet subscription endpoint. This generally
+                                             has the effect of providing additional logging and metrics for locally
+                                             controlled validators.
+    -V, --version                            Prints version information
+    -z, --zero-ports                         Sets all listening TCP/UDP ports to 0, allowing the OS to choose some
+                                             arbitrary free ports.
+
+OPTIONS:
+        --auto-compact-db
+            Enable or disable automatic compaction of the database on finalization. [default: true]
+
+        --beacon-processor-aggregate-batch-size
+            Specifies the number of gossip aggregate attestations in a signature verification batch.
Higher values may + reduce CPU usage in a healthy network while lower values may increase CPU usage in an unhealthy or hostile + network. [default: 64] + --beacon-processor-attestation-batch-size + Specifies the number of gossip attestations in a signature verification batch. Higher values may reduce CPU + usage in a healthy network whilst lower values may increase CPU usage in an unhealthy or hostile network. + [default: 64] + --beacon-processor-max-workers + Specifies the maximum concurrent tasks for the task scheduler. Increasing this value may increase resource + consumption. Reducing the value may result in decreased resource usage and diminished performance. The + default value is the number of logical CPU cores on the host. + --beacon-processor-reprocess-queue-len + Specifies the length of the queue for messages requiring delayed processing. Higher values may prevent + messages from being dropped while lower values may help protect the node from becoming overwhelmed. + [default: 12288] + --beacon-processor-work-queue-len + Specifies the length of the inbound event queue. Higher values may prevent messages from being dropped while + lower values may help protect the node from becoming overwhelmed. [default: 16384] + --blob-prune-margin-epochs + The margin for blob pruning in epochs. The oldest blobs are pruned up until data_availability_boundary - + blob_prune_margin_epochs. [default: 0] + --blobs-dir + Data directory for the blobs database. + + --block-cache-size + Specifies how many blocks the database should cache in memory [default: 5] + + --boot-nodes + One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported. + + --builder + The URL of a service compatible with the MEV-boost API. + + --builder-fallback-epochs-since-finalization + If this node is proposing a block and the chain has not finalized within this number of epochs, it will NOT + query any connected builders, and will use the local execution engine for payload construction. Setting this + value to anything less than 2 will cause the node to NEVER query connected builders. Setting it to 2 will + cause this condition to be hit if there are skips slots at the start of an epoch, right before this node is + set to propose. [default: 3] + --builder-fallback-skips + If this node is proposing a block and has seen this number of skip slots on the canonical chain in a row, it + will NOT query any connected builders, and will use the local execution engine for payload construction. + [default: 3] + --builder-fallback-skips-per-epoch + If this node is proposing a block and has seen this number of skip slots on the canonical chain in the past + `SLOTS_PER_EPOCH`, it will NOT query any connected builders, and will use the local execution engine for + payload construction. [default: 8] + --builder-profit-threshold + This flag is deprecated and has no effect. + + --builder-user-agent + The HTTP user agent to send alongside requests to the builder URL. The default is Lighthouse's version + string. + --checkpoint-block + Set a checkpoint block to start syncing from. Must be aligned and match --checkpoint-state. Using + --checkpoint-sync-url instead is recommended. + --checkpoint-state + Set a checkpoint state to start syncing from. Must be aligned and match --checkpoint-block. Using + --checkpoint-sync-url instead is recommended. + --checkpoint-sync-url + Set the remote beacon node HTTP endpoint to use for checkpoint sync. 
+ + --checkpoint-sync-url-timeout + Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint. [default: 180] + + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --discovery-port + The UDP port that discovery will listen on. Defaults to `port` + + --discovery-port6 + The UDP port that discovery will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to + `port6` + --enr-address
+...
+            The IP address/DNS address to broadcast to other peers on how to reach this node. If a DNS address is
+            provided, the enr-address is set to the IP address it resolves to and does not auto-update based on PONG
+            responses in discovery. Set this only if you are sure other nodes can connect to your local node on this
+            address. This will update the `ip4` or `ip6` ENR fields accordingly. To update both, set this flag twice
+            with the different values.
+        --enr-quic-port
+            The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes can connect
+            to your local node on this port over IPv4.
+        --enr-quic6-port
+            The quic UDP6 port that will be set on the local ENR. Set this only if you are sure other nodes can connect
+            to your local node on this port over IPv6.
+        --enr-tcp-port
+            The TCP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on
+            this port over IPv4. The --port flag is used if this is not set.
+        --enr-tcp6-port
+            The TCP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on
+            this port over IPv6. The --port6 flag is used if this is not set.
+        --enr-udp-port
+            The UDP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on
+            this port over IPv4.
+        --enr-udp6-port
+            The UDP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on
+            this port over IPv6.
+        --epochs-per-blob-prune
+            The epoch interval with which to prune blobs from Lighthouse's database when they are older than the data
+            availability boundary relative to the current epoch. [default: 1]
+        --epochs-per-migration
+            The number of epochs to wait between running the migration of data from the hot DB to the cold DB. Less
+            frequent runs can be useful for minimizing disk writes [default: 1]
+        --eth1-blocks-per-log-query
+            Specifies the number of blocks that a deposit log query should span. This will reduce the size of responses
+            from the Eth1 endpoint. [default: 1000]
+        --eth1-cache-follow-distance
+            Specifies the distance between the Eth1 chain head and the last block which should be imported into the
+            cache. Setting this value lower can help compensate for irregular Proof-of-Work block times, but setting it
+            too low can make the node vulnerable to re-orgs.
+        --execution-endpoint
+            Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC connection. Uses the same endpoint to
+            populate the deposit cache.
+        --execution-jwt
+            File path which contains the hex-encoded JWT secret for the execution endpoint provided in the --execution-
+            endpoint flag.
+        --execution-jwt-id
+            Used by the beacon node to communicate a unique identifier to execution nodes during JWT authentication. It
+            corresponds to the 'id' field in the JWT claims object. Set to empty by default.
+        --execution-jwt-secret-key
+            Hex-encoded JWT secret for the execution endpoint provided in the --execution-endpoint flag.
+
+        --execution-jwt-version
+            Used by the beacon node to communicate a client version to execution nodes during JWT authentication. It
+            corresponds to the 'clv' field in the JWT claims object. Set to empty by default.
+        --execution-timeout-multiplier
+            Unsigned integer to multiply the default execution timeouts by. [default: 1]
+
+        --fork-choice-before-proposal-timeout
+            Set the maximum number of milliseconds to wait for fork choice before proposing a block.
You can prevent + waiting at all by setting the timeout to 0, however you risk proposing atop the wrong parent block. + [default: 250] + --freezer-dir + Data directory for the freezer database. + + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --graffiti + Specify your custom graffiti to be included in blocks. Defaults to the current version and commit, truncated + to fit in 32 bytes. + --historic-state-cache-size + Specifies how many states from the freezer database should cache in memory [default: 1] + + --http-address
+ Set the listen address for the RESTful HTTP API server. + + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not + recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5052). + --http-duplicate-block-status + Status code to send when a block that is already known is POSTed to the HTTP API. + + --http-enable-beacon-processor + The beacon processor is a scheduler which provides quality-of-service and DoS protection. When set to + "true", HTTP API requests will be queued and scheduled alongside other tasks. When set to "false", HTTP API + responses will be executed immediately. + --http-port + Set the listen TCP port for the RESTful HTTP API server. + + --http-spec-fork + Serve the spec for a specific hard fork on /eth/v1/config/spec. It should not be necessary to set this flag. + + --http-sse-capacity-multiplier + Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. Increasing this value can + prevent messages from being dropped. + --http-tls-cert + The path of the certificate to be used when serving the HTTP API server over TLS. + + --http-tls-key + The path of the private key to be used when serving the HTTP API server over TLS. Must not be password- + protected. + --invalid-gossip-verified-blocks-path + If a block succeeds gossip validation whilst failing full validation, store the block SSZ as a file at this + path. This feature is only recommended for developers. This directory is not pruned, users should be careful + to avoid filling up their disks. + --libp2p-addresses + One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR. + + --listen-address
... + The address lighthouse will listen for UDP and TCP connections. To listen over IpV4 and IpV6 set this flag + twice with the different values. + Examples: + - --listen-address '0.0.0.0' will listen over IPv4. + - --listen-address '::' will listen over IPv6. + - --listen-address '0.0.0.0' --listen-address '::' will listen over both IPv4 and IPv6. The order of the + given addresses is not relevant. However, multiple IPv4, or multiple IPv6 addresses will not be accepted. + [default: 0.0.0.0] + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --max-skip-slots + Refuse to skip more than this many slots when processing an attestation. This prevents nodes on minority + forks from wasting our time and disk space, but could also cause unnecessary consensus failures, so is + disabled by default. + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not + recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5054). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + + --monitoring-endpoint
+            Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor
+            your setup on certain services (e.g. beaconcha.in). This flag sets the endpoint where the beacon node
+            metrics will be sent. Note: This will send information to a remote server which may identify and associate
+            your validators, IP address and other personal information. Always use a HTTPS connection and never provide
+            an untrusted URL.
+        --monitoring-endpoint-period
+            Defines how many seconds to wait between each message sent to the monitoring-endpoint. Default: 60s
+
+        --network
+            Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis,
+            chiado, sepolia, holesky]
+        --network-dir
+            Data directory for network keys. Defaults to network/ inside the beacon node dir.
+
+        --port
+            The TCP/UDP ports to listen on. There are two UDP ports. The discovery UDP port will be set to this value
+            and the Quic UDP port will be set to this value + 1. The discovery port can be modified by the --discovery-
+            port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4
+            and IPv6 the --port flag will apply to the IPv4 address and --port6 to the IPv6 address. [default: 9000]
+        --port6
+            The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and IPv6. Defaults to 9090 when
+            required. The Quic UDP port will be set to this value + 1. [default: 9090]
+        --prepare-payload-lookahead
+            The time before the start of a proposal slot at which payload attributes should be sent. Low values are
+            useful for execution nodes which don't improve their payload after the first call, and high values are
+            useful for ensuring the EL is given ample notice. Default: 1/3 of a slot.
+        --progressive-balances
+            Control the progressive balances cache mode. The default `fast` mode uses the cache to speed up fork choice.
+            A more conservative `checked` mode compares the cache's results against results without the cache. If there
+            is a mismatch, it falls back to the cache-free result. Using the default `fast` mode is recommended unless
+            advised otherwise by the Lighthouse team. [possible values: disabled, checked, strict, fast]
+        --proposer-reorg-cutoff
+            Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent
+            failed reorgs by ensuring the block has ample time to propagate and be processed by the network. The default
+            is 1/12th of a slot (1 second on mainnet)
+        --proposer-reorg-disallowed-offsets
+            Comma-separated list of integer offsets which can be used to avoid proposing reorging blocks at certain
+            slots. An offset of N means that reorging proposals will not be attempted at any slot such that `slot %
+            SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be avoided. Any offsets supplied with this
+            flag will impose additional restrictions.
+        --proposer-reorg-epochs-since-finalization
+            Maximum number of epochs since finalization at which proposer reorgs are allowed. Default: 2
+
+        --proposer-reorg-threshold
+            Percentage of vote weight below which to attempt a proposer reorg. Default: 20%
+
+        --prune-blobs
+            Prune blobs from Lighthouse's database when they are older than the data availability boundary relative
+            to the current epoch. [default: true]
+        --prune-payloads
+            Prune execution payloads from Lighthouse's database. This saves space but imposes load on the execution
+            client, as payloads need to be reconstructed and sent to syncing peers.
[default: true] + --quic-port + The UDP port that quic will listen on. Defaults to `port` + 1 + + --quic-port6 + The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + + 1 + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --shuffling-cache-size + Some HTTP API requests can be optimised by caching the shufflings at each epoch. This flag allows the user + to set the shuffling cache size in epochs. Shufflings are dependent on validator count and setting this + value to a large number can consume a large amount of memory. + --slasher-att-cache-size + Set the maximum number of attestation roots for the slasher to cache + + --slasher-backend + Set the database backend to be used by the slasher. [possible values: lmdb, disabled] + + --slasher-broadcast + Broadcast slashings found by the slasher to the rest of the network [Enabled by default]. [default: true] + + --slasher-chunk-size + Number of epochs per validator per chunk stored on disk. + + --slasher-dir + Set the slasher's database directory. + + --slasher-history-length + Configure how many epochs of history the slasher keeps. Immutable after initialization. + + --slasher-max-db-size + Maximum size of the MDBX database used by the slasher. + + --slasher-slot-offset + Set the delay from the start of the slot at which the slasher should ingest attestations. Only effective if + the slasher-update-period is a multiple of the slot duration. + --slasher-update-period + Configure how often the slasher runs batch processing. + + --slasher-validator-chunk-size + Number of validators per chunk stored on disk. + + --slots-per-restore-point + Specifies how often a freezer DB restore point should be stored. Cannot be changed after initialization. + [default: 8192 (mainnet) or 64 (minimal)] + --suggested-fee-recipient + Emergency fallback fee recipient for use in case the validator client does not have one configured. You + should set this flag on the validator client instead of (or in addition to) setting it here. + --target-peers + The target number of peers. + + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). 
This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + --trusted-peers + One or more comma-delimited trusted peer ids which always have the highest score according to the peer + scoring system. + --trusted-setup-file-override + Path to a json file containing the trusted setup params. NOTE: This will override the trusted setup that is + generated from the mainnet kzg ceremony. Use with caution + --validator-monitor-file + As per --validator-monitor-pubkeys, but the comma-separated list is contained within a file at the given + path. + --validator-monitor-individual-tracking-threshold + Once the validator monitor reaches this number of local validators it will stop collecting per-validator + Prometheus metrics and issuing per-validator logs. Instead, it will provide aggregate metrics and logs. This + avoids infeasibly high cardinality in the Prometheus database and high log volume when using many + validators. Defaults to 64. + --validator-monitor-pubkeys + A comma-separated list of 0x-prefixed validator public keys. These validators will receive special + monitoring and additional logging. + --wss-checkpoint + Specify a weak subjectivity checkpoint in `block_root:epoch` format to verify the node's sync against. The + block root should be 0x-prefixed. Note that this flag is for verification only, to perform a checkpoint sync + from a recent state use --checkpoint-sync-url. +``` \ No newline at end of file diff --git a/book/src/help_general.md b/book/src/help_general.md new file mode 100644 index 000000000..fbe05693e --- /dev/null +++ b/book/src/help_general.md @@ -0,0 +1,107 @@ +# Lighthouse General Commands + +``` +Sigma Prime +Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a validator client and utilities for managing +validator accounts. + +USAGE: + lighthouse [FLAGS] [OPTIONS] [SUBCOMMAND] + +FLAGS: + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will + generally increase memory usage, it should only be provided when debugging + specific memory allocation issues. + -l DEPRECATED Enables environment logging giving access to sub-protocol logs such + as discv5 and libp2p + -h, --help Prints help information + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed to + store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be + read by any user on the machine. Note that logs can often contain sensitive + information about your validator and so this flag should be used with caution. + For Windows users, the log file permissions will be inherited from the parent + folder. + -V, --version Prints version information + +OPTIONS: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. 
Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. 
Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + +SUBCOMMANDS: + account_manager Utilities for generating and managing Ethereum 2.0 accounts. [aliases: a, am, account, + account_manager] + beacon_node The primary component which connects to the Ethereum 2.0 P2P network and downloads, + verifies and stores blocks. Provides a HTTP API for querying the beacon chain and + publishing messages to the network. [aliases: b, bn, beacon] + boot_node Start a special Lighthouse process that only serves as a discv5 boot-node. This process + will *not* import blocks or perform most typical beacon node functions. Instead, it will + simply run the discv5 service and assist nodes on the network to discover each other. This + is the recommended way to provide a network boot-node since it has a reduced attack surface + compared to a full beacon node. + database_manager Manage a beacon node database [aliases: db] + help Prints this message or the help of the given subcommand(s) + validator_client When connected to a beacon node, performs the duties of a staked validator (e.g., proposing + blocks and attestations). [aliases: v, vc, validator] + validator_manager Utilities for managing a Lighthouse validator client via the HTTP API. [aliases: vm, + validator-manager, validator_manager] +``` \ No newline at end of file diff --git a/book/src/help_vc.md b/book/src/help_vc.md new file mode 100644 index 000000000..bc6deec1e --- /dev/null +++ b/book/src/help_vc.md @@ -0,0 +1,212 @@ +# Validator Client + +``` +When connected to a beacon node, performs the duties of a staked validator (e.g., proposing blocks and attestations). + +USAGE: + lighthouse validator_client [FLAGS] [OPTIONS] + +FLAGS: + --builder-proposals + If this flag is set, Lighthouse will query the Beacon Node for only block headers during proposals and will + sign over headers. Useful for outsourcing execution payload construction during proposals. + --disable-auto-discover + If present, do not attempt to discover new validators in the validators-dir. Validators will need to be + manually added to the validator_definitions.yml file. + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag will generally increase memory usage, + it should only be provided when debugging specific memory allocation issues. + --disable-run-on-all + DEPRECATED. Use --broadcast. By default, Lighthouse publishes attestation, sync committee subscriptions and + proposer preparation messages to all beacon nodes provided in the `--beacon-nodes flag`. This option changes + that behaviour such that these api calls only go out to the first available and synced beacon node + --enable-doppelganger-protection + If this flag is set, Lighthouse will delay startup for three epochs and monitor for messages on the network + by any of the validators managed by this client. This will result in three (possibly four) epochs worth of + missed attestations. If an attestation is detected during this period, it means it is very likely that you + are running a second validator client with the same keys. This validator client will immediately shutdown if + this is detected in order to avoid potentially committing a slashable offense. 
Use this flag in order to + ENABLE this functionality, without this flag Lighthouse will begin attesting immediately. + --enable-high-validator-count-metrics + Enable per validator metrics for > 64 validators. Note: This flag is automatically enabled for <= 64 + validators. Enabling this metric for higher validator counts will lead to higher volume of prometheus + metrics being collected. + -h, --help Prints help information + --http Enable the RESTful HTTP API server. Disabled by default. + --http-allow-keystore-export + If present, allow access to the DELETE /lighthouse/keystores HTTP API method, which allows exporting + keystores and passwords to HTTP API consumers who have access to the API token. This method is useful for + exporting validators, however it should be used with caution since it exposes private key data to authorized + users. + --http-store-passwords-in-secrets-dir + If present, any validators created via the HTTP will have keystore passwords stored in the secrets-dir + rather than the validator definitions file. + --init-slashing-protection + If present, do not require the slashing protection database to exist before running. You SHOULD NOT use this + flag unless you're certain that a new slashing protection database is required. Usually, your database will + have been initialized when you imported your validator keys. If you misplace your database and then run with + this flag you risk being slashed. + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space needed to store old logs. + + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they can be read by any user on the + machine. Note that logs can often contain sensitive information about your validator and so this flag should + be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. + --metrics Enable the Prometheus metrics HTTP server. Disabled by default. + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload + value. + --produce-block-v3 + Enable block production via the block v3 endpoint for this validator client. This should only be enabled + when paired with a beacon node that has this endpoint implemented. This flag will be enabled by default in + future. + --unencrypted-http-transport + This is a safety flag to ensure that the user is aware that the http transport is unencrypted and using a + custom HTTP address is unsafe. + --use-long-timeouts + If present, the validator client will use longer timeouts for requests made to the beacon node. This flag is + generally not recommended, longer timeouts can cause missed duties when fallbacks are used. + -V, --version Prints version information + +OPTIONS: + --beacon-nodes + Comma-separated addresses to one or more beacon node HTTP APIs. Default is http://localhost:5052. + + --beacon-nodes-tls-certs + Comma-separated paths to custom TLS certificates to use when connecting to a beacon node (and/or proposer + node). These certificates must be in PEM format and are used in addition to the OS trust store. Commas must + only be used as a delimiter, and must not be part of the certificate path. + --broadcast + Comma-separated list of beacon API topics to broadcast to all beacon nodes. Possible values are: none, + attestations, blocks, subscriptions, sync-committee. 
Default (when flag is omitted) is to broadcast + subscriptions only. + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing + between a builder payload header and payload from the local execution node. + --builder-registration-timestamp-override + This flag takes a unix timestamp value that will be used to override the timestamp used in the builder api + registration + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --gas-limit + The gas limit to be used in all builder proposals for all validators managed by this validator client. Note + this will not necessarily be used if the gas limit set here moves too far from the previous block's gas + limit. [default: 30,000,000] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --graffiti + Specify your custom graffiti to be included in blocks. + + --graffiti-file + Specify a graffiti file to load validator graffitis from. + + --http-address
+ Set the address for the HTTP address. The HTTP server is not encrypted and therefore it is unsafe to publish + on a public network. When this flag is used, it additionally requires the explicit use of the + `--unencrypted-http-transport` flag to ensure the user is aware of the risks involved. For access via the + Internet, users should apply transport-layer security like a HTTPS reverse-proxy or SSH tunnelling. + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not + recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5062). + --http-port + Set the listen TCP port for the RESTful HTTP API server. + + --latency-measurement-service + Set to 'true' to enable a service that periodically attempts to measure latency to BNs. Set to 'false' to + disable. [default: true] + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not + recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5064). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor
+ your setup on certain services (e.g. beaconcha.in). This flag sets the endpoint where the beacon node
+ metrics will be sent. Note: This will send information to a remote server which may identify and associate
+ your validators, IP address and other personal information. Always use an HTTPS connection and never provide
+ an untrusted URL.
+ --monitoring-endpoint-period
+ Defines how many seconds to wait between each message sent to the monitoring-endpoint. Default: 60s
+
+ --network
+ Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis,
+ chiado, sepolia, holesky]
+ --proposer-nodes
+ Comma-separated addresses to one or more beacon node HTTP APIs. These specify nodes that are used to send
+ beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes.
+ --safe-slots-to-import-optimistically
+ Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should
+ only be used if the user has a clear understanding that the broad Ethereum community has elected to override
+ this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause
+ your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag.
+ --secrets-dir
+ The directory which contains the password to unlock the validator voting keypairs. Each password should be
+ contained in a file where the name is the 0x-prefixed hex representation of the validator's voting public
+ key. Defaults to ~/.lighthouse/{network}/secrets.
+ --suggested-fee-recipient
+ Once the merge has happened, this address will receive transaction fees from blocks proposed by this
+ validator client. If a fee recipient is configured in the validator definitions it takes priority over this
+ value.
+ --terminal-block-hash-epoch-override
+ Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should
+ only be used if the user has a clear understanding that the broad Ethereum community has elected to override
+ the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure.
+ Be extremely careful with this flag.
+ --terminal-block-hash-override
+ Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if
+ the user has a clear understanding that the broad Ethereum community has elected to override the terminal
+ PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely
+ careful with this flag.
+ --terminal-total-difficulty-override
+ Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal
+ integer (not a hex value). This flag should only be used if the user has a clear understanding that the
+ broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will
+ cause your node to experience a consensus failure. Be extremely careful with this flag.
+ -t, --testnet-dir
+ Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective
+ if there is no existing database.
+ --validator-registration-batch-size
+ Defines the number of validators per validator/register_validator request sent to the BN.
This value can be + reduced to avoid timeouts from builders. [default: 500] + --validators-dir + The directory which contains the validator keystores, deposit data for each validator along with the common + slashing protection database and the validator_definitions.yml +``` \ No newline at end of file diff --git a/book/src/help_vm.md b/book/src/help_vm.md new file mode 100644 index 000000000..fa08aa4f6 --- /dev/null +++ b/book/src/help_vm.md @@ -0,0 +1,97 @@ +# Validator Manager + +``` +Utilities for managing a Lighthouse validator client via the HTTP API. + +USAGE: + lighthouse validator_manager [FLAGS] [OPTIONS] [SUBCOMMAND] + +FLAGS: + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will + generally increase memory usage, it should only be provided when debugging + specific memory allocation issues. + -h, --help Prints help information + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed to + store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be + read by any user on the machine. Note that logs can often contain sensitive + information about your validator and so this flag should be used with caution. + For Windows users, the log file permissions will be inherited from the parent + folder. + -V, --version Prints version information + +OPTIONS: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. 
[possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + +SUBCOMMANDS: + create Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the + validator keystores and other validator data. This file can then be imported to a validator client + using the "import-validators" command. Another, optional JSON file is created which contains a list of + validator deposits in the same format as the "ethereum/staking-deposit-cli" tool. + help Prints this message or the help of the given subcommand(s) + import Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file + which can be generated using the "create-validators" command. + move Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file + which can be generated using the "create-validators" command. This command only supports validators + signing via a keystore on the local file system (i.e., not Web3Signer validators). +``` \ No newline at end of file diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md new file mode 100644 index 000000000..71db3cc59 --- /dev/null +++ b/book/src/help_vm_create.md @@ -0,0 +1,137 @@ +# Validator Manager Create + +``` +Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the validator keystores and +other validator data. This file can then be imported to a validator client using the "import-validators" command. +Another, optional JSON file is created which contains a list of validator deposits in the same format as the +"ethereum/staking-deposit-cli" tool. 
+ +USAGE: + lighthouse validator_manager create [FLAGS] [OPTIONS] --output-path + +FLAGS: + --disable-deposits When provided don't generate the deposits JSON file that is commonly used + for submitting validator deposits via a web UI. Using this flag will save + several seconds per validator if the user has an alternate strategy for + submitting deposits. + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --force-bls-withdrawal-credentials If present, allows BLS withdrawal credentials rather than an execution + address. This is not recommended. + -h, --help Prints help information + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed + to store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can + be read by any user on the machine. Note that logs can often contain + sensitive information about your validator and so this flag should be used + with caution. For Windows users, the log file permissions will be + inherited from the parent folder. + --specify-voting-keystore-password If present, the user will be prompted to enter the voting keystore + password that will be used to encrypt the voting keystores. If this flag + is not provided, a random password will be used. It is not necessary to + keep backups of voting keystore passwords if the mnemonic is safely backed + up. + --stdin-inputs If present, read all user inputs from stdin instead of tty. + -V, --version Prints version information + +OPTIONS: + --beacon-node + A HTTP(S) address of a beacon node using the beacon-API. If this value is provided, an error will be raised + if any validator key here is already known as a validator by that beacon node. This helps prevent the same + validator being created twice and therefore slashable conditions. + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing + between a builder payload header and payload from the local execution node. + --builder-proposals + When provided, all created validators will attempt to create blocks via builder rather than the local EL. + [possible values: true, false] + --count + The number of validators to create, regardless of how many already exist + + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --deposit-gwei + The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator + (MAX_EFFECTIVE_BALANCE) + --eth1-withdrawal-address + If this field is set, the given eth1 address will be used to create the withdrawal credentials. Otherwise, + it will generate withdrawal credentials with the mnemonic-derived withdrawal public key in EIP-2334 format. + --first-index + The first of consecutive key indexes you wish to create. 
[default: 0] + + --gas-limit + All created validators will use this gas limit. It is recommended to leave this as the default value by not + specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --mnemonic-path + If present, the mnemonic will be read in from this file. + + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --output-path + The path to a directory where the validator and (optionally) deposits files will be created. The directory + will be created if it does not exist. + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload + value. [possible values: true, false] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --suggested-fee-recipient + All created validators will use this value for the suggested fee recipient. Omit this flag to use the + default value from the VC. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. 
Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. +``` \ No newline at end of file diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md new file mode 100644 index 000000000..3960a55f1 --- /dev/null +++ b/book/src/help_vm_import.md @@ -0,0 +1,101 @@ +# Validator Manager Import + +``` +Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be +generated using the "create-validators" command. + +USAGE: + lighthouse validator_manager import [FLAGS] [OPTIONS] --validators-file + +FLAGS: + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will + generally increase memory usage, it should only be provided when debugging + specific memory allocation issues. + -h, --help Prints help information + --ignore-duplicates If present, ignore any validators which already exist on the VC. Without this + flag, the process will terminate without making any changes. This flag should + be used with caution, whilst it does not directly cause slashable conditions, + it might be an indicator that something is amiss. Users should also be careful + to avoid submitting duplicate deposits for validators that already exist on the + VC. + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed to + store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be + read by any user on the machine. Note that logs can often contain sensitive + information about your validator and so this flag should be used with caution. + For Windows users, the log file permissions will be inherited from the parent + folder. + -V, --version Prints version information + +OPTIONS: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. 
[default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + --validators-file + The path to a JSON file containing a list of validators to be imported to the validator client. This file is + usually named "validators.json". + --vc-token + The file containing a token required by the validator client. + + --vc-url + A HTTP(S) address of a validator client using the keymanager-API. If this value is not supplied then a 'dry + run' will be conducted where no changes are made to the validator client. 
[default: http://localhost:5062] +``` \ No newline at end of file diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md new file mode 100644 index 000000000..a89af437a --- /dev/null +++ b/book/src/help_vm_move.md @@ -0,0 +1,118 @@ +# Validator Manager Move + +``` +Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be +generated using the "create-validators" command. This command only supports validators signing via a keystore on the +local file system (i.e., not Web3Signer validators). + +USAGE: + lighthouse validator_manager move [FLAGS] [OPTIONS] --dest-vc-token --dest-vc-url --src-vc-token --src-vc-url + +FLAGS: + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will + generally increase memory usage, it should only be provided when debugging + specific memory allocation issues. + -h, --help Prints help information + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed to + store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be + read by any user on the machine. Note that logs can often contain sensitive + information about your validator and so this flag should be used with caution. + For Windows users, the log file permissions will be inherited from the parent + folder. + --stdin-inputs If present, read all user inputs from stdin instead of tty. + -V, --version Prints version information + +OPTIONS: + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing + between a builder payload header and payload from the local execution node. + --builder-proposals + When provided, all created validators will attempt to create blocks via builder rather than the local EL. + [possible values: true, false] + --count The number of validators to move. + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --dest-vc-token + The file containing a token required by the destination validator client. + + --dest-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "destination" + and will have new validators added as they are removed from the "source" validator client. + --gas-limit + All created validators will use this gas limit. It is recommended to leave this as the default value by not + specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. 
[default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload + value. [possible values: true, false] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --src-vc-token + The file containing a token required by the source validator client. + + --src-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "source" and + contains the validators that are to be moved. + --suggested-fee-recipient + All created validators will use this value for the suggested fee recipient. Omit this flag to use the + default value from the VC. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. 
+ -t, --testnet-dir
+ Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective
+ if there is no existing database.
+ --validators
+ The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all".
+```
\ No newline at end of file
diff --git a/book/src/homebrew.md b/book/src/homebrew.md
index 317dc0e0f..486de371f 100644
--- a/book/src/homebrew.md
+++ b/book/src/homebrew.md
@@ -31,6 +31,6 @@ Alternatively, you can find the `lighthouse` binary at:
 The [formula][] is kept up-to-date by the Homebrew community and a bot that listens for new releases.
-The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/lighthouse.rb) repo.
+The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repo.
 [formula]: https://formulae.brew.sh/formula/lighthouse
diff --git a/book/src/redundancy.md b/book/src/redundancy.md
index 77cec3253..11b984565 100644
--- a/book/src/redundancy.md
+++ b/book/src/redundancy.md
@@ -75,7 +75,38 @@ lighthouse bn \
 Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and
 `--import-all-attestations` flags. These flags are no longer required as the validator client will
 now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour
-can be disabled using the `--disable-run-on-all` flag for `lighthouse vc`.
+can be disabled using the `--broadcast none` flag for `lighthouse vc` (or `--disable-run-on-all`
+[deprecated]).
+
+### Broadcast modes
+
+Since v4.6.0, the Lighthouse VC can be configured to broadcast messages to all configured beacon
+nodes rather than just the first available.
+
+The flag to control this behaviour is `--broadcast`, which takes multiple comma-separated values
+from this list:
+
+- `subscriptions`: Send subnet subscriptions & other control messages which keep the beacon nodes
+  primed and ready to process messages. It is recommended to leave this enabled.
+- `attestations`: Send attestations & aggregates to all beacon nodes. This can improve
+  propagation of attestations throughout the network, at the cost of increased load on the beacon
+  nodes and increased bandwidth between the VC and the BNs.
+- `blocks`: Send proposed blocks to all beacon nodes. This can improve propagation of blocks
+  throughout the network, at the cost of slightly increased load on the beacon nodes and increased
+  bandwidth between the VC and the BNs. If you are looking to improve performance in a multi-BN
+  setup this is the first option we would recommend enabling.
+- `sync-committee`: Send sync committee signatures & aggregates to all beacon nodes. This can
+  improve propagation of sync committee messages with similar tradeoffs to broadcasting
+  attestations, although occurring less often due to the infrequency of sync committee duties.
+- `none`: Disable all broadcasting. This option only has an effect when provided alone, otherwise
+  it is ignored. Not recommended except for expert tweakers.
+
+Broadcasting attestations, blocks and sync committee messages may result in excessive warning logs in the beacon node
+due to duplicate gossip messages. In this case, it may be desirable to disable warning logs for duplicates using the
+beacon node `--disable-duplicate-warn-logs` flag.
+
+The default is `--broadcast subscriptions`. To also broadcast blocks, for example, use
+`--broadcast subscriptions,blocks`.
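+
+As a minimal sketch, a multi-BN validator client invocation using this flag might look as follows
+(the beacon node URLs here are placeholders for your own nodes):
+
+```bash
+# Keep the default subscription broadcasting and additionally send proposed
+# blocks to every configured beacon node.
+lighthouse vc \
+  --beacon-nodes http://localhost:5052,http://192.168.1.1:5052 \
+  --broadcast subscriptions,blocks
+```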
 ## Redundant execution nodes
diff --git a/book/src/setup.md b/book/src/setup.md
index d7eafbdf9..87f431f9b 100644
--- a/book/src/setup.md
+++ b/book/src/setup.md
@@ -25,7 +25,8 @@ Commands to run the test suite are available via the `Makefile` in the project
 root for the benefit of CI/CD. We list some of these commands below so you can
 run them locally and avoid CI failures:
-- `$ make cargo-fmt`: (fast) runs a Rust code linter.
+- `$ make cargo-fmt`: (fast) runs a Rust code formatting check.
+- `$ make lint`: (fast) runs a Rust code linter.
 - `$ make test`: (medium) runs unit tests across the whole project.
 - `$ make test-ef`: (medium) runs the Ethereum Foundation test vectors.
 - `$ make test-full`: (slow) runs the full test suite (including all previous
diff --git a/book/src/slasher.md b/book/src/slasher.md
index 41bc3baf7..c8506922b 100644
--- a/book/src/slasher.md
+++ b/book/src/slasher.md
@@ -71,7 +71,7 @@ If an MDBX database is already found on disk, then Lighthouse will try to use
 it. This will result in a log at start-up:

 ```
-INFO Slasher backend overriden reason: database exists, configured_backend: lmdb, overriden_backend: mdbx
+INFO Slasher backend overridden reason: database exists, configured_backend: lmdb, overridden_backend: mdbx
 ```

 If the running Lighthouse binary doesn't have the MDBX backend enabled but an existing database is
diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md
index ef81b2b75..cd31d78d6 100644
--- a/book/src/validator-inclusion.md
+++ b/book/src/validator-inclusion.md
@@ -8,7 +8,7 @@ These endpoints are not stable or included in the Ethereum consensus standard API. As such,
 they are subject to change or removal without a change in major release version.

-In order to apply these APIs, you need to have historical states information in the database of your node. This means adding the flag `--reconstruct-historic-states` in the beacon node or using the [/lighthouse/database/reconstruct API](./api-lighthouse.md#lighthousedatabasereconstruct). Once the state reconstruction process is completed, you can apply these APIs to any epoch.
+In order to apply these APIs, you need to have historical states information in the database of your node. This means adding the flag `--reconstruct-historic-states` in the beacon node. Once the state reconstruction process is completed, you can apply these APIs to any epoch.

 ## Endpoints
diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md
index 0cec150da..6ba894a43 100644
--- a/book/src/validator-manager-create.md
+++ b/book/src/validator-manager-create.md
@@ -139,7 +139,7 @@ In order to import the validators, the location of the VC `api-token.txt` file
 must be known. The location of the file varies, but it is located in the
 "validator directory" of your data directory. For example:
 `~/.lighthouse/mainnet/validators/api-token.txt`. We will use ``
-to subsitute this value. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path.
+to substitute this value. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path.
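+
+For example, assuming the VC is serving its HTTP API on the default port 5062:
+
+```bash
+# Prints the path to the VC's api-token.txt file.
+curl http://localhost:5062/lighthouse/auth
+```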
Once the VC is running, use the `import` command to import the validators to the VC: diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 0c815ca9a..bdf01914a 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.5.0" +version = "4.6.0" authors = ["Sigma Prime "] edition = { workspace = true } @@ -21,7 +21,6 @@ slog-scope = "4.3.0" slog-stdlog = "4.0.0" hex = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_json = { workspace = true } serde_yaml = { workspace = true } eth2_network_config = { workspace = true } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 5d7853bd2..6fb1ea9bf 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,13 +1,12 @@ use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; -use lighthouse_network::discovery::create_enr_builder_from_config; -use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr}; +use lighthouse_network::discv5::{self, enr::CombinedKey, Enr}; use lighthouse_network::{ discovery::{load_enr_from_disk, use_or_load_enr}, load_private_key, CombinedKeyExt, NetworkConfig, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Encode; use std::net::{SocketAddrV4, SocketAddrV6}; use std::time::Duration; @@ -20,7 +19,7 @@ pub struct BootNodeConfig { pub boot_nodes: Vec, pub local_enr: Enr, pub local_key: CombinedKey, - pub discv5_config: Discv5Config, + pub discv5_config: discv5::Config, phantom: PhantomData, } @@ -60,19 +59,25 @@ impl BootNodeConfig { // Set the Enr Discovery ports to the listening ports if not present. if let Some(listening_addr_v4) = network_config.listen_addrs().v4() { - network_config.enr_udp4_port = Some( - network_config - .enr_udp4_port - .unwrap_or(listening_addr_v4.disc_port), - ) + if network_config.enr_udp4_port.is_none() { + network_config.enr_udp4_port = + Some(network_config.enr_udp4_port.unwrap_or( + listening_addr_v4.disc_port.try_into().map_err(|_| { + "boot node enr-udp-port not set and listening port is zero" + })?, + )) + } }; if let Some(listening_addr_v6) = network_config.listen_addrs().v6() { - network_config.enr_udp6_port = Some( - network_config - .enr_udp6_port - .unwrap_or(listening_addr_v6.disc_port), - ) + if network_config.enr_udp6_port.is_none() { + network_config.enr_udp6_port = + Some(network_config.enr_udp6_port.unwrap_or( + listening_addr_v6.disc_port.try_into().map_err(|_| { + "boot node enr-udp-port not set and listening port is zero" + })?, + )) + } }; // By default this is enabled. If it is not set, revert to false. 
@@ -124,8 +129,25 @@ impl BootNodeConfig { // Build the local ENR let mut local_enr = { - let enable_tcp = false; - let mut builder = create_enr_builder_from_config(&network_config, enable_tcp); + let (maybe_ipv4_address, maybe_ipv6_address) = network_config.enr_address; + let mut builder = discv5::Enr::builder(); + + if let Some(ip) = maybe_ipv4_address { + builder.ip4(ip); + } + + if let Some(ip) = maybe_ipv6_address { + builder.ip6(ip); + } + + if let Some(udp4_port) = network_config.enr_udp4_port { + builder.udp4(udp4_port.get()); + } + + if let Some(udp6_port) = network_config.enr_udp6_port { + builder.udp6(udp6_port.get()); + } + // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { builder.add_value("eth2", &enr_fork_bytes); @@ -151,7 +173,7 @@ impl BootNodeConfig { /// The set of configuration parameters that can safely be (de)serialized. /// -/// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `Discv5Config`. +/// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `discv5::Config`. #[derive(Serialize, Deserialize)] pub struct BootNodeConfigSerialization { pub ipv4_listen_socket: Option, diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index d76e7906b..0421ce268 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -48,11 +48,8 @@ pub fn run( log::Level::Error => drain.filter_level(Level::Error), }; - let logger = Logger::root(drain.fuse(), o!()); - let _scope_guard = slog_scope::set_global_logger(logger); - slog_stdlog::init_with_level(debug_level).unwrap(); + let log = Logger::root(drain.fuse(), o!()); - let log = slog_scope::logger(); // Run the main function emitting any errors if let Err(e) = match eth_spec_id { EthSpecId::Minimal => { diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 5a5729dc0..8260038a0 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -5,7 +5,7 @@ use crate::config::BootNodeConfigSerialization; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; use lighthouse_network::{ - discv5::{enr::NodeId, Discv5, Discv5Event}, + discv5::{self, enr::NodeId, Discv5}, EnrExt, Eth2Enr, }; use slog::info; @@ -144,17 +144,17 @@ pub async fn run( } Some(event) = event_stream.recv() => { match event { - Discv5Event::Discovered(_enr) => { + discv5::Event::Discovered(_enr) => { // An ENR has bee obtained by the server // Ignore these events here } - Discv5Event::EnrAdded { .. } => {} // Ignore - Discv5Event::TalkRequest(_) => {} // Ignore - Discv5Event::NodeInserted { .. } => {} // Ignore - Discv5Event::SocketUpdated(socket_addr) => { + discv5::Event::EnrAdded { .. } => {} // Ignore + discv5::Event::TalkRequest(_) => {} // Ignore + discv5::Event::NodeInserted { .. } => {} // Ignore + discv5::Event::SocketUpdated(socket_addr) => { info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr); } - Discv5Event::SessionEstablished{ .. } => {} // Ignore + discv5::Event::SessionEstablished{ .. 
} => {} // Ignore } } } diff --git a/bors.toml b/bors.toml deleted file mode 100644 index 9e633d63f..000000000 --- a/bors.toml +++ /dev/null @@ -1,29 +0,0 @@ -status = [ - "cargo-fmt", - "release-tests-ubuntu", - "release-tests-windows", - "debug-tests-ubuntu", - "state-transition-vectors-ubuntu", - "ef-tests-ubuntu", - "dockerfile-ubuntu", - "eth1-simulator-ubuntu", - "merge-transition-ubuntu", - "no-eth1-simulator-ubuntu", - "check-benchmarks", - "clippy", - "arbitrary-check", - "cargo-audit", - "cargo-udeps", - "beacon-chain-tests", - "op-pool-tests", - "doppelganger-protection-test", - "execution-engine-integration-ubuntu", - "cargo-vendor", - "check-msrv", - "slasher-tests", - "syncing-simulator-ubuntu", - "compile-with-beta-compiler" -] -use_squash_merge = true -timeout_sec = 10800 -pr_status = ["license/cla", "target-branch-check"] diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index 10113ab32..e66bf1423 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -13,7 +13,6 @@ eth2_keystore = { workspace = true } filesystem = { workspace = true } zeroize = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_yaml = { workspace = true } slog = { workspace = true } types = { workspace = true } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index e566d7cdd..8707ae531 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -8,7 +8,7 @@ use eth2_wallet::{ }; use filesystem::{create_with_600_perms, Error as FsError}; use rand::{distributions::Alphanumeric, Rng}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::fs::{self, File}; use std::io; use std::io::prelude::*; diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index c91e717d1..f228ce5fd 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -9,7 +9,7 @@ use crate::{ use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{error, Logger}; use std::collections::HashSet; use std::fs::{self, File}; @@ -157,6 +157,12 @@ pub struct ValidatorDefinition { #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_boost_factor: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_builder_proposals: Option, + #[serde(default)] pub description: String, #[serde(flatten)] pub signing_definition: SigningDefinition, @@ -169,6 +175,7 @@ impl ValidatorDefinition { /// ## Notes /// /// This function does not check the password against the keystore. + #[allow(clippy::too_many_arguments)] pub fn new_keystore_with_password>( voting_keystore_path: P, voting_keystore_password_storage: PasswordStorage, @@ -176,6 +183,8 @@ impl ValidatorDefinition { suggested_fee_recipient: Option
, gas_limit: Option, builder_proposals: Option, + builder_boost_factor: Option, + prefer_builder_proposals: Option, ) -> Result { let voting_keystore_path = voting_keystore_path.as_ref().into(); let keystore = @@ -196,6 +205,8 @@ impl ValidatorDefinition { suggested_fee_recipient, gas_limit, builder_proposals, + builder_boost_factor, + prefer_builder_proposals, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path, @@ -344,6 +355,8 @@ impl ValidatorDefinitions { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path, @@ -367,7 +380,8 @@ impl ValidatorDefinitions { pub fn save>(&self, validators_dir: P) -> Result<(), Error> { let config_path = validators_dir.as_ref().join(CONFIG_FILENAME); let temp_path = validators_dir.as_ref().join(CONFIG_TEMP_FILENAME); - let bytes = serde_yaml::to_vec(self).map_err(Error::UnableToEncodeFile)?; + let mut bytes = vec![]; + serde_yaml::to_writer(&mut bytes, self).map_err(Error::UnableToEncodeFile)?; write_file_via_temporary(&config_path, &temp_path, &bytes) .map_err(Error::UnableToWriteFile)?; diff --git a/common/compare_fields/Cargo.toml b/common/compare_fields/Cargo.toml index 8df989e72..9972ca75c 100644 --- a/common/compare_fields/Cargo.toml +++ b/common/compare_fields/Cargo.toml @@ -4,6 +4,9 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } +[dependencies] +itertools = { workspace = true } + [dev-dependencies] compare_fields_derive = { workspace = true } diff --git a/common/compare_fields/src/lib.rs b/common/compare_fields/src/lib.rs index bc2f5446a..27baf1480 100644 --- a/common/compare_fields/src/lib.rs +++ b/common/compare_fields/src/lib.rs @@ -81,11 +81,8 @@ //! } //! ]; //! assert_eq!(bar_a.compare_fields(&bar_b), bar_a_b); -//! -//! -//! -//! // TODO: //! 
``` +use itertools::{EitherOrBoth, Itertools}; use std::fmt::Debug; #[derive(Debug, PartialEq, Clone)] @@ -112,13 +109,38 @@ impl Comparison { } pub fn from_slice>(field_name: String, a: &[T], b: &[T]) -> Self { - let mut children = vec![]; + Self::from_iter(field_name, a.iter(), b.iter()) + } - for i in 0..std::cmp::max(a.len(), b.len()) { - children.push(FieldComparison::new(format!("{i}"), &a.get(i), &b.get(i))); + pub fn from_into_iter<'a, T: Debug + PartialEq + 'a>( + field_name: String, + a: impl IntoIterator, + b: impl IntoIterator, + ) -> Self { + Self::from_iter(field_name, a.into_iter(), b.into_iter()) + } + + pub fn from_iter<'a, T: Debug + PartialEq + 'a>( + field_name: String, + a: impl Iterator, + b: impl Iterator, + ) -> Self { + let mut children = vec![]; + let mut all_equal = true; + + for (i, entry) in a.zip_longest(b).enumerate() { + let comparison = match entry { + EitherOrBoth::Both(x, y) => { + FieldComparison::new(format!("{i}"), &Some(x), &Some(y)) + } + EitherOrBoth::Left(x) => FieldComparison::new(format!("{i}"), &Some(x), &None), + EitherOrBoth::Right(y) => FieldComparison::new(format!("{i}"), &None, &Some(y)), + }; + all_equal = all_equal && comparison.equal(); + children.push(comparison); } - Self::parent(field_name, a == b, children) + Self::parent(field_name, all_equal, children) } pub fn retain_children(&mut self, f: F) diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs index a8b92b3d5..01c5a8f6e 100644 --- a/common/compare_fields_derive/src/lib.rs +++ b/common/compare_fields_derive/src/lib.rs @@ -4,10 +4,11 @@ use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, DeriveInput}; -fn is_slice(field: &syn::Field) -> bool { +fn is_iter(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { attr.path.is_ident("compare_fields") - && attr.tokens.to_string().replace(' ', "") == "(as_slice)" + && (attr.tokens.to_string().replace(' ', "") == "(as_slice)" + || attr.tokens.to_string().replace(' ', "") == "(as_iter)") }) } @@ -18,29 +19,26 @@ pub fn compare_fields_derive(input: TokenStream) -> TokenStream { let name = &item.ident; let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("compare_fields_derive only supports structs."), + let syn::Data::Struct(struct_data) = &item.data else { + panic!("compare_fields_derive only supports structs."); }; let mut quotes = vec![]; for field in struct_data.fields.iter() { - let ident_a = match &field.ident { - Some(ref ident) => ident, - _ => panic!("compare_fields_derive only supports named struct fields."), + let Some(ident_a) = &field.ident else { + panic!("compare_fields_derive only supports named struct fields."); }; - let field_name = ident_a.to_string(); let ident_b = ident_a.clone(); - let quote = if is_slice(field) { + let quote = if is_iter(field) { quote! { - comparisons.push(compare_fields::Comparison::from_slice( + comparisons.push(compare_fields::Comparison::from_into_iter( #field_name.to_string(), &self.#ident_a, - &b.#ident_b) - ); + &b.#ident_b + )); } } else { quote! 
{ diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index 62b98aab9..e8585c504 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -10,6 +10,7 @@ pub const DEFAULT_NETWORK_DIR: &str = "network"; pub const DEFAULT_VALIDATOR_DIR: &str = "validators"; pub const DEFAULT_SECRET_DIR: &str = "secrets"; pub const DEFAULT_WALLET_DIR: &str = "wallets"; +pub const DEFAULT_TRACING_DIR: &str = "tracing"; /// Base directory name for unnamed testnets passed through the --testnet-dir flag pub const CUSTOM_TESTNET_DIR: &str = "custom"; diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index f2911fd3d..0f27bb667 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -9,6 +9,8 @@ edition = { workspace = true } [dependencies] serde = { workspace = true } serde_json = { workspace = true } +ssz_types = { workspace = true } +tree_hash = { workspace = true } types = { workspace = true } reqwest = { workspace = true } lighthouse_network = { workspace = true } @@ -34,7 +36,7 @@ pretty_reqwest_error = { workspace = true } tokio = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] -psutil = { version = "3.2.2", optional = true } +psutil = { version = "3.3.0", optional = true } procfs = { version = "0.15.1", optional = true } [features] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 5b43ee3a7..16801be8e 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -31,6 +31,7 @@ use serde::{de::DeserializeOwned, Serialize}; use ssz::Encode; use std::convert::TryFrom; use std::fmt; +use std::future::Future; use std::iter::Iterator; use std::path::PathBuf; use std::time::Duration; @@ -38,8 +39,15 @@ use store::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedRes pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); +pub const V3: EndpointVersion = EndpointVersion(3); pub const CONSENSUS_VERSION_HEADER: &str = "Eth-Consensus-Version"; +pub const EXECUTION_PAYLOAD_BLINDED_HEADER: &str = "Eth-Execution-Payload-Blinded"; +pub const EXECUTION_PAYLOAD_VALUE_HEADER: &str = "Eth-Execution-Payload-Value"; +pub const CONSENSUS_BLOCK_VALUE_HEADER: &str = "Eth-Consensus-Block-Value"; + +pub const CONTENT_TYPE_HEADER: &str = "Content-Type"; +pub const SSZ_CONTENT_TYPE_HEADER: &str = "application/octet-stream"; #[derive(Debug)] pub enum Error { @@ -63,6 +71,8 @@ pub enum Error { InvalidJson(serde_json::Error), /// The server returned an invalid server-sent event. InvalidServerSentEvent(String), + /// The server sent invalid response headers. + InvalidHeaders(String), /// The server returned an invalid SSZ response. InvalidSsz(ssz::DecodeError), /// An I/O error occurred while loading an API token from disk. @@ -93,6 +103,7 @@ impl Error { Error::MissingSignatureHeader => None, Error::InvalidJson(_) => None, Error::InvalidServerSentEvent(_) => None, + Error::InvalidHeaders(_) => None, Error::InvalidSsz(_) => None, Error::TokenReadError(..) 
=> None, Error::NoServerPubkey | Error::NoToken => None, @@ -120,7 +131,7 @@ pub struct Timeouts { pub get_beacon_blocks_ssz: Duration, pub get_debug_beacon_states: Duration, pub get_deposit_snapshot: Duration, - pub get_validator_block_ssz: Duration, + pub get_validator_block: Duration, } impl Timeouts { @@ -136,7 +147,7 @@ impl Timeouts { get_beacon_blocks_ssz: timeout, get_debug_beacon_states: timeout, get_deposit_snapshot: timeout, - get_validator_block_ssz: timeout, + get_validator_block: timeout, } } } @@ -268,6 +279,32 @@ impl BeaconNodeHttpClient { } } + /// Perform a HTTP GET request using an 'accept' header, returning `None` on a 404 error. + pub async fn get_response_with_response_headers( + &self, + url: U, + accept_header: Accept, + timeout: Duration, + parser: impl FnOnce(Response, HeaderMap) -> F, + ) -> Result, Error> + where + F: Future>, + { + let opt_response = self + .get_response(url, |b| b.accept(accept_header).timeout(timeout)) + .await + .optional()?; + + match opt_response { + Some(resp) => { + let response_headers = resp.headers().clone(); + let parsed_response = parser(resp, response_headers).await?; + Ok(Some(parsed_response)) + } + None => Ok(None), + } + } + /// Perform a HTTP POST request. async fn post(&self, url: U, body: &T) -> Result<(), Error> { self.post_generic(url, body, None).await?; @@ -288,6 +325,18 @@ impl BeaconNodeHttpClient { .map_err(Into::into) } + async fn post_with_opt_response( + &self, + url: U, + body: &T, + ) -> Result, Error> { + if let Some(response) = self.post_generic(url, body, None).await.optional()? { + response.json().await.map_err(Into::into) + } else { + Ok(None) + } + } + /// Perform a HTTP POST request with a custom timeout. async fn post_with_timeout( &self, @@ -328,25 +377,6 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } - /// Generic POST function supporting arbitrary responses and timeouts. - async fn post_generic_with_ssz_body, U: IntoUrl>( - &self, - url: U, - body: T, - timeout: Option, - ) -> Result { - let mut builder = self.client.post(url); - if let Some(timeout) = timeout { - builder = builder.timeout(timeout); - } - let response = builder - .header("Content-Type", "application/octet-stream") - .body(body) - .send() - .await?; - ok_or_error(response).await - } - /// Generic POST function supporting arbitrary responses and timeouts. async fn post_generic_with_consensus_version( &self, @@ -495,6 +525,29 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `POST beacon/states/{state_id}/validator_balances` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_states_validator_balances( + &self, + state_id: StateId, + ids: Vec, + ) -> Result>>, Error> + { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validator_balances"); + + let request = ValidatorBalancesRequestBody { ids }; + + self.post_with_opt_response(path, &request).await + } + /// `GET beacon/states/{state_id}/validators?id,status` /// /// Returns `Ok(None)` on a 404 error. @@ -534,6 +587,29 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `POST beacon/states/{state_id}/validators` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn post_beacon_states_validators( + &self, + state_id: StateId, + ids: Option>, + statuses: Option>, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validators"); + + let request = ValidatorsRequestBody { ids, statuses }; + + self.post_with_opt_response(path, &request).await + } + /// `GET beacon/states/{state_id}/committees?slot,index,epoch` /// /// Returns `Ok(None)` on a 404 error. @@ -638,6 +714,59 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/light_client/bootstrap` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_light_client_bootstrap( + &self, + block_root: Hash256, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("light_client") + .push("bootstrap") + .push(&format!("{:?}", block_root)); + + self.get_opt(path).await + } + + /// `GET beacon/light_client/optimistic_update` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_light_client_optimistic_update( + &self, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("light_client") + .push("optimistic_update"); + + self.get_opt(path).await + } + + /// `GET beacon/light_client/finality_update` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_light_client_finality_update( + &self, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("light_client") + .push("finality_update"); + + self.get_opt(path).await + } + /// `GET beacon/headers?slot,parent_root` /// /// Returns `Ok(None)` on a 404 error. @@ -687,9 +816,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks>( + pub async fn post_beacon_blocks( &self, - block: &SignedBeaconBlock, + block_contents: &PublishBlockRequest, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -698,7 +827,7 @@ impl BeaconNodeHttpClient { .push("beacon") .push("blocks"); - self.post_with_timeout(path, block, self.timeouts.proposal) + self.post_with_timeout(path, block_contents, self.timeouts.proposal) .await?; Ok(()) @@ -707,9 +836,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks_ssz>( + pub async fn post_beacon_blocks_ssz( &self, - block: &SignedBeaconBlock, + block_contents: &PublishBlockRequest, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -718,8 +847,13 @@ impl BeaconNodeHttpClient { .push("beacon") .push("blocks"); - self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal)) - .await?; + self.post_generic_with_consensus_version_and_ssz_body( + path, + block_contents.as_ssz_bytes(), + Some(self.timeouts.proposal), + block_contents.signed_block().fork_name_unchecked(), + ) + .await?; Ok(()) } @@ -727,9 +861,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. 
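// Illustrative sketch of the three light-client routes added above, assuming
// `client: &BeaconNodeHttpClient` and a known block root; the turbofish stands
// in for the `EthSpec` parameter the original signatures are generic over.
// Each route returns `Ok(None)` on 404, e.g. when light-client data has been
// pruned or the server does not serve it.
async fn light_client_demo<E: types::EthSpec>(
    client: &eth2::BeaconNodeHttpClient,
    block_root: types::Hash256,
) -> Result<(), eth2::Error> {
    let bootstrap = client.get_light_client_bootstrap::<E>(block_root).await?;
    let finality = client.get_beacon_light_client_finality_update::<E>().await?;
    let optimistic = client.get_beacon_light_client_optimistic_update::<E>().await?;
    println!(
        "bootstrap {} / finality update {} / optimistic update {}",
        bootstrap.is_some(),
        finality.is_some(),
        optimistic.is_some(),
    );
    Ok(())
}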
- pub async fn post_beacon_blinded_blocks>( + pub async fn post_beacon_blinded_blocks( &self, - block: &SignedBeaconBlock, + block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -747,9 +881,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blinded_blocks_ssz>( + pub async fn post_beacon_blinded_blocks_ssz( &self, - block: &SignedBeaconBlock, + block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -758,8 +892,13 @@ impl BeaconNodeHttpClient { .push("beacon") .push("blinded_blocks"); - self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal)) - .await?; + self.post_generic_with_consensus_version_and_ssz_body( + path, + block.as_ssz_bytes(), + Some(self.timeouts.proposal), + block.fork_name_unchecked(), + ) + .await?; Ok(()) } @@ -801,16 +940,16 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blocks` - pub async fn post_beacon_blocks_v2>( + pub async fn post_beacon_blocks_v2( &self, - block: &SignedBeaconBlock, + block_contents: &PublishBlockRequest, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version( self.post_beacon_blocks_v2_path(validation_level)?, - block, + block_contents, Some(self.timeouts.proposal), - block.message().body().fork_name(), + block_contents.signed_block().message().body().fork_name(), ) .await?; @@ -818,16 +957,16 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blocks` - pub async fn post_beacon_blocks_v2_ssz>( + pub async fn post_beacon_blocks_v2_ssz( &self, - block: &SignedBeaconBlock, + block_contents: &PublishBlockRequest, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version_and_ssz_body( self.post_beacon_blocks_v2_path(validation_level)?, - block.as_ssz_bytes(), + block_contents.as_ssz_bytes(), Some(self.timeouts.proposal), - block.message().body().fork_name(), + block_contents.signed_block().message().body().fork_name(), ) .await?; @@ -837,14 +976,14 @@ impl BeaconNodeHttpClient { /// `POST v2/beacon/blinded_blocks` pub async fn post_beacon_blinded_blocks_v2( &self, - block: &SignedBlindedBeaconBlock, + signed_block: &SignedBlindedBeaconBlock, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version( self.post_beacon_blinded_blocks_v2_path(validation_level)?, - block, + signed_block, Some(self.timeouts.proposal), - block.message().body().fork_name(), + signed_block.message().body().fork_name(), ) .await?; @@ -854,14 +993,14 @@ impl BeaconNodeHttpClient { /// `POST v2/beacon/blinded_blocks` pub async fn post_beacon_blinded_blocks_v2_ssz( &self, - block: &SignedBlindedBeaconBlock, + signed_block: &SignedBlindedBeaconBlock, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version_and_ssz_body( self.post_beacon_blinded_blocks_v2_path(validation_level)?, - block.as_ssz_bytes(), + signed_block.as_ssz_bytes(), Some(self.timeouts.proposal), - block.message().body().fork_name(), + signed_block.message().body().fork_name(), ) .await?; @@ -879,6 +1018,17 @@ impl BeaconNodeHttpClient { Ok(path) } + /// Path for `v1/beacon/blob_sidecars/{block_id}` + pub fn get_blobs_path(&self, block_id: BlockId) -> Result { + let mut path = self.eth_path(V1)?; + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("blob_sidecars") + .push(&block_id.to_string()); + Ok(path) + } + /// Path for `v1/beacon/blinded_blocks/{block_id}` pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V1)?; @@ -901,9 +1051,33 @@ impl BeaconNodeHttpClient { Error, > { let path = self.get_beacon_blocks_path(block_id)?; - let response = match self.get_response(path, |b| b).await.optional()? { - Some(res) => res, - None => return Ok(None), + let Some(response) = self.get_response(path, |b| b).await.optional()? else { + return Ok(None); + }; + + Ok(Some(response.json().await?)) + } + + /// `GET v1/beacon/blob_sidecars/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_blobs( + &self, + block_id: BlockId, + indices: Option<&[u64]>, + ) -> Result>>, Error> { + let mut path = self.get_blobs_path(block_id)?; + if let Some(indices) = indices { + let indices_string = indices + .iter() + .map(|i| i.to_string()) + .collect::>() + .join(","); + path.query_pairs_mut() + .append_pair("indices", &indices_string); + } + let Some(response) = self.get_response(path, |b| b).await.optional()? else { + return Ok(None); }; Ok(Some(response.json().await?)) @@ -920,9 +1094,8 @@ impl BeaconNodeHttpClient { Error, > { let path = self.get_beacon_blinded_blocks_path(block_id)?; - let response = match self.get_response(path, |b| b).await.optional()? { - Some(res) => res, - None => return Ok(None), + let Some(response) = self.get_response(path, |b| b).await.optional()? else { + return Ok(None); }; Ok(Some(response.json().await?)) @@ -1590,18 +1763,33 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks>( + pub async fn get_validator_blocks( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blocks_modular(slot, randao_reveal, graffiti, SkipRandaoVerification::No) .await } + /// `GET v2/validator/blocks/{slot}` + pub async fn get_validator_blocks_modular( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + ) -> Result>, Error> { + let path = self + .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) + .await?; + + self.get(path).await + } + /// returns `GET v2/validator/blocks/{slot}` URL path - pub async fn get_validator_blocks_path>( + pub async fn get_validator_blocks_path( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1632,34 +1820,197 @@ impl BeaconNodeHttpClient { Ok(path) } - /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks_modular>( + /// returns `GET v3/validator/blocks/{slot}` URL path + pub async fn get_validator_blocks_v3_path( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + builder_booster_factor: Option, + ) -> Result { + let mut path = self.eth_path(V3)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("blocks") + .push(&slot.to_string()); + + path.query_pairs_mut() + .append_pair("randao_reveal", &randao_reveal.to_string()); + + if let Some(graffiti) = graffiti { + path.query_pairs_mut() + .append_pair("graffiti", &graffiti.to_string()); + } + + if skip_randao_verification == SkipRandaoVerification::Yes { + path.query_pairs_mut() + .append_pair("skip_randao_verification", ""); + } + + if let Some(builder_booster_factor) = builder_booster_factor { + path.query_pairs_mut() + .append_pair("builder_boost_factor", &builder_booster_factor.to_string()); + } + + Ok(path) + } + + /// `GET v3/validator/blocks/{slot}` + pub async fn get_validator_blocks_v3( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + builder_booster_factor: Option, + ) -> Result<(JsonProduceBlockV3Response, ProduceBlockV3Metadata), Error> { + self.get_validator_blocks_v3_modular( + slot, + randao_reveal, + graffiti, + SkipRandaoVerification::No, + builder_booster_factor, + ) + .await + } + + /// `GET v3/validator/blocks/{slot}` + pub async fn get_validator_blocks_v3_modular( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + builder_booster_factor: Option, + ) -> Result<(JsonProduceBlockV3Response, ProduceBlockV3Metadata), Error> { let path = self - .get_validator_blocks_path::( + .get_validator_blocks_v3_path( slot, randao_reveal, graffiti, skip_randao_verification, + builder_booster_factor, ) .await?; - self.get(path).await + let opt_result = self + .get_response_with_response_headers( + path, + Accept::Json, + self.timeouts.get_validator_block, + |response, headers| async move { + let header_metadata = ProduceBlockV3Metadata::try_from(&headers) + .map_err(Error::InvalidHeaders)?; + if header_metadata.execution_payload_blinded { + let blinded_response = response + .json::, + ProduceBlockV3Metadata>>() + .await? + .map_data(ProduceBlockV3Response::Blinded); + Ok((blinded_response, header_metadata)) + } else { + let full_block_response= response + .json::, + ProduceBlockV3Metadata>>() + .await? + .map_data(ProduceBlockV3Response::Full); + Ok((full_block_response, header_metadata)) + } + }, + ) + .await?; + + // Generic handler is optional but this route should never 404 unless unimplemented, so + // treat that as an error. 
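// Caller-side sketch of this v3 route: the response headers decide the variant
// before the body is parsed, so a validator client can branch to the blinded
// or full publish path without trial deserialization. Assumes `E: EthSpec`;
// `response.data` follows the `ForkVersionedResponse` wrapper used here.
async fn produce_v3_demo<E: types::EthSpec>(
    client: &eth2::BeaconNodeHttpClient,
    slot: types::Slot,
    randao_reveal: &types::SignatureBytes,
) -> Result<(), eth2::Error> {
    let (response, metadata) = client
        .get_validator_blocks_v3::<E>(slot, randao_reveal, None, None)
        .await?;
    match response.data {
        eth2::types::ProduceBlockV3Response::Blinded(_) => {
            // Sign, then publish via `POST beacon/blinded_blocks`.
            println!("blinded; payload value {}", metadata.execution_payload_value);
        }
        eth2::types::ProduceBlockV3Response::Full(_) => {
            // Sign, then publish block (plus blobs post-Deneb) via `POST beacon/blocks`.
            println!("full; consensus value {}", metadata.consensus_block_value);
        }
    }
    Ok(())
}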
+ opt_result.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) + } + + /// `GET v3/validator/blocks/{slot}` in ssz format + pub async fn get_validator_blocks_v3_ssz( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + builder_booster_factor: Option, + ) -> Result<(ProduceBlockV3Response, ProduceBlockV3Metadata), Error> { + self.get_validator_blocks_v3_modular_ssz::( + slot, + randao_reveal, + graffiti, + SkipRandaoVerification::No, + builder_booster_factor, + ) + .await + } + + /// `GET v3/validator/blocks/{slot}` in ssz format + pub async fn get_validator_blocks_v3_modular_ssz( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + builder_booster_factor: Option, + ) -> Result<(ProduceBlockV3Response, ProduceBlockV3Metadata), Error> { + let path = self + .get_validator_blocks_v3_path( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + builder_booster_factor, + ) + .await?; + + let opt_response = self + .get_response_with_response_headers( + path, + Accept::Ssz, + self.timeouts.get_validator_block, + |response, headers| async move { + let metadata = ProduceBlockV3Metadata::try_from(&headers) + .map_err(Error::InvalidHeaders)?; + let response_bytes = response.bytes().await?; + + // Parse bytes based on metadata. + let response = if metadata.execution_payload_blinded { + ProduceBlockV3Response::Blinded( + BlindedBeaconBlock::from_ssz_bytes_for_fork( + &response_bytes, + metadata.consensus_version, + ) + .map_err(Error::InvalidSsz)?, + ) + } else { + ProduceBlockV3Response::Full( + FullBlockContents::from_ssz_bytes_for_fork( + &response_bytes, + metadata.consensus_version, + ) + .map_err(Error::InvalidSsz)?, + ) + }; + + Ok((response, metadata)) + }, + ) + .await?; + + // Generic handler is optional but this route should never 404 unless unimplemented, so + // treat that as an error. 
+ opt_response.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) } /// `GET v2/validator/blocks/{slot}` in ssz format - pub async fn get_validator_blocks_ssz>( + pub async fn get_validator_blocks_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, ) -> Result>, Error> { - self.get_validator_blocks_modular_ssz::( + self.get_validator_blocks_modular_ssz::( slot, randao_reveal, graffiti, @@ -1669,7 +2020,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` in ssz format - pub async fn get_validator_blocks_modular_ssz>( + pub async fn get_validator_blocks_modular_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1677,25 +2028,20 @@ impl BeaconNodeHttpClient { skip_randao_verification: SkipRandaoVerification, ) -> Result>, Error> { let path = self - .get_validator_blocks_path::( - slot, - randao_reveal, - graffiti, - skip_randao_verification, - ) + .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) .await?; - self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block) .await } /// `GET v2/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks>( + pub async fn get_validator_blinded_blocks( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blinded_blocks_modular( slot, randao_reveal, @@ -1706,7 +2052,7 @@ impl BeaconNodeHttpClient { } /// returns `GET v1/validator/blinded_blocks/{slot}` URL path - pub async fn get_validator_blinded_blocks_path>( + pub async fn get_validator_blinded_blocks_path( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1738,18 +2084,15 @@ impl BeaconNodeHttpClient { } /// `GET v1/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks_modular< - T: EthSpec, - Payload: AbstractExecPayload, - >( + pub async fn get_validator_blinded_blocks_modular( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result>, Error> { let path = self - .get_validator_blinded_blocks_path::( + .get_validator_blinded_blocks_path::( slot, randao_reveal, graffiti, @@ -1761,13 +2104,13 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blinded_blocks/{slot}` in ssz format - pub async fn get_validator_blinded_blocks_ssz>( + pub async fn get_validator_blinded_blocks_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, ) -> Result>, Error> { - self.get_validator_blinded_blocks_modular_ssz::( + self.get_validator_blinded_blocks_modular_ssz::( slot, randao_reveal, graffiti, @@ -1776,10 +2119,7 @@ impl BeaconNodeHttpClient { .await } - pub async fn get_validator_blinded_blocks_modular_ssz< - T: EthSpec, - Payload: AbstractExecPayload, - >( + pub async fn get_validator_blinded_blocks_modular_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1787,7 +2127,7 @@ impl BeaconNodeHttpClient { skip_randao_verification: SkipRandaoVerification, ) -> Result>, Error> { let path = self - .get_validator_blinded_blocks_path::( + .get_validator_blinded_blocks_path::( slot, randao_reveal, graffiti, @@ -1795,7 +2135,7 @@ impl BeaconNodeHttpClient { ) .await?; - self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, 
self.timeouts.get_validator_block) .await } @@ -1897,7 +2237,7 @@ impl BeaconNodeHttpClient { pub async fn post_validator_liveness_epoch( &self, epoch: Epoch, - indices: Vec, + indices: &Vec, ) -> Result>, Error> { let mut path = self.eth_path(V1)?; @@ -1907,7 +2247,7 @@ impl BeaconNodeHttpClient { .push("liveness") .push(&epoch.to_string()); - self.post_with_timeout_and_response(path, &indices, self.timeouts.liveness) + self.post_with_timeout_and_response(path, indices, self.timeouts.liveness) .await } diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index dfc19db49..11706f309 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -20,7 +20,7 @@ use reqwest::IntoUrl; use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; -use store::{AnchorInfo, Split, StoreConfig}; +use store::{AnchorInfo, BlobInfo, Split, StoreConfig}; pub use attestation_performance::{ AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, @@ -243,6 +243,8 @@ pub struct ProcessHealth { pub pid_mem_resident_set_size: u64, /// The total virtual memory used by this pid. pub pid_mem_virtual_memory_size: u64, + /// The total shared memory used by this pid. + pub pid_mem_shared_memory_size: u64, /// Number of cpu seconds consumed by this pid. pub pid_process_seconds_total: u64, } @@ -277,6 +279,7 @@ impl ProcessHealth { pid_num_threads: stat.num_threads, pid_mem_resident_set_size: process_mem.rss(), pid_mem_virtual_memory_size: process_mem.vms(), + pid_mem_shared_memory_size: process_mem.shared(), pid_process_seconds_total: process_times.busy().as_secs() + process_times.children_system().as_secs() + process_times.children_system().as_secs(), @@ -364,6 +367,7 @@ pub struct DatabaseInfo { pub config: StoreConfig, pub split: Split, pub anchor: Option, + pub blob_info: BlobInfo, } impl BeaconNodeHttpClient { diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index b2d53c5e0..83aeea4bf 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -226,11 +226,32 @@ impl ValidatorClientHttpClient { ok_or_error(response).await } + /// Perform a HTTP DELETE request, returning the `Response` for further processing. + async fn delete_response(&self, url: U) -> Result { + let response = self + .client + .delete(url) + .headers(self.headers()?) + .send() + .await + .map_err(Error::from)?; + ok_or_error(response).await + } + async fn get(&self, url: U) -> Result { let response = self.get_response(url).await?; self.signed_json(response).await } + async fn delete(&self, url: U) -> Result<(), Error> { + let response = self.delete_response(url).await?; + if response.status().is_success() { + Ok(()) + } else { + Err(Error::StatusCode(response.status())) + } + } + async fn get_unsigned(&self, url: U) -> Result { self.get_response(url) .await? 
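// End-to-end sketch of the `delete` helper above combined with the standard
// graffiti routes added just below (GET/POST/DELETE
// /eth/v1/validator/{pubkey}/graffiti). Assumes an authenticated client named
// `vc` and that the error type is this crate's `Error`; a sketch, not the
// crate's own test code.
async fn graffiti_demo(
    vc: &ValidatorClientHttpClient,
    pubkey: types::PublicKeyBytes,
    graffiti: types::GraffitiString,
) -> Result<(), Error> {
    vc.set_graffiti(&pubkey, graffiti).await?; // POST a new value
    let current = vc.get_graffiti(&pubkey).await?; // GET -> GetGraffitiResponse
    println!("{:?} now has graffiti {:?}", current.pubkey, current.graffiti);
    vc.delete_graffiti(&pubkey).await?; // DELETE removes the per-validator override
    Ok(())
}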
@@ -462,12 +483,15 @@ impl ValidatorClientHttpClient { } /// `PATCH lighthouse/validators/{validator_pubkey}` + #[allow(clippy::too_many_arguments)] pub async fn patch_lighthouse_validators( &self, voting_pubkey: &PublicKeyBytes, enabled: Option, gas_limit: Option, builder_proposals: Option, + builder_boost_factor: Option, + prefer_builder_proposals: Option, graffiti: Option, ) -> Result<(), Error> { let mut path = self.server.full.clone(); @@ -484,6 +508,8 @@ impl ValidatorClientHttpClient { enabled, gas_limit, builder_proposals, + builder_boost_factor, + prefer_builder_proposals, graffiti, }, ) @@ -537,6 +563,18 @@ impl ValidatorClientHttpClient { Ok(url) } + fn make_graffiti_url(&self, pubkey: &PublicKeyBytes) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("graffiti"); + Ok(url) + } + fn make_gas_limit_url(&self, pubkey: &PublicKeyBytes) -> Result { let mut url = self.server.full.clone(); url.path_segments_mut() @@ -684,6 +722,34 @@ impl ValidatorClientHttpClient { self.post(path, &()).await } + + /// `GET /eth/v1/validator/{pubkey}/graffiti` + pub async fn get_graffiti( + &self, + pubkey: &PublicKeyBytes, + ) -> Result { + let url = self.make_graffiti_url(pubkey)?; + self.get(url) + .await + .map(|generic: GenericResponse| generic.data) + } + + /// `POST /eth/v1/validator/{pubkey}/graffiti` + pub async fn set_graffiti( + &self, + pubkey: &PublicKeyBytes, + graffiti: GraffitiString, + ) -> Result<(), Error> { + let url = self.make_graffiti_url(pubkey)?; + let set_graffiti_request = SetGraffitiRequest { graffiti }; + self.post(url, &set_graffiti_request).await + } + + /// `DELETE /eth/v1/validator/{pubkey}/graffiti` + pub async fn delete_graffiti(&self, pubkey: &PublicKeyBytes) -> Result<(), Error> { + let url = self.make_graffiti_url(pubkey)?; + self.delete(url).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response or a diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 33e2f764e..ab90d336f 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,7 +1,7 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; -use types::{Address, PublicKeyBytes}; +use types::{Address, Graffiti, PublicKeyBytes}; pub use slashing_protection::interchange::Interchange; @@ -172,3 +172,9 @@ pub enum DeleteRemotekeyStatus { pub struct DeleteRemotekeysResponse { pub data: Vec>, } + +#[derive(Debug, Deserialize, Serialize)] +pub struct GetGraffitiResponse { + pub pubkey: PublicKeyBytes, + pub graffiti: Graffiti, +} diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index f1a91b4ef..d903d7b73 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -32,6 +32,12 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_boost_factor: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_builder_proposals: Option, #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -86,6 +92,12 @@ pub struct ValidatorPatchRequest { #[serde(default)] #[serde(skip_serializing_if = 
"Option::is_none")] pub graffiti: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_boost_factor: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_builder_proposals: Option, } #[derive(Clone, PartialEq, Serialize, Deserialize)] @@ -105,6 +117,12 @@ pub struct KeystoreValidatorsPostRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_boost_factor: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_builder_proposals: Option, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -135,6 +153,12 @@ pub struct Web3SignerValidatorRequest { pub client_identity_path: Option, #[serde(skip_serializing_if = "Option::is_none")] pub client_identity_password: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_boost_factor: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub prefer_builder_proposals: Option, } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] @@ -168,3 +192,8 @@ pub struct SingleExportKeystoresResponse { #[serde(skip_serializing_if = "Option::is_none")] pub validating_keystore_password: Option, } + +#[derive(Serialize, Deserialize, Debug)] +pub struct SetGraffitiRequest { + pub graffiti: GraffitiString, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 822f88179..a301055f3 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1,14 +1,23 @@ //! This module exposes a superset of the `types` crate. It adds additional types that are only //! required for the HTTP API. 
-use crate::Error as ServerError; +use crate::{ + Error as ServerError, CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, + EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, +}; use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus}; use mediatype::{names, MediaType, MediaTypeList}; -use serde::{Deserialize, Serialize}; +use reqwest::header::HeaderMap; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::Value; +use ssz::{Decode, DecodeError}; +use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::{self, Display}; use std::str::{from_utf8, FromStr}; +use std::sync::Arc; use std::time::Duration; +use types::beacon_block_body::KzgCommitments; pub use types::*; #[cfg(feature = "lighthouse")] @@ -274,17 +283,18 @@ pub struct FinalityCheckpointsData { } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(try_from = "&str")] +#[serde(into = "String")] +#[serde(try_from = "std::borrow::Cow")] pub enum ValidatorId { PublicKey(PublicKeyBytes), Index(u64), } -impl TryFrom<&str> for ValidatorId { +impl TryFrom> for ValidatorId { type Error = String; - fn try_from(s: &str) -> Result { - Self::from_str(s) + fn try_from(s: std::borrow::Cow) -> Result { + Self::from_str(&s) } } @@ -313,6 +323,12 @@ impl fmt::Display for ValidatorId { } } +impl From for String { + fn from(id: ValidatorId) -> String { + id.to_string() + } +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorData { #[serde(with = "serde_utils::quoted_u64")] @@ -488,6 +504,15 @@ pub struct ValidatorsQuery { pub status: Option>, } +#[derive(Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ValidatorsRequestBody { + #[serde(default)] + pub ids: Option>, + #[serde(default)] + pub statuses: Option>, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CommitteeData { #[serde(with = "serde_utils::quoted_u64")] @@ -652,6 +677,19 @@ pub struct ValidatorBalancesQuery { pub id: Option>, } +#[derive(Clone, Serialize, Deserialize)] +#[serde(transparent)] +pub struct ValidatorBalancesRequestBody { + pub ids: Vec, +} + +#[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct BlobIndicesQuery { + #[serde(default, deserialize_with = "option_query_vec")] + pub indices: Option>, +} + #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); @@ -692,6 +730,7 @@ pub struct ValidatorBlocksQuery { pub randao_reveal: SignatureBytes, pub graffiti: Option, pub skip_randao_verification: SkipRandaoVerification, + pub builder_boost_factor: Option, } #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Deserialize)] @@ -875,6 +914,28 @@ pub struct SseBlock { pub execution_optimistic: bool, } +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseBlobSidecar { + pub block_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub slot: Slot, + pub kzg_commitment: KzgCommitment, + pub versioned_hash: VersionedHash, +} + +impl SseBlobSidecar { + pub fn from_blob_sidecar(blob_sidecar: &BlobSidecar) -> SseBlobSidecar { + SseBlobSidecar { + block_root: blob_sidecar.block_root(), + index: blob_sidecar.index, + slot: blob_sidecar.slot(), + kzg_commitment: blob_sidecar.kzg_commitment, + versioned_hash: blob_sidecar.kzg_commitment.calculate_versioned_hash(), + } + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct 
SseFinalizedCheckpoint { pub block: Hash256, @@ -923,7 +984,7 @@ pub struct SseLateHead { } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)) )] #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] @@ -936,8 +997,10 @@ pub struct SsePayloadAttributes { pub prev_randao: Hash256, #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, - #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub withdrawals: Vec, + #[superstruct(only(V3), partial_getter(copy))] + pub parent_beacon_block_root: Hash256, } #[derive(PartialEq, Debug, Deserialize, Serialize, Clone)] @@ -967,6 +1030,9 @@ impl ForkVersionDeserialize for SsePayloadAttributes { ForkName::Capella => serde_json::from_value(value) .map(Self::V2) .map_err(serde::de::Error::custom), + ForkName::Deneb => serde_json::from_value(value) + .map(Self::V3) + .map_err(serde::de::Error::custom), ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( "SsePayloadAttributes deserialization for {fork_name} not implemented" ))), @@ -1000,12 +1066,15 @@ impl ForkVersionDeserialize for SseExtendedPayloadAttributes { pub enum EventKind { Attestation(Box>), Block(SseBlock), + BlobSidecar(SseBlobSidecar), FinalizedCheckpoint(SseFinalizedCheckpoint), Head(SseHead), VoluntaryExit(SignedVoluntaryExit), ChainReorg(SseChainReorg), ContributionAndProof(Box>), LateHead(SseLateHead), + LightClientFinalityUpdate(Box>), + LightClientOptimisticUpdate(Box>), #[cfg(feature = "lighthouse")] BlockReward(BlockReward), PayloadAttributes(VersionedSsePayloadAttributes), @@ -1016,6 +1085,7 @@ impl EventKind { match self { EventKind::Head(_) => "head", EventKind::Block(_) => "block", + EventKind::BlobSidecar(_) => "blob_sidecar", EventKind::Attestation(_) => "attestation", EventKind::VoluntaryExit(_) => "voluntary_exit", EventKind::FinalizedCheckpoint(_) => "finalized_checkpoint", @@ -1023,6 +1093,8 @@ impl EventKind { EventKind::ContributionAndProof(_) => "contribution_and_proof", EventKind::PayloadAttributes(_) => "payload_attributes", EventKind::LateHead(_) => "late_head", + EventKind::LightClientFinalityUpdate(_) => "light_client_finality_update", + EventKind::LightClientOptimisticUpdate(_) => "light_client_optimistic_update", #[cfg(feature = "lighthouse")] EventKind::BlockReward(_) => "block_reward", } @@ -1053,6 +1125,9 @@ impl EventKind { "block" => Ok(EventKind::Block(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block: {:?}", e)), )?)), + "blob_sidecar" => Ok(EventKind::BlobSidecar(serde_json::from_str(data).map_err( + |e| ServerError::InvalidServerSentEvent(format!("Blob Sidecar: {:?}", e)), + )?)), "chain_reorg" => Ok(EventKind::ChainReorg(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Chain Reorg: {:?}", e)), )?)), @@ -1082,6 +1157,22 @@ impl EventKind { ServerError::InvalidServerSentEvent(format!("Payload Attributes: {:?}", e)) })?, )), + "light_client_finality_update" => Ok(EventKind::LightClientFinalityUpdate( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Light Client Finality Update: {:?}", + e + )) + })?, + )), + "light_client_optimistic_update" => Ok(EventKind::LightClientOptimisticUpdate( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Light Client Optimistic Update: {:?}", + e + )) + })?, + )), #[cfg(feature = "lighthouse")] "block_reward" => 
Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), @@ -1105,6 +1196,7 @@ pub struct EventQuery { pub enum EventTopic { Head, Block, + BlobSidecar, Attestation, VoluntaryExit, FinalizedCheckpoint, @@ -1112,6 +1204,8 @@ pub enum EventTopic { ContributionAndProof, LateHead, PayloadAttributes, + LightClientFinalityUpdate, + LightClientOptimisticUpdate, #[cfg(feature = "lighthouse")] BlockReward, } @@ -1123,6 +1217,7 @@ impl FromStr for EventTopic { match s { "head" => Ok(EventTopic::Head), "block" => Ok(EventTopic::Block), + "blob_sidecar" => Ok(EventTopic::BlobSidecar), "attestation" => Ok(EventTopic::Attestation), "voluntary_exit" => Ok(EventTopic::VoluntaryExit), "finalized_checkpoint" => Ok(EventTopic::FinalizedCheckpoint), @@ -1130,6 +1225,8 @@ impl FromStr for EventTopic { "contribution_and_proof" => Ok(EventTopic::ContributionAndProof), "payload_attributes" => Ok(EventTopic::PayloadAttributes), "late_head" => Ok(EventTopic::LateHead), + "light_client_finality_update" => Ok(EventTopic::LightClientFinalityUpdate), + "light_client_optimistic_update" => Ok(EventTopic::LightClientOptimisticUpdate), #[cfg(feature = "lighthouse")] "block_reward" => Ok(EventTopic::BlockReward), _ => Err("event topic cannot be parsed.".to_string()), @@ -1142,6 +1239,7 @@ impl fmt::Display for EventTopic { match self { EventTopic::Head => write!(f, "head"), EventTopic::Block => write!(f, "block"), + EventTopic::BlobSidecar => write!(f, "blob_sidecar"), EventTopic::Attestation => write!(f, "attestation"), EventTopic::VoluntaryExit => write!(f, "voluntary_exit"), EventTopic::FinalizedCheckpoint => write!(f, "finalized_checkpoint"), @@ -1149,6 +1247,8 @@ impl fmt::Display for EventTopic { EventTopic::ContributionAndProof => write!(f, "contribution_and_proof"), EventTopic::PayloadAttributes => write!(f, "payload_attributes"), EventTopic::LateHead => write!(f, "late_head"), + EventTopic::LightClientFinalityUpdate => write!(f, "light_client_finality_update"), + EventTopic::LightClientOptimisticUpdate => write!(f, "light_client_optimistic_update"), #[cfg(feature = "lighthouse")] EventTopic::BlockReward => write!(f, "block_reward"), } @@ -1339,6 +1439,7 @@ pub mod serde_status_code { #[cfg(test)] mod tests { use super::*; + use ssz::Encode; #[test] fn query_vec() { @@ -1373,4 +1474,525 @@ mod tests { Accept::Any ); } + + #[test] + fn ssz_signed_block_contents_pre_deneb() { + type E = MainnetEthSpec; + let spec = ForkName::Capella.make_genesis_spec(E::default_spec()); + + let block: PublishBlockRequest = Arc::new(SignedBeaconBlock::from_block( + BeaconBlock::::Capella(BeaconBlockCapella::empty(&spec)), + Signature::empty(), + )) + .try_into() + .expect("should convert into signed block contents"); + + let decoded: PublishBlockRequest = + PublishBlockRequest::from_ssz_bytes(&block.as_ssz_bytes(), ForkName::Capella) + .expect("should decode Block"); + assert!(matches!(decoded, PublishBlockRequest::Block(_))); + } + + #[test] + fn ssz_signed_block_contents_with_blobs() { + type E = MainnetEthSpec; + let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + + let block = SignedBeaconBlock::from_block( + BeaconBlock::::Deneb(BeaconBlockDeneb::empty(&spec)), + Signature::empty(), + ); + let blobs = BlobsList::::from(vec![Blob::::default()]); + let kzg_proofs = KzgProofs::::from(vec![KzgProof::empty()]); + let signed_block_contents = + PublishBlockRequest::new(Arc::new(block), Some((kzg_proofs, blobs))); + + let decoded: 
PublishBlockRequest = PublishBlockRequest::from_ssz_bytes( + &signed_block_contents.as_ssz_bytes(), + ForkName::Deneb, + ) + .expect("should decode BlockAndBlobSidecars"); + assert!(matches!(decoded, PublishBlockRequest::BlockContents(_))); + } +} + +#[derive(Debug, Encode, Serialize, Deserialize)] +#[serde(untagged)] +#[serde(bound = "E: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum ProduceBlockV3Response { + Full(FullBlockContents), + Blinded(BlindedBeaconBlock), +} + +pub type JsonProduceBlockV3Response = + ForkVersionedResponse, ProduceBlockV3Metadata>; + +/// A wrapper over a [`BeaconBlock`] or a [`BlockContents`]. +#[derive(Debug, Encode, Serialize, Deserialize)] +#[serde(untagged)] +#[serde(bound = "T: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum FullBlockContents { + /// This is a full deneb variant with block and blobs. + BlockContents(BlockContents), + /// This variant is for all pre-deneb full blocks. + Block(BeaconBlock), +} + +pub type BlockContentsTuple = (BeaconBlock, Option<(KzgProofs, BlobsList)>); + +// This value should never be used +fn dummy_consensus_version() -> ForkName { + ForkName::Base +} + +/// Metadata about a `ProduceBlockV3Response` which is returned in the body & headers. +#[derive(Debug, Deserialize, Serialize)] +pub struct ProduceBlockV3Metadata { + // The consensus version is serialized & deserialized by `ForkVersionedResponse`. + #[serde( + skip_serializing, + skip_deserializing, + default = "dummy_consensus_version" + )] + pub consensus_version: ForkName, + pub execution_payload_blinded: bool, + #[serde(with = "serde_utils::u256_dec")] + pub execution_payload_value: Uint256, + #[serde(with = "serde_utils::u256_dec")] + pub consensus_block_value: Uint256, +} + +impl FullBlockContents { + pub fn new(block: BeaconBlock, blob_data: Option<(KzgProofs, BlobsList)>) -> Self { + match blob_data { + Some((kzg_proofs, blobs)) => Self::BlockContents(BlockContents { + block, + kzg_proofs, + blobs, + }), + None => Self::Block(block), + } + } + + /// SSZ decode with fork variant determined by slot. + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + let slot_len = ::ssz_fixed_len(); + let slot_bytes = bytes + .get(0..slot_len) + .ok_or(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: slot_len, + })?; + let slot = Slot::from_ssz_bytes(slot_bytes)?; + let fork_at_slot = spec.fork_name_at_slot::(slot); + Self::from_ssz_bytes_for_fork(bytes, fork_at_slot) + } + + /// SSZ decode with fork variant passed in explicitly. 
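// Why peeking the slot works: SSZ lays out a container's fixed-length fields
// first, and `slot` is the first field of every `BeaconBlock` variant, so the
// first 8 bytes of a bare block encoding are the slot as a little-endian u64.
// A dependency-free sketch of the same trick:
fn peek_slot(bytes: &[u8]) -> Option<u64> {
    let slot_bytes: [u8; 8] = bytes.get(0..8)?.try_into().ok()?;
    Some(u64::from_le_bytes(slot_bytes))
}
// `spec.fork_name_at_slot::<T>(slot)` then selects the decoding branch,
// exactly as `from_ssz_bytes` does above.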
+ pub fn from_ssz_bytes_for_fork( + bytes: &[u8], + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + .map(|block| FullBlockContents::Block(block)) + } + ForkName::Deneb => { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + builder.register_type::>()?; + + let mut decoder = builder.build()?; + let block = decoder.decode_next_with(|bytes| { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + })?; + let kzg_proofs = decoder.decode_next()?; + let blobs = decoder.decode_next()?; + + Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) + } + } + } + + pub fn block(&self) -> &BeaconBlock { + match self { + FullBlockContents::BlockContents(block_and_sidecars) => &block_and_sidecars.block, + FullBlockContents::Block(block) => block, + } + } + + pub fn deconstruct(self) -> BlockContentsTuple { + match self { + FullBlockContents::BlockContents(block_and_sidecars) => ( + block_and_sidecars.block, + Some((block_and_sidecars.kzg_proofs, block_and_sidecars.blobs)), + ), + FullBlockContents::Block(block) => (block, None), + } + } + + /// Signs `self`, producing a `SignedBlockContents`. + pub fn sign( + self, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> PublishBlockRequest { + let (block, maybe_blobs) = self.deconstruct(); + let signed_block = block.sign(secret_key, fork, genesis_validators_root, spec); + PublishBlockRequest::new(Arc::new(signed_block), maybe_blobs) + } +} + +impl ForkVersionDeserialize for FullBlockContents { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + Ok(FullBlockContents::Block( + BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, + )) + } + ForkName::Deneb => Ok(FullBlockContents::BlockContents( + BlockContents::deserialize_by_fork::<'de, D>(value, fork_name)?, + )), + } + } +} + +impl Into> for FullBlockContents { + fn into(self) -> BeaconBlock { + match self { + Self::BlockContents(block_and_sidecars) => block_and_sidecars.block, + Self::Block(block) => block, + } + } +} + +pub type SignedBlockContentsTuple = ( + Arc>, + Option<(KzgProofs, BlobsList)>, +); + +fn parse_required_header( + headers: &HeaderMap, + header_name: &str, + parse: impl FnOnce(&str) -> Result, +) -> Result { + let str_value = headers + .get(header_name) + .ok_or_else(|| format!("missing required header {header_name}"))? 
+ .to_str() + .map_err(|e| format!("invalid value in {header_name}: {e}"))?; + parse(str_value) +} + +impl TryFrom<&HeaderMap> for ProduceBlockV3Metadata { + type Error = String; + + fn try_from(headers: &HeaderMap) -> Result { + let consensus_version = parse_required_header(headers, CONSENSUS_VERSION_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {CONSENSUS_VERSION_HEADER}: {e:?}")) + })?; + let execution_payload_blinded = + parse_required_header(headers, EXECUTION_PAYLOAD_BLINDED_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_BLINDED_HEADER}: {e:?}")) + })?; + let execution_payload_value = + parse_required_header(headers, EXECUTION_PAYLOAD_VALUE_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_VALUE_HEADER}: {e:?}")) + })?; + let consensus_block_value = + parse_required_header(headers, CONSENSUS_BLOCK_VALUE_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {CONSENSUS_BLOCK_VALUE_HEADER}: {e:?}")) + })?; + + Ok(ProduceBlockV3Metadata { + consensus_version, + execution_payload_blinded, + execution_payload_value, + consensus_block_value, + }) + } +} + +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBlockContents`]. +#[derive(Clone, Debug, Encode, Serialize, Deserialize)] +#[serde(untagged)] +#[serde(bound = "T: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum PublishBlockRequest { + BlockContents(SignedBlockContents), + Block(Arc>), +} + +impl PublishBlockRequest { + pub fn new( + block: Arc>, + blob_items: Option<(KzgProofs, BlobsList)>, + ) -> Self { + match blob_items { + Some((kzg_proofs, blobs)) => Self::BlockContents(SignedBlockContents { + signed_block: block, + kzg_proofs, + blobs, + }), + None => Self::Block(block), + } + } + + /// SSZ decode with fork variant determined by `fork_name`. + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + .map(|block| PublishBlockRequest::Block(Arc::new(block))) + } + ForkName::Deneb => { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + builder.register_type::>()?; + + let mut decoder = builder.build()?; + let block = decoder.decode_next_with(|bytes| { + SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + })?; + let kzg_proofs = decoder.decode_next()?; + let blobs = decoder.decode_next()?; + Ok(PublishBlockRequest::new( + Arc::new(block), + Some((kzg_proofs, blobs)), + )) + } + } + } + + pub fn signed_block(&self) -> &Arc> { + match self { + PublishBlockRequest::BlockContents(block_and_sidecars) => { + &block_and_sidecars.signed_block + } + PublishBlockRequest::Block(block) => block, + } + } + + pub fn deconstruct(self) -> SignedBlockContentsTuple { + match self { + PublishBlockRequest::BlockContents(block_and_sidecars) => ( + block_and_sidecars.signed_block, + Some((block_and_sidecars.kzg_proofs, block_and_sidecars.blobs)), + ), + PublishBlockRequest::Block(block) => (block, None), + } + } +} + +/// Converting from a `SignedBlindedBeaconBlock` into a full `SignedBlockContents`. 
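// Round-trip sketch of the `ProduceBlockV3Metadata` header parsing above,
// using the header-name constants introduced in `lib.rs` earlier in this diff;
// the header values here are made up for illustration.
fn headers_demo() -> Result<(), String> {
    use reqwest::header::{HeaderMap, HeaderValue};
    let mut headers = HeaderMap::new();
    headers.insert(CONSENSUS_VERSION_HEADER, HeaderValue::from_static("deneb"));
    headers.insert(EXECUTION_PAYLOAD_BLINDED_HEADER, HeaderValue::from_static("true"));
    headers.insert(EXECUTION_PAYLOAD_VALUE_HEADER, HeaderValue::from_static("1000000000"));
    headers.insert(CONSENSUS_BLOCK_VALUE_HEADER, HeaderValue::from_static("42"));
    let metadata = ProduceBlockV3Metadata::try_from(&headers)?;
    assert!(metadata.execution_payload_blinded);
    assert_eq!(metadata.consensus_version, ForkName::Deneb);
    Ok(())
}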
+pub fn into_full_block_and_blobs( + blinded_block: SignedBlindedBeaconBlock, + maybe_full_payload_contents: Option>, +) -> Result, String> { + match maybe_full_payload_contents { + None => { + let signed_block = blinded_block + .try_into_full_block(None) + .ok_or("Failed to build full block with payload".to_string())?; + Ok(PublishBlockRequest::new(Arc::new(signed_block), None)) + } + // This variant implies a pre-deneb block + Some(FullPayloadContents::Payload(execution_payload)) => { + let signed_block = blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + Ok(PublishBlockRequest::new(Arc::new(signed_block), None)) + } + // This variant implies a post-deneb block + Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => { + let signed_block = blinded_block + .try_into_full_block(Some(payload_and_blobs.execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + + Ok(PublishBlockRequest::new( + Arc::new(signed_block), + Some(( + payload_and_blobs.blobs_bundle.proofs, + payload_and_blobs.blobs_bundle.blobs, + )), + )) + } + } +} + +impl TryFrom>> for PublishBlockRequest { + type Error = &'static str; + fn try_from(block: Arc>) -> Result { + match *block { + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Capella(_) => Ok(PublishBlockRequest::Block(block)), + SignedBeaconBlock::Deneb(_) => { + Err("deneb block contents cannot be fully constructed from just the signed block") + } + } + } +} + +impl From> for PublishBlockRequest { + fn from(block_contents_tuple: SignedBlockContentsTuple) -> Self { + PublishBlockRequest::new(block_contents_tuple.0, block_contents_tuple.1) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Encode)] +#[serde(bound = "T: EthSpec")] +pub struct SignedBlockContents { + pub signed_block: Arc>, + pub kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Encode)] +#[serde(bound = "T: EthSpec")] +pub struct BlockContents { + pub block: BeaconBlock, + pub kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, +} + +impl ForkVersionDeserialize for BlockContents { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + #[serde(bound = "T: EthSpec")] + struct Helper { + block: serde_json::Value, + kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + blobs: BlobsList, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?, + kzg_proofs: helper.kzg_proofs, + blobs: helper.blobs, + }) + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode)] +#[serde(untagged)] +#[serde(bound = "E: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum FullPayloadContents { + Payload(ExecutionPayload), + PayloadAndBlobs(ExecutionPayloadAndBlobs), +} + +impl FullPayloadContents { + pub fn new( + execution_payload: ExecutionPayload, + maybe_blobs: Option>, + ) -> Self { + match maybe_blobs { + None => Self::Payload(execution_payload), + Some(blobs_bundle) => Self::PayloadAndBlobs(ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + }), + 
} + } + + pub fn payload_ref(&self) -> &ExecutionPayload { + match self { + FullPayloadContents::Payload(payload) => payload, + FullPayloadContents::PayloadAndBlobs(payload_and_blobs) => { + &payload_and_blobs.execution_payload + } + } + } + + pub fn block_hash(&self) -> ExecutionBlockHash { + self.payload_ref().block_hash() + } + + pub fn deconstruct(self) -> (ExecutionPayload, Option>) { + match self { + FullPayloadContents::Payload(payload) => (payload, None), + FullPayloadContents::PayloadAndBlobs(payload_and_blobs) => ( + payload_and_blobs.execution_payload, + Some(payload_and_blobs.blobs_bundle), + ), + } + } +} + +impl ForkVersionDeserialize for FullPayloadContents { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Merge | ForkName::Capella => serde_json::from_value(value) + .map(Self::Payload) + .map_err(serde::de::Error::custom), + ForkName::Deneb => serde_json::from_value(value) + .map(Self::PayloadAndBlobs) + .map_err(serde::de::Error::custom), + ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( + "FullPayloadContents deserialization for {fork_name} not implemented" + ))), + } + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode)] +#[serde(bound = "E: EthSpec")] +pub struct ExecutionPayloadAndBlobs { + pub execution_payload: ExecutionPayload, + pub blobs_bundle: BlobsBundle, +} + +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] +#[serde(bound = "E: EthSpec")] +pub struct BlobsBundle { + pub commitments: KzgCommitments, + pub proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn validator_id_serde() { + let id_str = "\"1\""; + let x: ValidatorId = serde_json::from_str(id_str).unwrap(); + assert_eq!(x, ValidatorId::Index(1)); + assert_eq!(serde_json::to_string(&x).unwrap(), id_str); + + let pubkey_str = "\"0xb824b5ede33a7b05a378a84b183b4bc7e7db894ce48b659f150c97d359edca2f503081d6678d1200f582ec7cafa9caf2\""; + let y: ValidatorId = serde_json::from_str(pubkey_str).unwrap(); + assert_eq!(serde_json::to_string(&y).unwrap(), pubkey_str); + } } diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index ded62653e..6f92acc84 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -13,7 +13,6 @@ ethereum_hashing = { workspace = true } hex = { workspace = true } serde_yaml = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" bls = { workspace = true } [dev-dependencies] diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index 7b5fa7a8e..3d4ff02c3 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -22,7 +22,7 @@ extern crate lazy_static; use bls::{Keypair, PublicKey, SecretKey}; use ethereum_hashing::hash; use num_bigint::BigUint; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::convert::TryInto; use std::fs::File; use std::path::PathBuf; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 18dfe19da..3807c2e99 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -16,6 +16,7 @@ tokio = { workspace = true } [dependencies] serde_yaml = { workspace = true } +serde_json = { 
workspace = true } types = { workspace = true } ethereum_ssz = { workspace = true } eth2_config = { workspace = true } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 47b285a65..8064ea555 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -1,143 +1,38 @@ -# Extends the mainnet preset -PRESET_BASE: gnosis -# needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis -CONFIG_NAME: chiado +PRESET_BASE: 'gnosis' + +# Free-form short name of the network that this configuration applies to - known +# canonical network names include: +# * 'mainnet' - there can be only one +# * 'prater' - testnet +# Must match the regex: [a-z0-9\-] +CONFIG_NAME: 'chiado' + +# Transition +# --------------------------------------------------------------- +# Projected time: 2022-11-04T15:00:00.000Z, block: 680928 +TERMINAL_TOTAL_DIFFICULTY: 231707791542740786049188744689299064356246512 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + # Genesis +# --------------------------------------------------------------- +# *CUSTOM MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 6000 # 10 October 2022 10:00:00 GMT+0000 MIN_GENESIS_TIME: 1665396000 -GENESIS_DELAY: 300 - -# Projected time: 2022-11-04T15:00:00.000Z, block: 680928 -TERMINAL_TOTAL_DIFFICULTY: 231707791542740786049188744689299064356246512 - -# Deposit contract -# --------------------------------------------------------------- -# NOTE: Don't use a value too high, or Teku rejects it (4294906129 NOK) -DEPOSIT_CHAIN_ID: 10200 -DEPOSIT_NETWORK_ID: 10200 -DEPOSIT_CONTRACT_ADDRESS: 0xb97036A26259B7147018913bD58a774cf91acf25 - -# Misc -# --------------------------------------------------------------- -# 2**6 (= 64) -MAX_COMMITTEES_PER_SLOT: 64 -# 2**7 (= 128) -TARGET_COMMITTEE_SIZE: 128 -# 2**11 (= 2,048) -MAX_VALIDATORS_PER_COMMITTEE: 2048 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**12 (= 4096) -CHURN_LIMIT_QUOTIENT: 4096 -# See issue 563 -SHUFFLE_ROUND_COUNT: 90 -# 4 -HYSTERESIS_QUOTIENT: 4 -# 1 (minus 0.25) -HYSTERESIS_DOWNWARD_MULTIPLIER: 1 -# 5 (plus 1.25) -HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Validator -# --------------------------------------------------------------- -# 2**10 (= 1024) ~1.4 hour -ETH1_FOLLOW_DISTANCE: 1024 -# 2**4 (= 16) -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -# 2**0 (= 1) -RANDOM_SUBNETS_PER_VALIDATOR: 1 -# 2**8 (= 256) -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -# 6 (estimate from xDai mainnet) -SECONDS_PER_ETH1_BLOCK: 6 - -# Gwei values -# --------------------------------------------------------------- -# 2**0 * 10**9 (= 1,000,000,000) Gwei -MIN_DEPOSIT_AMOUNT: 1000000000 -# 2**5 * 10**9 (= 32,000,000,000) Gwei -MAX_EFFECTIVE_BALANCE: 32000000000 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**0 * 10**9 (= 1,000,000,000) Gwei -EFFECTIVE_BALANCE_INCREMENT: 1000000000 -# Initial values -# --------------------------------------------------------------- # GBC area code GENESIS_FORK_VERSION: 0x0000006f -BLS_WITHDRAWAL_PREFIX: 0x00 -# Time parameters -# --------------------------------------------------------------- -# 5 seconds -SECONDS_PER_SLOT: 5 -# 2**0 (= 1) slots 12 seconds -MIN_ATTESTATION_INCLUSION_DELAY: 1 -# 2**4 (= 16) slots 1.87 minutes 
-SLOTS_PER_EPOCH: 16 -# 2**0 (= 1) epochs 1.87 minutes -MIN_SEED_LOOKAHEAD: 1 -# 2**2 (= 4) epochs 7.47 minutes -MAX_SEED_LOOKAHEAD: 4 -# 2**6 (= 64) epochs ~2 hours -EPOCHS_PER_ETH1_VOTING_PERIOD: 64 -# 2**13 (= 8,192) slots ~15.9 hours -SLOTS_PER_HISTORICAL_ROOT: 8192 -# 2**8 (= 256) epochs ~8 hours -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~8 hours -SHARD_COMMITTEE_PERIOD: 256 -# 2**2 (= 4) epochs 7.47 minutes -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 +# *CUSTOM +GENESIS_DELAY: 300 -# State vector lengths + +# Forking # --------------------------------------------------------------- -# 2**16 (= 65,536) epochs ~85 days -EPOCHS_PER_HISTORICAL_VECTOR: 65536 -# 2**13 (= 8,192) epochs ~10.6 days -EPOCHS_PER_SLASHINGS_VECTOR: 8192 -# 2**24 (= 16,777,216) historical roots, ~15,243 years -HISTORICAL_ROOTS_LIMIT: 16777216 -# 2**40 (= 1,099,511,627,776) validator spots -VALIDATOR_REGISTRY_LIMIT: 1099511627776 -# Reward and penalty quotients -# --------------------------------------------------------------- -# 25 -BASE_REWARD_FACTOR: 25 -# 2**9 (= 512) -WHISTLEBLOWER_REWARD_QUOTIENT: 512 -# 2**3 (= 8) -PROPOSER_REWARD_QUOTIENT: 8 -# 2**26 (= 67,108,864) -INACTIVITY_PENALTY_QUOTIENT: 67108864 -# 2**7 (= 128) (lower safety margin at Phase 0 genesis) -MIN_SLASHING_PENALTY_QUOTIENT: 128 -# 1 (lower safety margin at Phase 0 genesis) -PROPORTIONAL_SLASHING_MULTIPLIER: 1 -# Max operations per block -# --------------------------------------------------------------- -# 2**4 (= 16) -MAX_PROPOSER_SLASHINGS: 16 -# 2**1 (= 2) -MAX_ATTESTER_SLASHINGS: 2 -# 2**7 (= 128) -MAX_ATTESTATIONS: 128 -# 2**4 (= 16) -MAX_DEPOSITS: 16 -# 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 -# Signature domains -# --------------------------------------------------------------- -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 -DOMAIN_SYNC_COMMITTEE: 0x07000000 -DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF: 0x08000000 -DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000 +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 # Altair ALTAIR_FORK_VERSION: 0x0100006f @@ -148,7 +43,95 @@ BELLATRIX_FORK_EPOCH: 180 # Mon Oct 10 2022 14:00:00 GMT+0000 # Capella CAPELLA_FORK_VERSION: 0x0300006f CAPELLA_FORK_EPOCH: 244224 # Wed May 24 2023 13:12:00 GMT+0000 +# Deneb +DENEB_FORK_VERSION: 0x0400006f +DENEB_FORK_EPOCH: 516608 # Wed Jan 31 2024 18:15:40 GMT+0000 + +# Time parameters +# --------------------------------------------------------------- +# 5 seconds +SECONDS_PER_SLOT: 5 +# 6 (estimate from xDai mainnet) +SECONDS_PER_ETH1_BLOCK: 6 +# 2**8 (= 256) epochs ~5.7 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~5.7 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**10 (= 1024) ~1.4 hour +ETH1_FOLLOW_DISTANCE: 1024 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) INACTIVITY_SCORE_BIAS: 4 # 2**4 (= 16) INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**12 (= 4096) +CHURN_LIMIT_QUOTIENT: 4096 +# [New in Deneb:EIP7514] 2* +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 2 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% 
diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml
index 845d20830..0bb72ebd8 100644
--- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml
+++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml
@@ -33,6 +33,10 @@ TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
 CAPELLA_FORK_VERSION: 0x04017000
 CAPELLA_FORK_EPOCH: 256
+# Deneb
+DENEB_FORK_VERSION: 0x05017000
+DENEB_FORK_EPOCH: 29696
+
 # Time parameters
 # ---------------------------------------------------------------
 # 12 seconds
@@ -59,6 +63,8 @@ EJECTION_BALANCE: 28000000000
 MIN_PER_EPOCH_CHURN_LIMIT: 4
 # 2**16 (= 65,536)
 CHURN_LIMIT_QUOTIENT: 65536
+# [New in Deneb:EIP7514] 2**3 (= 8)
+MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8

 # Fork choice
 # ---------------------------------------------------------------
@@ -109,5 +115,3 @@ MAX_REQUEST_BLOB_SIDECARS: 768
 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
 # `6`
 BLOB_SIDECAR_SUBNET_COUNT: 6
-# `uint64(6)`
-MAX_BLOBS_PER_BLOCK: 6
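The holesky hunk above keeps `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096` with a "~18 days" note, and drops `MAX_BLOBS_PER_BLOCK` from the per-network file (presumably because it is a preset value rather than a runtime config value). The duration comment checks out under mainnet-preset timing (32 slots per epoch, 12 s per slot); a quick worked check:

```rust
fn main() {
    // 4096 epochs * 32 slots/epoch * 12 s/slot
    let secs = 4_096u64 * 32 * 12;
    let days = secs as f64 / 86_400.0;
    println!("{secs} s ~ {days:.1} days"); // 1572864 s ~ 18.2 days
    assert!((days - 18.2).abs() < 0.05);
}
```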
diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml
index 7442f6576..1ae519387 100644
--- a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml
+++ b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml
@@ -9,9 +9,7 @@
 - enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg
 - enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg
 # Teku team (Consensys)
-- enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA
-- enr:-KG4QL-eqFoHy0cI31THvtZjpYUu_Jdw_MO7skQRJxY1g5HTN1A0epPCU6vi0gLGUgrzpU-ygeMSS8ewVxDpKfYmxMMGhGV0aDKQtTA_KgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaED8GJ2vzUqgL6-KD1xalo1CsmY4X1HaDnyl6Y_WayCo9GDdGNwgiMog3VkcIIjKA
-- enr:-KG4QMOEswP62yzDjSwWS4YEjtTZ5PO6r65CPqYBkgTTkrpaedQ8uEUo1uMALtJIvb2w_WWEVmg5yt1UAuK1ftxUU7QDhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQEnfA2iXNlY3AyNTZrMaEDfol8oLr6XJ7FsdAYE7lpJhKMls4G_v6qQOGKJUWGb_uDdGNwgiMog3VkcIIjKA
+- enr:-KG4QNTx85fjxABbSq_Rta9wy56nQ1fHK0PewJbGjLm1M4bMGx5-3Qq4ZX2-iFJ0pys_O90sVXNNOxp2E7afBsGsBrgDhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQEnfA2iXNlY3AyNTZrMaECGXWQ-rQ2KZKRH1aOW4IlPDBkY4XDphxg9pxKytFCkayDdGNwgiMog3VkcIIjKA
 - enr:-KG4QF4B5WrlFcRhUU6dZETwY5ZzAXnA0vGC__L1Kdw602nDZwXSTs5RFXFIFUnbQJmhNGVU6OIX7KVrCSTODsz1tK4DhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQExNYEiXNlY3AyNTZrMaECQmM9vp7KhaXhI-nqL_R0ovULLCFSFTa9CPPSdb1zPX6DdGNwgiMog3VkcIIjKA
 # Prysm team (Prysmatic Labs)
 - enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg
diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml
index 98984f3b7..ed96df291 100644
--- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml
+++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml
@@ -39,6 +39,9 @@ BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
 # Capella
 CAPELLA_FORK_VERSION: 0x03000000
 CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC
+# Deneb
+DENEB_FORK_VERSION: 0x04000000
+DENEB_FORK_EPOCH: 18446744073709551615
 # Sharding
 SHARDING_FORK_VERSION: 0x03000000
 SHARDING_FORK_EPOCH: 18446744073709551615
@@ -71,6 +74,8 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16
 EJECTION_BALANCE: 16000000000
 # 2**2 (= 4)
 MIN_PER_EPOCH_CHURN_LIMIT: 4
+# 2**3 (= 8)
+MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8
 # 2**16 (= 65,536)
 CHURN_LIMIT_QUOTIENT: 65536
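`MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT`, added here for mainnet, is the EIP-7514 cap on the validator activation queue. Roughly following the Deneb spec's `get_validator_activation_churn_limit` (a simplified sketch with mainnet values, not Lighthouse's implementation):

```rust
// EIP-7514: the activation side of the churn is the pre-Deneb churn limit,
// clamped from above by MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT.
const MIN_PER_EPOCH_CHURN_LIMIT: u64 = 4;
const CHURN_LIMIT_QUOTIENT: u64 = 65_536; // mainnet
const MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: u64 = 8; // [New in Deneb:EIP7514]

fn validator_churn_limit(active_validators: u64) -> u64 {
    MIN_PER_EPOCH_CHURN_LIMIT.max(active_validators / CHURN_LIMIT_QUOTIENT)
}

fn activation_churn_limit(active_validators: u64) -> u64 {
    MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT.min(validator_churn_limit(active_validators))
}

fn main() {
    // Below 8 * 65536 active validators the pre-Deneb churn applies...
    assert_eq!(activation_churn_limit(400_000), 6);
    // ...beyond that, activations are capped at 8 per epoch.
    assert_eq!(activation_churn_limit(1_000_000), 8);
}
```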
diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml
index a0dd85fec..1928aeb30 100644
--- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml
+++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml
@@ -1,16 +1,19 @@
 # Prater config
 # Extends the mainnet preset
-CONFIG_NAME: 'prater'
 PRESET_BASE: 'mainnet'
+CONFIG_NAME: 'prater'
+
 # Transition
 # ---------------------------------------------------------------
+# Expected August 10, 2022
 TERMINAL_TOTAL_DIFFICULTY: 10790000
 # By default, don't use these params
 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
+
 # Genesis
 # ---------------------------------------------------------------
 # `2**14` (= 16,384)
@@ -32,19 +35,15 @@ GENESIS_DELAY: 1919188
 # Altair
 ALTAIR_FORK_VERSION: 0x01001020
 ALTAIR_FORK_EPOCH: 36660
-# Merge
+# Bellatrix
 BELLATRIX_FORK_VERSION: 0x02001020
 BELLATRIX_FORK_EPOCH: 112260
 # Capella
 CAPELLA_FORK_VERSION: 0x03001020
 CAPELLA_FORK_EPOCH: 162304
-# Sharding
-SHARDING_FORK_VERSION: 0x04001020
-SHARDING_FORK_EPOCH: 18446744073709551615
-
-# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D.
-TRANSITION_TOTAL_DIFFICULTY: 4294967296
-
+# Deneb
+DENEB_FORK_VERSION: 0x04001020
+DENEB_FORK_EPOCH: 231680

 # Time parameters
 # ---------------------------------------------------------------
@@ -72,7 +71,8 @@ EJECTION_BALANCE: 16000000000
 MIN_PER_EPOCH_CHURN_LIMIT: 4
 # 2**16 (= 65,536)
 CHURN_LIMIT_QUOTIENT: 65536
-
+# [New in Deneb:EIP7514] 2**3 (= 8)
+MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8

 # Fork choice
 # ---------------------------------------------------------------
@@ -87,16 +87,41 @@ DEPOSIT_NETWORK_ID: 5
 # Prater test deposit contract on Goerli Testnet
 DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b
-# Network
+# Networking
 # ---------------------------------------------------------------
-SUBNETS_PER_NODE: 2
+# `10 * 2**20` (= 10485760, 10 MiB)
 GOSSIP_MAX_SIZE: 10485760
+# `2**10` (= 1024)
+MAX_REQUEST_BLOCKS: 1024
+# `2**8` (= 256)
+EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
+# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months)
 MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
+# `10 * 2**20` (=10485760, 10 MiB)
 MAX_CHUNK_SIZE: 10485760
+# 5s
 TTFB_TIMEOUT: 5
+# 10s
 RESP_TIMEOUT: 10
+ATTESTATION_PROPAGATION_SLOT_RANGE: 32
+# 500ms
+MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500
 MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
 MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
+# 2 subnets per node
+SUBNETS_PER_NODE: 2
+# 2**6 (= 64)
 ATTESTATION_SUBNET_COUNT: 64
 ATTESTATION_SUBNET_EXTRA_BITS: 0
+# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
 ATTESTATION_SUBNET_PREFIX_BITS: 6
+
+# Deneb
+# `2**7` (=128)
+MAX_REQUEST_BLOCKS_DENEB: 128
+# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
+MAX_REQUEST_BLOB_SIDECARS: 768
+# `2**12` (= 4096 epochs, ~18 days)
+MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
+# `6`
+BLOB_SIDECAR_SUBNET_COUNT: 6
diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml
index e3674cf7d..33a5ccb3f 100644
--- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml
+++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml
@@ -32,9 +32,9 @@ TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
 CAPELLA_FORK_VERSION: 0x90000072
 CAPELLA_FORK_EPOCH: 56832
-# Sharding
-SHARDING_FORK_VERSION: 0x04001020
-SHARDING_FORK_EPOCH: 18446744073709551615
+# Deneb
+DENEB_FORK_VERSION: 0x90000073
+DENEB_FORK_EPOCH: 132608

 # Time parameters
 # ---------------------------------------------------------------
@@ -62,7 +62,8 @@ EJECTION_BALANCE: 16000000000
 MIN_PER_EPOCH_CHURN_LIMIT: 4
 # 2**16 (= 65,536)
 CHURN_LIMIT_QUOTIENT: 65536
-
+# [New in Deneb:EIP7514] 2**3 (= 8)
+MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # Fork choice # --------------------------------------------------------------- @@ -75,16 +76,41 @@ DEPOSIT_CHAIN_ID: 11155111 DEPOSIT_NETWORK_ID: 11155111 DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D -# Network +# Networking # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 +# `10 * 2**20` (= 10485760, 10 MiB) GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) MAX_CHUNK_SIZE: 10485760 +# 5s TTFB_TIMEOUT: 5 +# 10s RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) ATTESTATION_SUBNET_COUNT: 64 ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 diff --git a/common/eth2_network_config/built_in_network_configs/trusted_setup.json b/common/eth2_network_config/built_in_network_configs/trusted_setup.json new file mode 100644 index 000000000..de2bf0ac5 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/trusted_setup.json @@ -0,0 +1 @@ +{"g1_lagrange":["0xa0413c0dcafec6dbc9f47d66785cf1e8c981044f7d13cfe3e4fcbb71b5408dfde6312493cb3c1d30516cb3ca88c03654","0x8b997fb25730d661918371bb41f2a6e899cac23f04fc5365800b75433c0a953250e15e7a98fb5ca5cc56a8cd34c20c57","0x83302852db89424d5699f3f157e79e91dc1380f8d5895c5a772bb4ea3a5928e7c26c07db6775203ce33e62a114adaa99","0xa759c48b7e4a685e735c01e5aa6ef9c248705001f470f9ad856cd87806983e917a8742a3bd5ee27db8d76080269b7c83","0x967f8dc45ebc3be14c8705f43249a30ff48e96205fb02ae28daeab47b72eb3f45df0625928582aa1eb4368381c33e127","0xa418eb1e9fb84cb32b370610f56f3cb470706a40ac5a47c411c464299c45c91f25b63ae3fcd623172aa0f273c0526c13","0x8f44e3f0387293bc7931e978165abbaed08f53acd72a0a23ac85f6da0091196b886233bcee5b4a194db02f3d5a9b3f78","0x97173434b336be73c89412a6d70d416e170ea355bf1956c32d464090b107c090ef2d4e1a467a5632fbc332eeb679bf2d","0xa24052ad8d55ad04bc5d951f78e14213435681594110fd18173482609d5019105b8045182d53ffce4fc29fc8810516c1","0xb950768136b260277590b5bec3f56bbc2f7a8bc383d44ce8600e85bf8cf19f479898bcc999d96dfbd2001ede01d94949","0x92ab8077871037bd3b57b95cbb9fb10eb11efde9191690dcac655356986fd02841d8fdb25396faa0feadfe3f50baf56d","0xa79b096dff98038ac30f91112dd14b78f8ad428268af36d20c292e2b3b6d9ed4fb28480bb04e465071cc67d05786b6d1","0xb9ff71461328f370ce68bf591aa7fb13027044f42a575517f3319e2be4aa4843fa281e756d0aa5645428d6dfa857cef2","0x8d765808c00b3543ff182e2d159c38ae174b12d1314da88ea08e13bd9d1c37184cb515e6bf6420531b5d41767987d7ce","0xb8c9a837d20c3b53e6f578e4a257bb7ef8fc43178614ec2a154915b267ad2be135981d01ed2ee1b5fbd9d9bb27f0800a","0xa9773d92cf23f65f98ef68f6cf95c72b53d0683af2f9bf886bb9036e4a38184b1131b26fd24397910b494fbef856f3aa","0xb41ebe38962d112da4a01bf101cb248d808fbd50aaf749fc7c151cf332032eb3e3bdbd716db899724b734d392f26c412","0x90fbb030167fb47dcc13d604a726c0339418567c1d287d1d87423fa0cb92eec3455fbb46bcbe2e69
7144a2d3972142e4","0xb11d298bd167464b35fb923520d14832bd9ed50ed841bf6d7618424fd6f3699190af21759e351b89142d355952149da1","0x8bc36066f69dc89f7c4d1e58d67497675050c6aa002244cebd9fc957ec5e364c46bab4735ea3db02b73b3ca43c96e019","0xab7ab92c5d4d773068e485aa5831941ebd63db7118674ca38089635f3b4186833af2455a6fb9ed2b745df53b3ce96727","0xaf191ca3089892cb943cd97cf11a51f38e38bd9be50844a4e8da99f27e305e876f9ed4ab0628e8ae3939066b7d34a15f","0xa3204c1747feabc2c11339a542195e7cb6628fd3964f846e71e2e3f2d6bb379a5e51700682ea1844eba12756adb13216","0x903a29883846b7c50c15968b20e30c471aeac07b872c40a4d19eb1a42da18b649d5bbfde4b4cf6225d215a461b0deb6d","0x8e6e9c15ffbf1e16e5865a5fef7ed751dc81957a9757b535cb38b649e1098cda25d42381dc4f776778573cdf90c3e6e0","0xa8f6dd26100b512a8c96c52e00715c4b2cb9ac457f17aed8ffe1cf1ea524068fe5a1ddf218149845fc1417b789ecfc98","0xa5b0ffc819451ea639cfd1c18cbc9365cc79368d3b2e736c0ae54eba2f0801e6eb0ee14a5f373f4a70ca463bdb696c09","0x879f91ccd56a1b9736fbfd20d8747354da743fb121f0e308a0d298ff0d9344431890e41da66b5009af3f442c636b4f43","0x81bf3a2d9755e206b515a508ac4d1109bf933c282a46a4ae4a1b4cb4a94e1d23642fad6bd452428845afa155742ade7e","0x8de778d4742f945df40004964e165592f9c6b1946263adcdd5a88b00244bda46c7bb49098c8eb6b3d97a0dd46148a8ca","0xb7a57b21d13121907ee28c5c1f80ee2e3e83a3135a8101e933cf57171209a96173ff5037f5af606e9fd6d066de6ed693","0xb0877d1963fd9200414a38753dffd9f23a10eb3198912790d7eddbc9f6b477019d52ddd4ebdcb9f60818db076938a5a9","0x88da2d7a6611bc16adc55fc1c377480c828aba4496c645e3efe0e1a67f333c05a0307f7f1d2df8ac013602c655c6e209","0x95719eb02e8a9dede1a888c656a778b1c69b7716fbe3d1538fe8afd4a1bc972183c7d32aa7d6073376f7701df80116d8","0x8e8a1ca971f2444b35af3376e85dccda3abb8e8e11d095d0a4c37628dfe5d3e043a377c3de68289ef142e4308e9941a0","0xb720caaff02f6d798ac84c4f527203e823ff685869e3943c979e388e1c34c3f77f5c242c6daa7e3b30e511aab917b866","0x86040d55809afeec10e315d1ad950d269d37cfee8c144cd8dd4126459e3b15a53b3e68df5981df3c2346d23c7b4baaf4","0x82d8cabf13ab853db0377504f0aec00dba3a5cd3119787e8ad378ddf2c40b022ecfc67c642b7acc8c1e3dd03ab50993e","0xb8d873927936719d2484cd03a6687d65697e17dcf4f0d5aed6f5e4750f52ef2133d4645894e7ebfc4ef6ce6788d404c8","0xb1235594dbb15b674a419ff2b2deb644ad2a93791ca05af402823f87114483d6aa1689b7a9bea0f547ad12fe270e4344","0xa53fda86571b0651f5affb74312551a082fffc0385cfd24c1d779985b72a5b1cf7c78b42b4f7e51e77055f8e5e915b00","0xb579adcfd9c6ef916a5a999e77a0cb21d378c4ea67e13b7c58709d5da23a56c2e54218691fc4ac39a4a3d74f88cc31f7","0xab79e584011713e8a2f583e483a91a0c2a40771b77d91475825b5acbea82db4262132901cb3e4a108c46d7c9ee217a4e","0xa0fe58ea9eb982d7654c8aaf9366230578fc1362f6faae0594f8b9e659bcb405dff4aac0c7888bbe07f614ecf0d800a6","0x867e50e74281f28ecd4925560e2e7a6f8911b135557b688254623acce0dbc41e23ac3e706a184a45d54c586edc416eb0","0x89f81b61adda20ea9d0b387a36d0ab073dc7c7cbff518501962038be19867042f11fcc7ff78096e5d3b68c6d8dc04d9b","0xa58ee91bb556d43cf01f1398c5811f76dc0f11efdd569eed9ef178b3b0715e122060ec8f945b4dbf6eebfa2b90af6fa6","0xac460be540f4c840def2eef19fc754a9af34608d107cbadb53334cf194cc91138d53b9538fcd0ec970b5d4aa455b224a","0xb09b91f929de52c09d48ca0893be6eb44e2f5210a6c394689dc1f7729d4be4e11d0474b178e80cea8c2ac0d081f0e811","0x8d37a442a76b06a02a4e64c2504aea72c8b9b020ab7bcc94580fe2b9603c7c50d7b1e9d70d2a7daea19c68667e8f8c31","0xa9838d4c4e3f3a0075a952cf7dd623307ec633fcc81a7cf9e52e66c31780de33dbb3d74c320dc7f0a4b72f7a49949515","0xa44766b6251af458fe4f5f9ed1e02950f35703520b8656f09fc42d9a2d38a700c11a7c8a0436ac2e5e9f053d0bb8ff91","0xad78d9481c840f5202546bea0d13c776826feb8b1b7c72e83d99a947622f0bf38a4208551c4c41beb1270d7792075457","0
xb619ffa8733b470039451e224b777845021e8dc1125f247a4ff2476cc774657d0ff9c5279da841fc1236047de9d81c60","0xaf760b0a30a1d6af3bc5cd6686f396bd41779aeeb6e0d70a09349bd5da17ca2e7965afc5c8ec22744198fbe3f02fb331","0xa0cc209abdb768b589fcb7b376b6e1cac07743288c95a1cf1a0354b47f0cf91fca78a75c1fcafa6f5926d6c379116608","0x864add673c89c41c754eeb3cd8dcff5cdde1d739fce65c30e474a082bb5d813cba6412e61154ce88fdb6c12c5d9be35b","0xb091443b0ce279327dc37cb484e9a5b69b257a714ce21895d67539172f95ffa326903747b64a3649e99aea7bb10d03f7","0xa8c452b8c4ca8e0a61942a8e08e28f17fb0ef4c5b018b4e6d1a64038280afa2bf1169202f05f14af24a06ca72f448ccd","0xa23c24721d18bc48d5dcf70effcbef89a7ae24e67158d70ae1d8169ee75d9a051d34b14e9cf06488bac324fe58549f26","0x92a730e30eb5f3231feb85f6720489dbb1afd42c43f05a1610c6b3c67bb949ec8fde507e924498f4ffc646f7b07d9123","0x8dbe5abf4031ec9ba6bb06d1a47dd1121fb9e03b652804069250967fd5e9577d0039e233441b7f837a7c9d67ba18c28e","0xaa456bcfef6a21bb88181482b279df260297b3778e84594ebddbdf337e85d9e3d46ca1d0b516622fb0b103df8ec519b7","0xa3b31ae621bd210a2b767e0e6f22eb28fe3c4943498a7e91753225426168b9a26da0e02f1dc5264da53a5ad240d9f51b","0xaa8d66857127e6e71874ce2202923385a7d2818b84cb73a6c42d71afe70972a70c6bdd2aad1a6e8c5e4ca728382a8ea8","0xac7e8e7a82f439127a5e40558d90d17990f8229852d21c13d753c2e97facf077cf59582b603984c3dd3faebd80aff4f5","0x93a8bcf4159f455d1baa73d2ef2450dcd4100420de84169bbe28b8b7a5d1746273f870091a87a057e834f754f34204b1","0x89d0ebb287c3613cdcae7f5acc43f17f09c0213fc40c074660120b755d664109ffb9902ed981ede79e018ddb0c845698","0xa87ccbfad431406aadbee878d9cf7d91b13649d5f7e19938b7dfd32645a43b114eef64ff3a13201398bd9b0337832e5a","0x833c51d0d0048f70c3eefb4e70e4ff66d0809c41838e8d2c21c288dd3ae9d9dfaf26d1742bf4976dab83a2b381677011","0x8bcd6b1c3b02fffead432e8b1680bad0a1ac5a712d4225e220690ee18df3e7406e2769e1f309e2e803b850bc96f0e768","0xb61e3dbd88aaf4ff1401521781e2eea9ef8b66d1fac5387c83b1da9e65c2aa2a56c262dea9eceeb4ad86c90211672db0","0x866d3090db944ecf190dd0651abf67659caafd31ae861bab9992c1e3915cb0952da7c561cc7e203560a610f48fae633b","0xa5e8971543c14274a8dc892b0be188c1b4fbc75c692ed29f166e0ea80874bc5520c2791342b7c1d2fb5dd454b03b8a5b","0x8f2f9fc50471bae9ea87487ebd1bc8576ef844cc42d606af5c4c0969670fdf2189afd643e4de3145864e7773d215f37f","0xb1bb0f2527db6d51f42b9224383c0f96048bbc03d469bf01fe1383173ef8b1cc9455d9dd8ba04d46057f46949bfc92b5","0xaa7c99d906b4d7922296cfe2520473fc50137c03d68b7865c5bfb8adbc316b1034310ec4b5670c47295f4a80fb8d61e9","0xa5d1da4d6aba555919df44cbaa8ff79378a1c9e2cfdfbf9d39c63a4a00f284c5a5724e28ecbc2d9dba27fe4ee5018bd5","0xa8db53224f70af4d991b9aae4ffe92d2aa5b618ad9137784b55843e9f16cefbfd25ada355d308e9bbf55f6d2f7976fb3","0xb6536c4232bb20e22af1a8bb12de76d5fec2ad9a3b48af1f38fa67e0f8504ef60f305a73d19385095bb6a9603fe29889","0x87f7e371a1817a63d6838a8cf4ab3a8473d19ce0d4f40fd013c03d5ddd5f4985df2956531cc9f187928ef54c68f4f9a9","0xae13530b1dbc5e4dced9d909ea61286ec09e25c12f37a1ed2f309b0eb99863d236c3b25ed3484acc8c076ad2fa8cd430","0x98928d850247c6f7606190e687d5c94a627550198dbdbea0161ef9515eacdb1a0f195cae3bb293112179082daccf8b35","0x918528bb8e6a055ad4db6230d3a405e9e55866da15c4721f5ddd1f1f37962d4904aad7a419218fe6d906fe191a991806","0xb71e31a06afe065773dd3f4a6e9ef81c3292e27a3b7fdfdd452d03e05af3b6dd654c355f7516b2a93553360c6681a73a","0x8870b83ab78a98820866f91ac643af9f3ff792a2b7fda34185a9456a63abdce42bfe8ad4dc67f08a6392f250d4062df4","0x91eea1b668e52f7a7a5087fabf1cab803b0316f78d9fff469fbfde2162f660c250e4336a9eea4cb0450bd30ac067bc8b","0x8b74990946de7b72a92147ceac1bd9d55999a8b576e8df68639e40ed5dc2062cfcd727903133de482b6dca19d0aaed82","0x8ebad537fece090ebba
b662bdf2618e21ca30cf6329c50935e8346d1217dcbe3c1fe1ea28efca369c6003ce0a94703c1","0xa8640479556fb59ebd1c40c5f368fbd960932fdbb782665e4a0e24e2bdb598fc0164ce8c0726d7759cfc59e60a62e182","0xa9a52a6bf98ee4d749f6d38be2c60a6d54b64d5cbe4e67266633dc096cf28c97fe998596707d31968cbe2064b72256bf","0x847953c48a4ce6032780e9b39d0ed4384e0be202c2bbe2dfda3910f5d87aa5cd3c2ffbfcfae4dddce16d6ab657599b95","0xb6f6e1485d3ec2a06abaecd23028b200b2e4a0096c16144d07403e1720ff8f9ba9d919016b5eb8dc5103880a7a77a1d3","0x98dfc2065b1622f596dbe27131ea60bef7a193b12922cecb27f8c571404f483014f8014572e86ae2e341ab738e4887ef","0xacb0d205566bacc87bbe2e25d10793f63f7a1f27fd9e58f4f653ceae3ffeba511eaf658e068fad289eeb28f9edbeb35b","0xae4411ed5b263673cee894c11fe4abc72a4bf642d94022a5c0f3369380fcdfc1c21e277f2902972252503f91ada3029a","0xac4a7a27ba390a75d0a247d93d4a8ef1f0485f8d373a4af4e1139369ec274b91b3464d9738eeaceb19cd6f509e2f8262","0x87379c3bf231fdafcf6472a79e9e55a938d851d4dd662ab6e0d95fd47a478ed99e2ad1e6e39be3c0fc4f6d996a7dd833","0x81316904b035a8bcc2041199a789a2e6879486ba9fddcba0a82c745cc8dd8374a39e523b91792170cd30be7aa3005b85","0xb8206809c6cd027ed019f472581b45f7e12288f89047928ba32b4856b6560ad30395830d71e5e30c556f6f182b1fe690","0x88d76c028f534a62e019b4a52967bb8642ede6becfa3807be68fdd36d366fc84a4ac8dc176e80a68bc59eb62caf5dff9","0x8c3b8be685b0f8aad131ee7544d0e12f223f08a6f8edaf464b385ac644e0ddc9eff7cc7cb5c1b50ab5d71ea0f41d2213","0x8d91410e004f76c50fdc05784157b4d839cb5090022c629c7c97a5e0c3536eeafee17a527b54b1165c3cd81774bb54ce","0xb25c2863bc28ec5281ce800ddf91a7e1a53f4c6d5da1e6c86ef4616e93bcf55ed49e297216d01379f5c6e7b3c1e46728","0x865f7b09ac3ca03f20be90c48f6975dd2588838c2536c7a3532a6aa5187ed0b709cd03d91ff4048061c10d0aa72b69ce","0xb3f7477c90c11596eb4f8bbf34adbcb832638c4ff3cdd090d4d477ee50472ac9ddaf5be9ad7eca3f148960d362bbd098","0x8db35fd53fca04faecd1c76a8227160b3ab46ac1af070f2492445a19d8ff7c25bbaef6c9fa0c8c088444561e9f7e4eb2","0xa478b6e9d058a2e01d2fc053b739092e113c23a6a2770a16afbef044a3709a9e32f425ace9ba7981325f02667c3f9609","0x98caa6bd38916c08cf221722a675a4f7577f33452623de801d2b3429595f988090907a7e99960fff7c076d6d8e877b31","0xb79aaaacefc49c3038a14d2ac468cfec8c2161e88bdae91798d63552cdbe39e0e02f9225717436b9b8a40a022c633c6e","0x845a31006c680ee6a0cc41d3dc6c0c95d833fcf426f2e7c573fa15b2c4c641fbd6fe5ebb0e23720cc3467d6ee1d80dc4","0xa1bc287e272cf8b74dbf6405b3a5190883195806aa351f1dc8e525aa342283f0a35ff687e3b434324dedee74946dd185","0xa4fd2dc8db75d3783a020856e2b3aa266dc6926e84f5c491ef739a3bddd46dc8e9e0fc1177937839ef1b18d062ffbb9e","0xacbf0d3c697f57c202bb8c5dc4f3fc341b8fc509a455d44bd86acc67cad2a04495d5537bcd3e98680185e8aa286f2587","0xa5caf423a917352e1b8e844f5968a6da4fdeae467d10c6f4bbd82b5eea46a660b82d2f5440d3641c717b2c3c9ed0be52","0x8a39d763c08b926599ab1233219c49c825368fad14d9afc7c0c039224d37c00d8743293fd21645bf0b91eaf579a99867","0xb2b53a496def0ba06e80b28f36530fbe0fb5d70a601a2f10722e59abee529369c1ae8fd0f2db9184dd4a2519bb832d94","0xa73980fcef053f1b60ebbb5d78ba6332a475e0b96a0c724741a3abf3b59dd344772527f07203cf4c9cb5155ebed81fa0","0xa070d20acce42518ece322c9db096f16aed620303a39d8d5735a0df6e70fbeceb940e8d9f5cc38f3314b2240394ec47b","0xa50cf591f522f19ca337b73089557f75929d9f645f3e57d4f241e14cdd1ea3fb48d84bcf05e4f0377afbb789fbdb5d20","0x82a5ffce451096aca8eeb0cd2ae9d83db3ed76da3f531a80d9a70a346359bf05d74863ce6a7c848522b526156a5e20cd","0x88e0e84d358cbb93755a906f329db1537c3894845f32b9b0b691c29cbb455373d9452fadd1e77e20a623f6eaf624de6f","0xaa07ac7b84a6d6838826e0b9e350d8ec75e398a52e9824e6b0da6ae4010e5943fec4f00239e96433f291fef9d1d1e609","0xac8887bf39366034bc63f6cc5db0c26fd27307c
bc3d6cce47894a8a019c22dd51322fb5096edc018227edfafc053a8f6","0xb7d26c26c5b33f77422191dca94977588ab1d4b9ce7d0e19c4a3b4cd1c25211b78c328dbf81e755e78cd7d1d622ad23e","0x99a676d5af49f0ba44047009298d8474cabf2d5bca1a76ba21eff7ee3c4691a102fdefea27bc948ccad8894a658abd02","0xb0d09a91909ab3620c183bdf1d53d43d39eb750dc7a722c661c3de3a1a5d383ad221f71bae374f8a71867505958a3f76","0x84681a883de8e4b93d68ac10e91899c2bbb815ce2de74bb48a11a6113b2a3f4df8aceabda1f5f67bc5aacac8c9da7221","0x9470259957780fa9b43521fab3644f555f5343281c72582b56d2efd11991d897b3b481cafa48681c5aeb80c9663b68f7","0xab1b29f7ece686e6fa968a4815da1d64f3579fed3bc92e1f3e51cd13a3c076b6cf695ed269d373300a62463dc98a4234","0x8ab415bfcd5f1061f7687597024c96dd9c7cb4942b5989379a7a3b5742f7d394337886317659cbeacaf030234a24f972","0xb9b524aad924f9acc63d002d617488f31b0016e0f0548f050cada285ce7491b74a125621638f19e9c96eabb091d945be","0x8c4c373e79415061837dd0def4f28a2d5d74d21cb13a76c9049ad678ca40228405ab0c3941df49249847ecdefc1a5b78","0xa8edf4710b5ab2929d3db6c1c0e3e242261bbaa8bcec56908ddadd7d2dad2dca9d6eb9de630b960b122ebeea41040421","0x8d66bb3b50b9df8f373163629f9221b3d4b6980a05ea81dc3741bfe9519cf3ebba7ab98e98390bae475e8ede5821bd5c","0x8d3c21bae7f0cfb97c56952bb22084b58e7bb718890935b73103f33adf5e4d99cd262f929c6eeab96209814f0dbae50a","0xa5c66cfab3d9ebf733c4af24bebc97070e7989fe3c73e79ac85fb0e4d40ae44fb571e0fad4ad72560e13ed453900d14f","0x9362e6b50b43dbefbc3254471372297b5dcce809cd3b60bf74a1268ab68bdb50e46e462cbd78f0d6c056330e982846af","0x854630d08e3f0243d570cc2e856234cb4c1a158d9c1883bf028a76525aaa34be897fe918d5f6da9764a3735fa9ebd24a","0x8c7d246985469ff252c3f4df6c7c9196fc79f05c1c66a609d84725c78001d0837c7a7049394ba5cf7e863e2d58af8417","0xae050271e01b528925302e71903f785b782f7bf4e4e7a7f537140219bc352dc7540c657ed03d3a297ad36798ecdb98cd","0x8d2ae9179fcf2b0c69850554580b52c1f4a5bd865af5f3028f222f4acad9c1ad69a8ef6c7dc7b03715ee5c506b74325e","0xb8ef8de6ce6369a8851cd36db0ccf00a85077e816c14c4e601f533330af9e3acf0743a95d28962ed8bfcfc2520ef3cfe","0xa6ecad6fdfb851b40356a8b1060f38235407a0f2706e7b8bb4a13465ca3f81d4f5b99466ac2565c60af15f022d26732e","0x819ff14cdea3ab89d98e133cd2d0379361e2e2c67ad94eeddcdb9232efd509f51d12f4f03ebd4dd953bd262a886281f7","0x8561cd0f7a6dbcddd83fcd7f472d7dbcba95b2d4fb98276f48fccf69f76d284e626d7e41314b633352df8e6333fd52a1","0xb42557ccce32d9a894d538c48712cb3e212d06ac05cd5e0527ccd2db1078ee6ae399bf6a601ffdab1f5913d35fc0b20c","0x89b4008d767aad3c6f93c349d3b956e28307311a5b1cec237e8d74bb0dee7e972c24f347fd56afd915a2342bd7bc32f0","0x877487384b207e53f5492f4e36c832c2227f92d1bb60542cfeb35e025a4a7afc2b885fae2528b33b40ab09510398f83e","0x8c411050b63c9053dd0cd81dacb48753c3d7f162028098e024d17cd6348482703a69df31ad6256e3d25a8bbf7783de39","0xa8506b54a88d17ac10fb1b0d1fe4aa40eae7553a064863d7f6b52ccc4236dd4b82d01dca6ba87da9a239e3069ba879fb","0xb1a24caef9df64750c1350789bb8d8a0db0f39474a1c74ea9ba064b1516db6923f00af8d57c632d58844fb8786c3d47a","0x959d6e255f212b0708c58a2f75cb1fe932248c9d93424612c1b8d1e640149656059737e4db2139afd5556bcdacf3eda2","0x84525af21a8d78748680b6535bbc9dc2f0cf9a1d1740d12f382f6ecb2e73811d6c1da2ad9956070b1a617c61fcff9fe5","0xb74417d84597a485d0a8e1be07bf78f17ebb2e7b3521b748f73935b9afbbd82f34b710fb7749e7d4ab55b0c7f9de127d","0xa4a9aecb19a6bab167af96d8b9d9aa5308eab19e6bfb78f5a580f9bf89bdf250a7b52a09b75f715d651cb73febd08e84","0x9777b30be2c5ffe7d29cc2803a562a32fb43b59d8c3f05a707ab60ec05b28293716230a7d264d7cd9dd358fc031cc13e","0x95dce7a3d4f23ac0050c510999f5fbf8042f771e8f8f94192e17bcbfa213470802ebdbe33a876cb621cf42e275cbfc8b","0xb0b963ebcbbee847ab8ae740478544350b3ac7e86887e4dfb2299ee5096
247cd2b03c1de74c774d9bde94ae2ee2dcd59","0xa4ab20bafa316030264e13f7ef5891a2c3b29ab62e1668fcb5881f50a9acac6adbe3d706c07e62f2539715db768f6c43","0x901478a297669d608e406fe4989be75264b6c8be12169aa9e0ad5234f459ca377f78484ffd2099a2fe2db5e457826427","0x88c76e5c250810c057004a03408b85cd918e0c8903dc55a0dd8bb9b4fc2b25c87f9b8cf5943eb19fbbe99d36490050c5","0x91607322bbad4a4f03fc0012d0821eff5f8c516fda45d1ec1133bface6f858bf04b25547be24159cab931a7aa08344d4","0x843203e07fce3c6c81f84bc6dc5fb5e9d1c50c8811ace522dc66e8658433a0ef9784c947e6a62c11bf705307ef05212e","0x91dd8813a5d6dddcda7b0f87f672b83198cd0959d8311b2b26fb1fae745185c01f796fbd03aad9db9b58482483fdadd8","0x8d15911aacf76c8bcd7136e958febd6963104addcd751ce5c06b6c37213f9c4fb0ffd4e0d12c8e40c36d658999724bfd","0x8a36c5732d3f1b497ebe9250610605ee62a78eaa9e1a45f329d09aaa1061131cf1d9df00f3a7d0fe8ad614a1ff9caaae","0xa407d06affae03660881ce20dab5e2d2d6cddc23cd09b95502a9181c465e57597841144cb34d22889902aff23a76d049","0xb5fd856d0578620a7e25674d9503be7d97a2222900e1b4738c1d81ff6483b144e19e46802e91161e246271f90270e6cf","0x91b7708869cdb5a7317f88c0312d103f8ce90be14fb4f219c2e074045a2a83636fdc3e69e862049fc7c1ef000e832541","0xb64719cc5480709d1dae958f1d3082b32a43376da446c8f9f64cb02a301effc9c34d9102051733315a8179aed94d53cc","0x94347a9542ff9d18f7d9eaa2f4d9b832d0e535fe49d52aa2de08aa8192400eddabdb6444a2a78883e27c779eed7fdf5a","0x840ef44a733ff1376466698cd26f82cf56bb44811e196340467f932efa3ae1ef9958a0701b3b032f50fd9c1d2aed9ab5","0x90ab3f6f67688888a31ffc2a882bb37adab32d1a4b278951a21646f90d03385fc976715fc639a785d015751171016f10","0xb56f35d164c24b557dbcbc8a4bfa681ec916f8741ffcb27fb389c164f4e3ed2be325210ef5bdaeae7a172ca9599ab442","0xa7921a5a80d7cf6ae81ba9ee05e0579b18c20cd2852762c89d6496aa4c8ca9d1ca2434a67b2c16d333ea8e382cdab1e3","0xa506bcfbd7e7e5a92f68a1bd87d07ad5fe3b97aeee40af2bf2cae4efcd77fff03f872732c5b7883aa6584bee65d6f8cb","0xa8c46cff58931a1ce9cbe1501e1da90b174cddd6d50f3dfdfb759d1d4ad4673c0a8feed6c1f24c7af32865a7d6c984e5","0xb45686265a83bff69e312c5149db7bb70ac3ec790dc92e392b54d9c85a656e2bf58596ce269f014a906eafc97461aa5f","0x8d4009a75ccb2f29f54a5f16684b93202c570d7a56ec1a8b20173269c5f7115894f210c26b41e8d54d4072de2d1c75d0","0xaef8810af4fc676bf84a0d57b189760ddc3375c64e982539107422e3de2580b89bd27aa6da44e827b56db1b5555e4ee8","0x888f0e1e4a34f48eb9a18ef4de334c27564d72f2cf8073e3d46d881853ac1424d79e88d8ddb251914890588937c8f711","0xb64b0aa7b3a8f6e0d4b3499fe54e751b8c3e946377c0d5a6dbb677be23736b86a7e8a6be022411601dd75012012c3555","0x8d57776f519f0dd912ea14f79fbab53a30624e102f9575c0bad08d2dc754e6be54f39b11278c290977d9b9c7c0e1e0ad","0xa018fc00d532ceb2e4de908a15606db9b6e0665dd77190e2338da7c87a1713e6b9b61554e7c1462f0f6d4934b960b15c","0x8c932be83ace46f65c78e145b384f58e41546dc0395270c1397874d88626fdeda395c8a289d602b4c312fe98c1311856","0x89174838e21639d6bdd91a0621f04dc056907b88e305dd66e46a08f6d65f731dea72ae87ca5e3042d609e8de8de9aa26","0xb7b7f508bb74f7a827ac8189daa855598ff1d96fa3a02394891fd105d8f0816224cd50ac4bf2ed1cf469ace516c48184","0xb31877ad682583283baadd68dc1bebd83f5748b165aadd7fe9ef61a343773b88bcd3a022f36d6c92f339b7bfd72820a9","0xb79d77260b25daf9126dab7a193df2d7d30542786fa1733ffaf6261734770275d3ca8bae1d9915d1181a78510b3439db","0x91894fb94cd4c1dd2ceaf9c53a7020c5799ba1217cf2d251ea5bc91ed26e1159dd758e98282ebe35a0395ef9f1ed15a0","0xab59895cdafd33934ceedfc3f0d5d89880482cba6c99a6db93245f9e41987efd76e0640e80aef31782c9a8c7a83fccec","0xaa22ea63654315e033e09d4d4432331904a6fc5fb1732557987846e3c564668ca67c60a324b4af01663a23af11a9ce4b","0xb53ba3ef342601467e1f71aa280e100fbabbd38518fa0193e0099505036ee517c1ac78e96e9baeb
549bb6879bb698fb0","0x943fd69fd656f37487cca3605dc7e5a215fddd811caf228595ec428751fc1de484a0cb84c667fe4d7c35599bfa0e5e34","0x9353128b5ebe0dddc555093cf3e5942754f938173541033e8788d7331fafc56f68d9f97b4131e37963ab7f1c8946f5f1","0xa76cd3c566691f65cfb86453b5b31dbaf3cab8f84fe1f795dd1e570784b9b01bdd5f0b3c1e233942b1b5838290e00598","0x983d84b2e53ffa4ae7f3ba29ef2345247ea2377686b74a10479a0ef105ecf90427bf53b74c96dfa346d0f842b6ffb25b","0x92e0fe9063306894a2c6970c001781cff416c87e87cb5fbac927a3192655c3da4063e6fa93539f6ff58efac6adcc5514","0xb00a81f03c2b8703acd4e2e4c21e06973aba696415d0ea1a648ace2b0ea19b242fede10e4f9d7dcd61c546ab878bc8f9","0xb0d08d880f3b456a10bf65cff983f754f545c840c413aea90ce7101a66eb0a0b9b1549d6c4d57725315828607963f15a","0x90cb64d03534f913b411375cce88a9e8b1329ce67a9f89ca5df8a22b8c1c97707fec727dbcbb9737f20c4cf751359277","0x8327c2d42590dfcdb78477fc18dcf71608686ad66c49bce64d7ee874668be7e1c17cc1042a754bbc77c9daf50b2dae07","0x8532171ea13aa7e37178e51a6c775da469d2e26ec854eb16e60f3307db4acec110d2155832c202e9ba525fc99174e3b0","0x83ca44b15393d021de2a511fa5511c5bd4e0ac7d67259dce5a5328f38a3cce9c3a269405959a2486016bc27bb140f9ff","0xb1d36e8ca812be545505c8214943b36cabee48112cf0de369957afa796d37f86bf7249d9f36e8e990f26f1076f292b13","0x9803abf45be5271e2f3164c328d449efc4b8fc92dfc1225d38e09630909fe92e90a5c77618daa5f592d23fc3ad667094","0xb268ad68c7bf432a01039cd889afae815c3e120f57930d463aece10af4fd330b5bd7d8869ef1bcf6b2e78e4229922edc","0xa4c91a0d6f16b1553264592b4cbbbf3ca5da32ab053ffbdd3dbb1aed1afb650fb6e0dc5274f71a51d7160856477228db","0xad89d043c2f0f17806277ffdf3ecf007448e93968663f8a0b674254f36170447b7527d5906035e5e56f4146b89b5af56","0x8b6964f757a72a22a642e4d69102951897e20c21449184e44717bd0681d75f7c5bfa5ee5397f6e53febf85a1810d6ed1","0xb08f5cdaabec910856920cd6e836c830b863eb578423edf0b32529488f71fe8257d90aed4a127448204df498b6815d79","0xaf26bb3358be9d280d39b21d831bb53145c4527a642446073fee5a86215c4c89ff49a3877a7a549486262f6f57a0f476","0xb4010b37ec4d7c2af20800e272539200a6b623ae4636ecbd0e619484f4ab9240d02bc5541ace3a3fb955dc0a3d774212","0x82752ab52bdcc3cc2fc405cb05a2e694d3df4a3a68f2179ec0652536d067b43660b96f85f573f26fbd664a9ef899f650","0x96d392dde067473a81faf2d1fea55b6429126b88b160e39b4210d31d0a82833ffd3a80e07d24d495aea2d96be7251547","0xa76d8236d6671204d440c33ac5b8deb71fa389f6563d80e73be8b043ec77d4c9b06f9a586117c7f957f4af0331cbc871","0xb6c90961f68b5e385d85c9830ec765d22a425f506904c4d506b87d8944c2b2c09615e740ed351df0f9321a7b93979cae","0xa6ec5ea80c7558403485b3b1869cdc63bde239bafdf936d9b62a37031628402a36a2cfa5cfbb8e26ac922cb0a209b3ba","0x8c3195bbdbf9bc0fc95fa7e3d7f739353c947f7767d1e3cb24d8c8602d8ea0a1790ac30b815be2a2ba26caa5227891e2","0xa7f8a63d809f1155722c57f375ea00412b00147776ae4444f342550279ef4415450d6f400000a326bf11fea6c77bf941","0x97fa404df48433a00c85793440e89bb1af44c7267588ae937a1f5d53e01e1c4d4fc8e4a6d517f3978bfdd6c2dfde012f","0xa984a0a3836de3d8d909c4629a2636aacb85393f6f214a2ef68860081e9db05ad608024762db0dc35e895dc00e2d4cdd","0x9526cf088ab90335add1db4d3a4ac631b58cbfbe88fa0845a877d33247d1cfeb85994522e1eb8f8874651bfb1df03e2a","0xac83443fd0afe99ad49de9bf8230158c118e2814c9c89db5ac951c240d6c2ce45e7677221279d9e97848ec466b99aafe","0xaeeefdbaba612e971697798ceaf63b247949dc823a0ad771ae5b988a5e882b338a98d3d0796230f49d533ec5ba411b39","0xae3f248b5a7b0f92b7820a6c5ae21e5bd8f4265d4f6e21a22512079b8ee9be06393fd3133ce8ebac0faf23f4f8517e36","0xa64a831b908eee784b8388b45447d2885ec0551b26b0c2b15e5f417d0a12c79e867fb7bd3d008d0af98b44336f8ec1ad","0xb242238cd8362b6e440ba21806905714dd55172db25ec7195f3fc4937b2aba146d5cbf3cf691a1384b4752dc3b54d627","
0x819f97f337eea1ffb2a678cc25f556f1aab751c6b048993a1d430fe1a3ddd8bb411c152e12ca60ec6e057c190cd1db9a","0xb9d7d187407380df54ee9fef224c54eec1bfabf17dc8abf60765b7951f538f59aa26fffd5846cfe05546c35f59b573f4","0xaa6e3c14efa6a5962812e3f94f8ce673a433f4a82d07a67577285ea0eaa07f8be7115853122d12d6d4e1fdf64c504be1","0x82268bee9c1662d3ddb5fb785abfae6fb8b774190f30267f1d47091d2cd4b3874db4372625aa36c32f27b0eee986269b","0xb236459565b7b966166c4a35b2fa71030b40321821b8e96879d95f0e83a0baf33fa25721f30af4a631df209e25b96061","0x8708d752632d2435d2d5b1db4ad1fa2558d776a013655f88e9a3556d86b71976e7dfe5b8834fdec97682cd94560d0d0d","0xae1424a68ae2dbfb0f01211f11773732a50510b5585c1fb005cb892b2c6a58f4a55490b5c5b4483c6fce40e9d3236a52","0xb3f5f722af9dddb07293c871ce97abbccba0093ca98c8d74b1318fa21396fc1b45b69c15084f63d728f9908442024506","0x9606f3ce5e63886853ca476dc0949e7f1051889d529365c0cb0296fdc02abd088f0f0318ecd2cf36740a3634132d36f6","0xb11a833a49fa138db46b25ff8cdda665295226595bc212c0931b4931d0a55c99da972c12b4ef753f7e37c6332356e350","0xafede34e7dab0a9e074bc19a7daddb27df65735581ca24ad70c891c98b1349fcebbcf3ba6b32c2617fe06a5818dabc2d","0x97993d456e459e66322d01f8eb13918979761c3e8590910453944bdff90b24091bb018ac6499792515c9923be289f99f","0x977e3e967eff19290a192cd11df3667d511b398fb3ac9a5114a0f3707e25a0edcb56105648b1b85a8b7519fc529fc6f6","0xb873a7c88bf58731fe1bf61ff6828bf114cf5228f254083304a4570e854e83748fc98683ddba62d978fff7909f2c5c47","0xad4b2691f6f19da1d123aaa23cca3e876247ed9a4ab23c599afdbc0d3aa49776442a7ceaa996ac550d0313d9b9a36cee","0xb9210713c78e19685608c6475bfa974b57ac276808a443f8b280945c5d5f9c39da43effa294bfb1a6c6f7b6b9f85bf6c","0xa65152f376113e61a0e468759de38d742caa260291b4753391ee408dea55927af08a4d4a9918600a3bdf1df462dffe76","0x8bf8c27ad5140dde7f3d2280fd4cc6b29ab76537e8d7aa7011a9d2796ee3e56e9a60c27b5c2da6c5e14fc866301dc195","0x92fde8effc9f61393a2771155812b863cff2a0c5423d7d40aa04d621d396b44af94ddd376c28e7d2f53c930aea947484","0x97a01d1dd9ee30553ce676011aea97fa93d55038ada95f0057d2362ae9437f3ed13de8290e2ff21e3167dd7ba10b9c3f","0x89affffaa63cb2df3490f76f0d1e1d6ca35c221dd34057176ba739fa18d492355e6d2a5a5ad93a136d3b1fed0bb8aa19","0x928b8e255a77e1f0495c86d3c63b83677b4561a5fcbbe5d3210f1e0fc947496e426d6bf3b49394a5df796c9f25673fc4","0x842a0af91799c9b533e79ee081efe2a634cac6c584c2f054fb7d1db67dde90ae36de36cbf712ec9cd1a0c7ee79e151ea","0xa65b946cf637e090baf2107c9a42f354b390e7316beb8913638130dbc67c918926eb87bec3b1fe92ef72bc77a170fa3b","0xaafc0f19bfd71ab5ae4a8510c7861458b70ad062a44107b1b1dbacbfa44ba3217028c2824bd7058e2fa32455f624040b","0x95269dc787653814e0be899c95dba8cfa384f575a25e671c0806fd80816ad6797dc819d30ae06e1d0ed9cb01c3950d47","0xa1e760f7fa5775a1b2964b719ff961a92083c5c617f637fc46e0c9c20ab233f8686f7f38c3cb27d825c54dd95e93a59b","0xac3b8a7c2317ea967f229eddc3e23e279427f665c4705c7532ed33443f1243d33453c1088f57088d2ab1e3df690a9cc9","0xb787beeddfbfe36dd51ec4efd9cf83e59e84d354c3353cc9c447be53ae53d366ed1c59b686e52a92f002142c8652bfe0","0xb7a64198300cb6716aa7ac6b25621f8bdec46ad5c07a27e165b3f774cdf65bcfdbf31e9bae0c16b44de4b00ada7a4244","0xb8ae9f1452909e0c412c7a7fe075027691ea8df1347f65a5507bc8848f1d2c833d69748076db1129e5b4fb912f65c86c","0x9682e41872456b9fa67def89e71f06d362d6c8ca85c9c48536615bc401442711e1c9803f10ab7f8ab5feaec0f9df20a6","0x88889ff4e271dc1c7e21989cc39f73cde2f0475acd98078281591ff6c944fadeb9954e72334319050205d745d4df73df","0x8f79b5b8159e7fd0d93b0645f3c416464f39aec353b57d99ecf24f96272df8a068ad67a6c90c78d82c63b40bb73989bb","0x838c01a009a3d8558a3f0bdd5e22de21af71ca1aefc8423c91dc577d50920e9516880e87dce3e6d086e11cd45c9052d9","0xb97f1c6eee8a78f137
c840667cc288256e39294268a3009419298a04a1d0087c9c9077b33c917c65caf76637702dda8a","0x972284ce72f96a61c899260203dfa06fc3268981732bef74060641c1a5068ead723e3399431c247ca034b0dae861e8df","0x945a8d52d6d3db6663dbd3110c6587f9e9c44132045eeffba15621576d178315cb52870fa5861669f84f0bee646183fe","0xa0a547b5f0967b1c3e5ec6c6a9a99f0578521489180dfdfbb5561f4d166baac43a2f06f950f645ce991664e167537eed","0xa0592cda5cdddf1340033a745fd13a6eff2021f2e26587116c61c60edead067e0f217bc2bef4172a3c9839b0b978ab35","0xb9c223b65a3281587fa44ec829e609154b32f801fd1de6950e01eafb07a8324243b960d5735288d0f89f0078b2c42b5b","0x99ebfc3b8f9f98249f4d37a0023149ed85edd7a5abe062c8fb30c8c84555258b998bdcdd1d400bc0fa2a4aaa8b224466","0x955b68526e6cb3937b26843270f4e60f9c6c8ece2fa9308fe3e23afa433309c068c66a4bc16ee2cf04220f095e9afce4","0xb766caeafcc00378135ae53397f8a67ed586f5e30795462c4a35853de6681b1f17401a1c40958de32b197c083b7279c1","0x921bf87cad947c2c33fa596d819423c10337a76fe5a63813c0a9dc78a728207ae7b339407a402fc4d0f7cba3af6da6fc","0xa74ba1f3bc3e6c025db411308f49b347ec91da1c916bda9da61e510ec8d71d25e0ac0f124811b7860e5204f93099af27","0xa29b4d144e0bf17a7e8353f2824cef0ce85621396babe8a0b873ca1e8a5f8d508b87866cf86da348470649fceefd735c","0xa8040e12ffc3480dd83a349d06741d1572ef91932c46f5cf03aee8454254156ee95786fd013d5654725e674c920cec32","0x8c4cf34ca60afd33923f219ffed054f90cd3f253ffeb2204a3b61b0183417e366c16c07fae860e362b0f2bfe3e1a1d35","0x8195eede4ddb1c950459df6c396b2e99d83059f282b420acc34220cadeed16ab65c856f2c52568d86d3c682818ed7b37","0x91fff19e54c15932260aa990c7fcb3c3c3da94845cc5aa8740ef56cf9f58d19b4c3c55596f8d6c877f9f4d22921d93aa","0xa3e0bf7e5d02a80b75cf75f2db7e66cb625250c45436e3c136d86297d652590ec97c2311bafe407ad357c79ab29d107b","0x81917ff87e5ed2ae4656b481a63ced9e6e5ff653b8aa6b7986911b8bc1ee5b8ef4f4d7882c3f250f2238e141b227e510","0x915fdbe5e7de09c66c0416ae14a8750db9412e11dc576cf6158755fdcaf67abdbf0fa79b554cac4fe91c4ec245be073f","0x8df27eafb5c3996ba4dc5773c1a45ca77e626b52e454dc1c4058aa94c2067c18332280630cc3d364821ee53bf2b8c130","0x934f8a17c5cbb827d7868f5c8ca00cb027728a841000a16a3428ab16aa28733f16b52f58c9c4fbf75ccc45df72d9c4df","0xb83f4da811f9183c25de8958bc73b504cf790e0f357cbe74ef696efa7aca97ad3b7ead1faf76e9f982c65b6a4d888fc2","0x87188213c8b5c268dc2b6da413f0501c95749e953791b727450af3e43714149c115b596b33b63a2f006a1a271b87efd0","0x83e9e888ab9c3e30761de635d9aabd31248cdd92f7675fc43e4b21fd96a03ec1dc4ad2ec94fec857ffb52683ac98e360","0xb4b9a1823fe2d983dc4ec4e3aaea297e581c3fc5ab4b4af5fa1370caa37af2d1cc7fc6bfc5e7da60ad8fdce27dfe4b24","0x856388bc78aef465dbcdd1f559252e028c9e9a2225c37d645c138e78f008f764124522705822a61326a6d1c79781e189","0xa6431b36db93c3b47353ba22e7c9592c9cdfb9cbdd052ecf2cc3793f5b60c1e89bc96e6bae117bfd047f2308da00dd2f","0xb619972d48e7e4291542dcde08f7a9cdc883c892986ded2f23ccb216e245cd8d9ad1d285347b0f9d7611d63bf4cee2bc","0x8845cca6ff8595955f37440232f8e61d5351500bd016dfadd182b9d39544db77a62f4e0102ff74dd4173ae2c181d24ef","0xb2f5f7fa26dcd3b6550879520172db2d64ee6aaa213cbef1a12befbce03f0973a22eb4e5d7b977f466ac2bf8323dcedd","0x858b7f7e2d44bdf5235841164aa8b4f3d33934e8cb122794d90e0c1cac726417b220529e4f896d7b77902ab0ccd35b3a","0x80b0408a092dae2b287a5e32ea1ad52b78b10e9c12f49282976cd738f5d834e03d1ad59b09c5ccaccc39818b87d06092","0xb996b0a9c6a2d14d984edcd6ab56bc941674102980d65b3ad9733455f49473d3f587c8cbf661228a7e125ddbe07e3198","0x90224fcebb36865293bd63af786e0c5ade6b67c4938d77eb0cbae730d514fdd0fe2d6632788e858afd29d46310cf86df","0xb71351fdfff7168b0a5ec48397ecc27ac36657a8033d9981e97002dcca0303e3715ce6dd3f39423bc8ef286fa2e9e669","0xae2a3f078b89fb753ce4ed87e0c1a58bb19b4f
0cfb6586dedb9fcab99d097d659a489fb40e14651741e1375cfc4b6c5f","0x8ef476b118e0b868caed297c161f4231bbeb863cdfa5e2eaa0fc6b6669425ce7af50dc374abceac154c287de50c22307","0x92e46ab472c56cfc6458955270d3c72b7bde563bb32f7d4ab4d959db6f885764a3d864e1aa19802fefaa5e16b0cb0b54","0x96a3f68323d1c94e73d5938a18a377af31b782f56212de3f489d22bc289cf24793a95b37f1d6776edf88114b5c1fa695","0x962cc068cfce6faaa27213c4e43e44eeff0dfbb6d25b814e82c7da981fb81d7d91868fa2344f05fb552362f98cfd4a72","0x895d4e4c4ad670abf66d43d59675b1add7afad7438ada8f42a0360c704cee2060f9ac15b4d27e9b9d0996bb801276fe3","0xb3ad18d7ece71f89f2ef749b853c45dc56bf1c796250024b39a1e91ed11ca32713864049c9aaaea60cde309b47486bbf","0x8f05404e0c0258fdbae50e97ccb9b72ee17e0bd2400d9102c0dad981dac8c4c71585f03e9b5d50086d0a2d3334cb55d1","0x8bd877e9d4591d02c63c6f9fc9976c109de2d0d2df2bfa5f6a3232bab5b0b8b46e255679520480c2d7a318545efa1245","0x8d4c16b5d98957c9da13d3f36c46f176e64e5be879f22be3179a2c0e624fe4758a82bf8c8027410002f973a3b84cd55a","0x86e2a8dea86427b424fa8eada881bdff896907084a495546e66556cbdf070b78ba312bf441eb1be6a80006d25d5097a3","0x8608b0c117fd8652fdab0495b08fadbeba95d9c37068e570de6fddfef1ba4a1773b42ac2be212836141d1bdcdef11a17","0xa13d6febf5fb993ae76cae08423ca28da8b818d6ef0fde32976a4db57839cd45b085026b28ee5795f10a9a8e3098c683","0x8e261967fa6de96f00bc94a199d7f72896a6ad8a7bbb1d6187cca8fad824e522880e20f766620f4f7e191c53321d70f9","0x8b8e8972ac0218d7e3d922c734302803878ad508ca19f5f012bc047babd8a5c5a53deb5fe7c15a4c00fd6d1cb9b1dbd0","0xb5616b233fb3574a2717d125a434a2682ff68546dccf116dd8a3b750a096982f185614b9fb6c7678107ff40a451f56fa","0xaa6adf9b0c3334b0d0663f583a4914523b2ac2e7adffdb026ab9109295ff6af003ef8357026dbcf789896d2afded8d73","0xacb72df56a0b65496cd534448ed4f62950bb1e11e50873b6ed349c088ee364441821294ce0f7c61bd7d38105bea3b442","0xabae12df83e01ec947249fedd0115dc501d2b03ff7232092979eda531dbbca29ace1d46923427c7dde4c17bdf3fd7708","0x820b4fc2b63a9fda7964acf5caf19a2fc4965007cb6d6b511fcafcb1f71c3f673a1c0791d3f86e3a9a1eb6955b191cc0","0xaf277259d78c6b0f4f030a10c53577555df5e83319ddbad91afbd7c30bc58e7671c56d00d66ec3ab5ef56470cd910cee","0xad4a861c59f1f5ca1beedd488fb3d131dea924fffd8e038741a1a7371fad7370ca5cf80dc01f177fbb9576713bb9a5b3","0xb67a5162982ce6a55ccfb2f177b1ec26b110043cf18abd6a6c451cf140b5af2d634591eb4f28ad92177d8c7e5cd0a5e8","0x96176d0a83816330187798072d449cbfccff682561e668faf6b1220c9a6535b32a6e4f852e8abb00f79abb87493df16b","0xb0afe6e7cb672e18f0206e4423f51f8bd0017bf464c4b186d46332c5a5847647f89ff7fa4801a41c1b0b42f6135bcc92","0x8fc5e7a95ef20c1278c645892811f6fe3f15c431ebc998a32ec0da44e7213ea934ed2be65239f3f49b8ec471e9914160","0xb7793e41adda6c82ba1f2a31f656f6205f65bf8a3d50d836ee631bc7ce77c153345a2d0fc5c60edf8b37457c3729c4ec","0xa504dd7e4d6b2f4379f22cc867c65535079c75ccc575955f961677fa63ecb9f74026fa2f60c9fb6323c1699259e5e9c8","0xab899d00ae693649cc1afdf30fb80d728973d2177c006e428bf61c7be01e183866614e05410041bc82cb14a33330e69c","0x8a3bd8b0b1be570b65c4432a0f6dc42f48a2000e30ab089cf781d38f4090467b54f79c0d472fcbf18ef6a00df69cc6f3","0xb4d7028f7f76a96a3d7803fca7f507ae11a77c5346e9cdfccb120a833a59bda1f4264e425aa588e7a16f8e7638061d84","0xb9c7511a76ea5fb105de905d44b02edb17008335766ee357ed386b7b3cf19640a98b38785cb14603c1192bee5886c9b6","0x8563afb12e53aed71ac7103ab8602bfa8371ae095207cb0d59e8fd389b6ad1aff0641147e53cb6a7ca16c7f37c9c5e6b","0x8e108be614604e09974a9ed90960c28c4ea330a3d9a0cb4af6dd6f193f84ab282b243ecdf549b3131036bebc8905690c","0xb794d127fbedb9c5b58e31822361706ffac55ce023fbfe55716c3c48c2fd2f2c7660a67346864dfe588812d369cb50b6","0xb797a3442fc3b44f41baefd30346f9ac7f96e770d010d53c146ce74ce4
24c10fb62758b7e108b8abfdc5fafd89d745cb","0x993bb71e031e8096442e6205625e1bfddfe6dd6a83a81f3e2f84fafa9e5082ab4cad80a099f21eff2e81c83457c725c3","0x8711ab833fc03e37acf2e1e74cfd9133b101ff4144fe30260654398ae48912ab46549d552eb9d15d2ea57760d35ac62e","0xb21321fd2a12083863a1576c5930e1aecb330391ef83326d9d92e1f6f0d066d1394519284ddab55b2cb77417d4b0292f","0x877d98f731ffe3ee94b0b5b72d127630fa8a96f6ca4f913d2aa581f67732df6709493693053b3e22b0181632ac6c1e3b","0xae391c12e0eb8c145103c62ea64f41345973311c3bf7281fa6bf9b7faafac87bcf0998e5649b9ef81e288c369c827e07","0xb83a2842f36998890492ab1cd5a088d9423d192681b9a3a90ec518d4c541bce63e6c5f4df0f734f31fbfdd87785a2463","0xa21b6a790011396e1569ec5b2a423857b9bec16f543e63af28024e116c1ea24a3b96e8e4c75c6537c3e4611fd265e896","0xb4251a9c4aab3a495da7a42e684ba4860dbcf940ad1da4b6d5ec46050cbe8dab0ab9ae6b63b5879de97b905723a41576","0x8222f70aebfe6ac037f8543a08498f4cadb3edaac00336fc00437eb09f2cba758f6c38e887cc634b4d5b7112b6334836","0x86f05038e060594c46b5d94621a1d9620aa8ba59a6995baf448734e21f58e23c1ea2993d3002ad5250d6edd5ba59b34f","0xa7c0c749baef811ab31b973c39ceb1d94750e2bc559c90dc5eeb20d8bb6b78586a2b363c599ba2107d6be65cd435f24e","0x861d46a5d70b38d6c1cd72817a2813803d9f34c00320c8b62f8b9deb67f5b5687bc0b37c16d28fd017367b92e05da9ca","0xb3365d3dab639bffbe38e35383686a435c8c88b397b717cd4aeced2772ea1053ceb670f811f883f4e02975e5f1c4ac58","0xa5750285f61ab8f64cd771f6466e2c0395e01b692fd878f2ef2d5c78bdd8212a73a3b1dfa5e4c8d9e1afda7c84857d3b","0x835a10809ccf939bc46cf950a33b36d71be418774f51861f1cd98a016ade30f289114a88225a2c11e771b8b346cbe6ef","0xa4f59473a037077181a0a62f1856ec271028546ca9452b45cedfcb229d0f4d1aabfc13062b07e536cc8a0d4b113156a2","0x95cd14802180b224d44a73cc1ed599d6c4ca62ddcaa503513ccdc80aaa8be050cc98bd4b4f3b639549beb4587ac6caf9","0x973b731992a3e69996253d7f36dd7a0af1982b5ed21624b77a7965d69e9a377b010d6dabf88a8a97eec2a476259859cc","0xaf8a1655d6f9c78c8eb9a95051aa3baaf9c811adf0ae8c944a8d3fcba87b15f61021f3baf6996fa0aa51c81b3cb69de1","0x835aad5c56872d2a2d6c252507b85dd742bf9b8c211ccb6b25b52d15c07245b6d89b2a40f722aeb5083a47cca159c947","0xabf4e970b02bef8a102df983e22e97e2541dd3650b46e26be9ee394a3ea8b577019331857241d3d12b41d4eacd29a3ac","0xa13c32449dbedf158721c13db9539ae076a6ce5aeaf68491e90e6ad4e20e20d1cdcc4a89ed9fd49cb8c0dd50c17633c1","0x8c8f78f88b7e22dd7e9150ab1c000f10c28e696e21d85d6469a6fe315254740f32e73d81ab1f3c1cf8f544c86df506e8","0xb4b77f2acfe945abf81f2605f906c10b88fb4d28628487fb4feb3a09f17f28e9780445dfcee4878349d4c6387a9d17d4","0x8d255c235f3812c6ecc646f855fa3832be5cb4dbb9c9e544989fafdf3f69f05bfd370732eaf954012f0044aa013fc9c6","0xb982efd3f34b47df37c910148ac56a84e8116647bea24145a49e34e0a6c0176e3284d838dae6230cb40d0be91c078b85","0x983f365aa09bd85df2a6a2ad8e4318996b1e27d02090755391d4486144e40d80b1fbfe1c798d626db92f52e33aa634da","0x95fd1981271f3ea3a41d654cf497e6696730d9ff7369f26bc4d7d15c7adb4823dd0c42e4a005a810af12d234065e5390","0xa9f5219bd4b913c186ef30c02f995a08f0f6f1462614ea5f236964e02bdaa33db9d9b816c4aee5829947840a9a07ba60","0x9210e6ceb05c09b46fd09d036287ca33c45124ab86315e5d6911ff89054f1101faaa3e83d123b7805056d388bcec6664","0x8ed9cbf69c6ff3a5c62dd9fe0d7264578c0f826a29e614bc2fb4d621d90c8c9992438accdd7a614b1dca5d1bb73dc315","0x85cf2a8cca93e00da459e3cecd22c342d697eee13c74d5851634844fc215f60053cf84b0e03c327cb395f48d1c71a8a4","0x8818a18e9a2ec90a271b784400c1903089ffb0e0b40bc5abbbe12fbebe0f731f91959d98c5519ef1694543e31e2016d4","0x8dabc130f296fa7a82870bf9a8405aaf542b222ed9276bba9bd3c3555a0f473acb97d655ee7280baff766a827a8993f0","0xac7952b84b0dc60c4d858f034093b4d322c35959605a3dad2b806af9813a4680cb038c6d7f4485
b4d6b2ff502aaeca25","0xad65cb6d57b48a2602568d2ec8010baed0eb440eec7638c5ec8f02687d764e9de5b5d42ad5582934e592b48471c22d26","0xa02ab8bd4c3d114ea23aebdd880952f9495912817da8c0c08eabc4e6755439899d635034413d51134c72a6320f807f1c","0x8319567764b8295402ec1ebef4c2930a138480b37e6d7d01c8b4c9cd1f2fc3f6e9a44ae6e380a0c469b25b06db23305f","0xafec53b2301dc0caa8034cd9daef78c48905e6068d692ca23d589b84a6fa9ddc2ed24a39480597e19cb3e83eec213b3f","0xac0b4ffdb5ae08e586a9cdb98f9fe56f4712af3a97065e89e274feacfb52b53c839565aee93c4cfaaccfe51432c4fab0","0x8972cbf07a738549205b1094c5987818124144bf187bc0a85287c94fdb22ce038c0f11df1aa16ec5992e91b44d1af793","0xb7267aa6f9e3de864179b7da30319f1d4cb2a3560f2ea980254775963f1523b44c680f917095879bebfa3dc2b603efcf","0x80f68f4bfc337952e29504ee5149f15093824ea7ab02507efd1317a670f6cbc3611201848560312e3e52e9d9af72eccf","0x8897fee93ce8fc1e1122e46b6d640bba309384dbd92e46e185e6364aa8210ebf5f9ee7e5e604b6ffba99aa80a10dd7d0","0xb58ea6c02f2360be60595223d692e82ee64874fda41a9f75930f7d28586f89be34b1083e03bbc1575bbfdda2d30db1ea","0x85a523a33d903280d70ac5938770453a58293480170c84926457ac2df45c10d5ff34322ab130ef4a38c916e70d81af53","0xa2cbf045e1bed38937492c1f2f93a5ba41875f1f262291914bc1fc40c60bd0740fb3fea428faf6da38b7c180fe8ac109","0x8c09328770ed8eb17afc6ac7ddd87bb476de18ed63cab80027234a605806895959990c47bd10d259d7f3e2ecb50074c9","0xb4b9e19edb4a33bde8b7289956568a5b6b6557404e0a34584b5721fe6f564821091013fbb158e2858c6d398293bb4b59","0x8a47377df61733a2aa5a0e945fce00267f8e950f37e109d4487d92d878fb8b573317bb382d902de515b544e9e233458d","0xb5804c9d97efeff5ca94f3689b8088c62422d92a1506fd1d8d3b1b30e8a866ad0d6dad4abfa051dfc4471250cac4c5d9","0x9084a6ee8ec22d4881e9dcc8a9eb3c2513523d8bc141942370fd191ad2601bf9537a0b1e84316f3209b3d8a54368051e","0x85447eea2fa26656a649f8519fa67279183044791d61cf8563d0783d46d747d96af31d0a93507bbb2242666aa87d3720","0x97566a84481027b60116c751aec552adfff2d9038e68d48c4db9811fb0cbfdb3f1d91fc176a0b0d988a765f8a020bce1","0xae87e5c1b9e86c49a23dceda4ecfd1dcf08567f1db8e5b6ec752ebd45433c11e7da4988573cdaebbb6f4135814fc059e","0xabee05cf9abdbc52897ac1ce9ed157f5466ed6c383d6497de28616238d60409e5e92619e528af8b62cc552bf09970dc2","0xae6d31cd7bf9599e5ee0828bab00ceb4856d829bba967278a73706b5f388465367aa8a6c7da24b5e5f1fdd3256ef8e63","0xac33e7b1ee47e1ee4af472e37ab9e9175260e506a4e5ce449788075da1b53c44cb035f3792d1eea2aa24b1f688cc6ed3","0x80f65b205666b0e089bb62152251c48c380a831e5f277f11f3ef4f0d52533f0851c1b612267042802f019ec900dc0e8f","0x858520ad7aa1c9fed738e3b583c84168f2927837ad0e1d326afe9935c26e9b473d7f8c382e82ef1fe37d2b39bb40a1ee","0xb842dd4af8befe00a97c2d0f0c33c93974761e2cb9e5ab8331b25170318ddd5e4bdbc02d8f90cbfdd5f348f4f371c1f7","0x8bf2cb79bc783cb57088aae7363320cbeaabd078ffdec9d41bc74ff49e0043d0dad0086a30e5112b689fd2f5a606365d","0x982eb03bbe563e8850847cd37e6a3306d298ab08c4d63ab6334e6b8c1fa13fce80cf2693b09714c7621d74261a0ff306","0xb143edb113dec9f1e5105d4a93fbe502b859e587640d3db2f628c09a17060e6aec9e900e2c8c411cda99bc301ff96625","0xaf472d9befa750dcebc5428fe1a024f18ec1c07bca0f95643ce6b5f4189892a910285afb03fd7ed7068fbe614e80d33c","0xa97e3bc57ede73ecd1bbf02de8f51b4e7c1a067da68a3cd719f4ba26a0156cbf1cef2169fd35a18c5a4cced50d475998","0xa862253c937cf3d75d7183e5f5be6a4385d526aeda5171c1c60a8381fea79f88f5f52a4fab244ecc70765d5765e6dfd5","0x90cb776f8e5a108f1719df4a355bebb04bf023349356382cae55991b31720f0fd03206b895fa10c56c98f52453be8778","0xa7614e8d0769dccd520ea4b46f7646e12489951efaef5176bc889e9eb65f6e31758df136b5bf1e9107e68472fa9b46ec","0xac3a9b80a3254c42e5ed3a090a0dd7aee2352f480de96ad187027a3bb6c791eddfc3074b6ffd74eea825188f107cda4d",
"0x82a01d0168238ef04180d4b6e0a0e39024c02c2d75b065017c2928039e154d093e1af4503f4d1f3d8a948917abb5d09f","0x8fab000a2b0eef851a483aec8d2dd85fe60504794411a2f73ed82e116960547ac58766cb73df71aea71079302630258d","0x872451a35c6db61c63e9b8bb9f16b217f985c20be4451c14282c814adb29d7fb13f201367c664435c7f1d4d9375d7a58","0x887d9ff54cc96b35d562df4a537ff972d7c4b3fd91ab06354969a4cfede0b9fc68bbffb61d0dbf1a58948dc701e54f5a","0x8cb5c2a6bd956875d88f41ae24574434f1308514d44057b55c9c70f13a3366ed054150eed0955a38fda3f757be73d55f","0x89ad0163cad93e24129d63f8e38422b7674632a8d0a9016ee8636184cab177659a676c4ee7efba3abe1a68807c656d60","0xb9ec01c7cab6d00359b5a0b4a1573467d09476e05ca51a9227cd16b589a9943d161eef62dcc73f0de2ec504d81f4d252","0x8031d17635d39dfe9705c485d2c94830b6fc9bc67b91300d9d2591b51e36a782e77ab5904662effa9382d9cca201f525","0x8be5a5f6bc8d680e5092d6f9a6585acbaaaa2ddc671da560dcf5cfa4472f4f184b9597b5b539438accd40dda885687cc","0xb1fc0f052fae038a2e3de3b3a96b0a1024b009de8457b8b3adb2d315ae68a89af905720108a30038e5ab8d0d97087785","0x8b8bdc77bd3a6bc7ca5492b6f8c614852c39a70d6c8a74916eaca0aeb4533b11898b8820a4c2620a97bf35e275480029","0xaf35f4dc538d4ad5cdf710caa38fd1eb496c3fa890a047b6a659619c5ad3054158371d1e88e0894428282eed9f47f76b","0x8166454a7089cc07758ad78724654f4e7a1a13e305bbf88ddb86f1a4b2904c4fc8ab872d7da364cdd6a6c0365239e2ad","0xab287c7d3addce74ce40491871c768abe01daaa0833481276ff2e56926b38a7c6d2681ffe837d2cc323045ad1a4414f9","0xb90317f4505793094d89365beb35537f55a6b5618904236258dd04ca61f21476837624a2f45fef8168acf732cab65579","0x98ae5ea27448e236b6657ab5ef7b1cccb5372f92ab25f5fa651fbac97d08353a1dae1b280b1cd42b17d2c6a70a63ab9d","0xadcf54e752d32cbaa6cb98fbca48d8cd087b1db1d131d465705a0d8042c8393c8f4d26b59006eb50129b21e6240f0c06","0xb591a3e4db18a7345fa935a8dd7994bbac5cc270b8ebd84c8304c44484c7a74afb45471fdbe4ab22156a30fae1149b40","0x806b53ac049a42f1dcc1d6335505371da0bf27c614f441b03bbf2e356be7b2fb4eed7117eabcce9e427a542eaa2bf7d8","0x800482e7a772d49210b81c4a907f5ce97f270b959e745621ee293cf8c71e8989363d61f66a98f2d16914439544ca84c7","0x99de9eafdad3617445312341644f2bb888680ff01ce95ca9276b1d2e5ef83fa02dab5e948ebf66c17df0752f1bd37b70","0x961ee30810aa4c93ae157fbe9009b8e443c082192bd36a73a6764ff9b2ad8b0948fe9a73344556e01399dd77badb4257","0xae0a361067c52efbe56c8adf982c00432cd478929459fc7f74052c8ee9531cd031fe1335418fde53f7c2ef34254eb7ac","0xa3503d16b6b27eb20c1b177bcf90d13706169220523a6271b85b2ce35a9a2b9c5bed088540031c0a4ebfdae3a4c6ab04","0x909420122c3e723289ca4e7b81c2df5aff312972a2203f4c45821b176e7c862bf9cac7f7df3adf1d59278f02694d06e7","0x989f42380ae904b982f85d0c6186c1aef5d6bcba29bcfbb658e811b587eb2749c65c6e4a8cc6409c229a107499a4f5d7","0x8037a6337195c8e26a27ea4ef218c6e7d79a9720aaab43932d343192abc2320fe72955f5e431c109093bda074103330a","0xb312e168663842099b88445e940249cc508f080ab0c94331f672e7760258dbd86be5267e4cf25ea25facb80bff82a7e9","0xaaa3ff8639496864fcdbfdda1ac97edc4f08e3c9288b768f6c8073038c9fbbf7e1c4bea169b4d45c31935cdf0680d45e","0x97dbd3df37f0b481a311dfc5f40e59227720f367912200d71908ef6650f32cc985cb05b981e3eea38958f7e48d10a15d","0xa89d49d1e267bb452d6cb621b9a90826fe55e9b489c0427b94442d02a16f390eed758e209991687f73f6b5a032321f42","0x9530dea4e0e19d6496f536f2e75cf7d814d65fde567055eb20db48fd8d20d501cd2a22fb506db566b94c9ee10f413d43","0x81a7009b9e67f1965fa7da6a57591c307de91bf0cd35ab4348dc4a98a4961e096d004d7e7ad318000011dc4342c1b809","0x83440a9402b766045d7aca61a58bba2aa29cac1cf718199e472ba086f5d48093d9dda4d135292ba51d049a23964eceae","0xa06c9ce5e802df14f6b064a3d1a0735d429b452f0e2e276042800b0a4f16df988fd94cf3945921d5dd3802ab2636f867","0xb1359e358b89936de
e9e678a187aad3e9ab14ac40e96a0a68f70ee2583cdcf467ae03bef4215e92893f4e12f902adec8","0x835304f8619188b4d14674d803103d5a3fa594d48e96d9699e653115dd05fdc2dda6ba3641cf7ad53994d448da155f02","0x8327cba5a9ff0d3f5cd0ae55e77167448926d5fcf76550c0ad978092a14122723090c51c415e88e42a2b62eb07cc3981","0xb373dcdaea85f85ce9978b1426a7ef4945f65f2d3467a9f1cc551a99766aac95df4a09e2251d3f89ca8c9d1a7cfd7b0e","0xab1422dc41af2a227b973a6fd124dfcb2367e2a11a21faa1d381d404f51b7257e5bc82e9cf20cd7fe37d7ae761a2ab37","0xa93774a03519d2f20fdf2ef46547b0a5b77c137d6a3434b48d56a2cbef9e77120d1b85d0092cf8842909213826699477","0x8eb967a495a38130ea28711580b7e61bcd1d051cd9e4f2dbf62f1380bd86e0d60e978d72f6f31e909eb97b3b9a2b867c","0xae8213378da1287ba1fe4242e1acaec19b877b6fe872400013c6eac1084b8d03156792fa3020201725b08228a1e80f49","0xb143daf6893d674d607772b3b02d8ac48f294237e2f2c87963c0d4e26d9227d94a2a13512457c3d5883544bbc259f0ef","0xb343bd2aca8973888e42542218924e2dda2e938fd1150d06878af76f777546213912b7c7a34a0f94186817d80ffa185c","0xb188ebc6a8c3007001aa347ae72cc0b15d09bc6c19a80e386ee4b334734ec0cc2fe8b493c2422f38d1e6d133cc3db6fe","0xb795f6a8b9b826aaeee18ccd6baf6c5adeeec85f95eb5b6d19450085ec7217e95a2d9e221d77f583b297d0872073ba0e","0xb1c7dbd998ad32ae57bfa95deafa147024afd57389e98992c36b6e52df915d3d5a39db585141ec2423173e85d212fed8","0x812bcdeb9fe5f12d0e1df9964798056e1f1c3de3b17b6bd2919b6356c4b86d8e763c01933efbe0224c86a96d5198a4be","0xb19ebeda61c23d255cbf472ef0b8a441f4c55b70f0d8ed47078c248b1d3c7c62e076b43b95c00a958ec8b16d5a7cb0d7","0xb02adc9aaa20e0368a989c2af14ff48b67233d28ebee44ff3418bb0473592e6b681af1cc45450bd4b175df9051df63d9","0x8d87f0714acee522eb58cec00360e762adc411901dba46adc9227124fa70ee679f9a47e91a6306d6030dd4eb8de2f3c1","0x8be54cec21e74bcc71de29dc621444263737db15f16d0bb13670f64e42f818154e04b484593d19ef95f2ee17e4b3fe21","0xab8e20546c1db38d31493b5d5f535758afb17e459645c1b70813b1cf7d242fd5d1f4354a7c929e8f7259f6a25302e351","0x89f035a1ed8a1e302ac893349ba8ddf967580fcb6e73d44af09e3929cde445e97ff60c87dafe489e2c0ab9c9986cfa00","0x8b2b0851a795c19191a692af55f7e72ad2474efdc5401bc3733cfdd910e34c918aaebe69d5ea951bdddf3c01cabbfc67","0xa4edb52c2b51495ccd1ee6450fc14b7b3ede8b3d106808929d02fb31475bacb403e112ba9c818d2857651e508b3a7dd1","0x9569341fded45d19f00bcf3cbf3f20eb2b4d82ef92aba3c8abd95866398438a2387437e580d8b646f17cf6fde8c5af23","0xaa4b671c6d20f72f2f18a939a6ff21cc37e0084b44b4a717f1be859a80b39fb1be026b3205adec2a66a608ec2bcd578f","0x94902e980de23c4de394ad8aec91b46f888d18f045753541492bfbb92c59d3daa8de37ae755a6853744af8472ba7b72b","0xaf651ef1b2a0d30a7884557edfad95b6b5d445a7561caebdc46a485aedd25932c62c0798465c340a76f6feaa196dd712","0xb7b669b8e5a763452128846dd46b530dca4893ace5cc5881c7ddcd3d45969d7e73fbebdb0e78aa81686e5f7b22ec5759","0x82507fd4ebe9fa656a7f2e084d64a1fa6777a2b0bc106d686e2d9d2edafc58997e58cb6bfd0453b2bf415704aa82ae62","0xb40bce2b42b88678400ecd52955bbdadd15f8b9e1b3751a1a3375dc0efb5ca3ee258cf201e1140b3c09ad41217d1d49e","0xb0210d0cbb3fbf3b8cdb39e862f036b0ff941cd838e7aaf3a8354e24246e64778d22f3de34572e6b2a580614fb6425be","0x876693cba4301b251523c7d034108831df3ce133d8be5a514e7a2ca494c268ca0556fa2ad8310a1d92a16b55bcd99ea9","0x8660281406d22a4950f5ef050bf71dd3090edb16eff27fa29ef600cdea628315e2054211ed2cc6eaf8f2a1771ef689fd","0xa610e7e41e41ab66955b809ba4ade0330b8e9057d8efc9144753caed81995edeb1a42a53f93ce93540feca1fae708dac","0xa49e2c176a350251daef1218efaccc07a1e06203386ede59c136699d25ca5cb2ac1b800c25b28dd05678f14e78e51891","0x83e0915aa2b09359604566080d411874af8c993beba97d4547782fdbe1a68e59324b800ff1f07b8db30c71adcbd102a8","0xa19e84e3541fb6498e9bb8a099c495cbfcad1
13330e0262a7e4c6544495bb8a754b2208d0c2d895c93463558013a5a32","0x87f2bd49859a364912023aca7b19a592c60214b8d6239e2be887ae80b69ebdeb59742bdebcfa73a586ab23b2c945586c","0xb8e8fdddae934a14b57bc274b8dcd0d45ebb95ddbaabef4454e0f6ce7d3a5a61c86181929546b3d60c447a15134d08e1","0x87e0c31dcb736ea4604727e92dc1d9a3cf00adcff79df3546e02108355260f3dd171531c3c0f57be78d8b28058fcc8c0","0x9617d74e8f808a4165a8ac2e30878c349e1c3d40972006f0787b31ea62d248c2d9f3fc3da83181c6e57e95feedfd0e8c","0x8949e2cee582a2f8db86e89785a6e46bc1565c2d8627d5b6bf43ba71ffadfab7e3c5710f88dcb5fb2fc6edf6f4fae216","0xad3fa7b0edceb83118972a2935a09f409d09a8db3869f30be3a76f67aa9fb379cabb3a3aff805ba023a331cad7d7eb64","0x8c95718a4112512c4efbd496be38bf3ca6cdcaad8a0d128f32a3f9aae57f3a57bdf295a3b372a8c549fda8f4707cffed","0x88f3261d1e28a58b2dee3fcc799777ad1c0eb68b3560f9b4410d134672d9533532a91ea7be28a041784872632d3c9d80","0xb47472a41d72dd2e8b72f5c4f8ad626737dde3717f63d6bc776639ab299e564cbad0a2ad5452a07f02ff49a359c437e5","0x9896d21dc2e8aad87b76d6df1654f10cd7bceed4884159d50a818bea391f8e473e01e14684814c7780235f28e69dca6e","0x82d47c332bbd31bbe83b5eb44a23da76d4a7a06c45d7f80f395035822bc27f62f59281d5174e6f8e77cc9b5c3193d6f0","0x95c74cd46206e7f70c9766117c34c0ec45c2b0f927a15ea167901a160e1530d8522943c29b61e03568aa0f9c55926c53","0xa89d7757825ae73a6e81829ff788ea7b3d7409857b378ebccd7df73fdbe62c8d9073741cf038314971b39af6c29c9030","0x8c1cd212d0b010905d560688cfc036ae6535bc334fa8b812519d810b7e7dcf1bb7c5f43deaa40f097158358987324a7f","0xb86993c383c015ed8d847c6b795164114dd3e9efd25143f509da318bfba89389ea72a420699e339423afd68b6512fafb","0x8d06bd379c6d87c6ed841d8c6e9d2d0de21653a073725ff74be1934301cc3a79b81ef6dd0aad4e7a9dc6eac9b73019bc","0x81af4d2d87219985b9b1202d724fe39ef988f14fef07dfe3c3b11714e90ffba2a97250838e8535eb63f107abfe645e96","0x8c5e0af6330a8becb787e4b502f34f528ef5756e298a77dc0c7467433454347f3a2e0bd2641fbc2a45b95e231c6e1c02","0x8e2a8f0f04562820dc8e7da681d5cad9fe2e85dd11c785fb6fba6786c57a857e0b3bd838fb849b0376c34ce1665e4837","0xa39be8269449bfdfc61b1f62077033649f18dae9bef7c6163b9314ca8923691fb832f42776f0160b9e8abd4d143aa4e1","0x8c154e665706355e1cc98e0a4cabf294ab019545ba9c4c399d666e6ec5c869ca9e1faf8fb06cd9c0a5c2f51a7d51b70a","0xa046a7d4de879d3ebd4284f08f24398e9e3bf006cd4e25b5c67273ade248689c69affff92ae810c07941e4904296a563","0xafd94c1cb48758e5917804df03fb38a6da0e48cd9b6262413ea13b26973f9e266690a1b7d9d24bbaf7e82718e0e594b0","0x859e21080310c8d6a38e12e2ac9f90a156578cdeb4bb2e324700e97d9a5511cd6045dc39d1d0de3f94aeed043a24119d","0xa219fb0303c379d0ab50893264919f598e753aac9065e1f23ef2949abc992577ab43c636a1d2c089203ec9ddb941e27d","0xb0fdb639d449588a2ca730afcba59334e7c387342d56defdfb7ef79c493f7fd0e5277eff18e7203e756c7bdda5803047","0x87f9c3b7ed01f54368aca6dbcf2f6e06bff96e183c4b2c65f8baa23b377988863a0a125d5cdd41a072da8462ced4c070","0x99ef7a5d5ac2f1c567160e1f8c95f2f38d41881850f30c461a205f7b1b9fb181277311333839b13fb3ae203447e17727","0xaeaca9b1c2afd24e443326cc68de67b4d9cedb22ad7b501a799d30d39c85bb2ea910d4672673e39e154d699e12d9b3dc","0xa11675a1721a4ba24dd3d0e4c3c33a6edf4cd1b9f6b471070b4386c61f77452266eae6e3f566a40cfc885eada9a29f23","0xb228334445e37b9b49cb4f2cc56b454575e92173ddb01370a553bba665adadd52df353ad74470d512561c2c3473c7bb9","0xa18177087c996572d76f81178d18ed1ceebc8362a396348ce289f1d8bd708b9e99539be6fccd4acb1112381cfc5749b4","0x8e7b8bf460f0d3c99abb19803b9e43422e91507a1c0c22b29ee8b2c52d1a384da4b87c292e28eff040db5be7b1f8641f","0xb03d038d813e29688b6e6f444eb56fec3abba64c3d6f890a6bcf2e916507091cdb2b9d2c7484617be6b26552ed1c56cb","0xa1c88ccd30e934adfc5494b72655f8afe1865a84196abfb376968f22d
dc07761210b6a9fb7638f1413d1b4073d430290","0x961b714faebf172ad2dbc11902461e286e4f24a99a939152a53406117767682a571057044decbeb3d3feef81f4488497","0xa03dc4059b46effdd786a0a03cc17cfee8585683faa35bb07936ded3fa3f3a097f518c0b8e2db92fd700149db1937789","0xadf60180c99ca574191cbcc23e8d025b2f931f98ca7dfcebfc380226239b6329347100fcb8b0fcb12db108c6ad101c07","0x805d4f5ef24d46911cbf942f62cb84b0346e5e712284f82b0db223db26d51aabf43204755eb19519b00e665c7719fcaa","0x8dea7243e9c139662a7fe3526c6c601eee72fd8847c54c8e1f2ad93ef7f9e1826b170afe58817dac212427164a88e87f","0xa2ba42356606d651b077983de1ad643650997bb2babb188c9a3b27245bb65d2036e46667c37d4ce02cb1be5ae8547abe","0xaf2ae50b392bdc013db2d12ce2544883472d72424fc767d3f5cb0ca2d973fc7d1f425880101e61970e1a988d0670c81b","0x98e6bec0568d3939b31d00eb1040e9b8b2a35db46ddf4369bdaee41bbb63cc84423d29ee510a170fb5b0e2df434ba589","0x822ff3cd12fbef4f508f3ca813c04a2e0b9b799c99848e5ad3563265979e753ee61a48f6adc2984a850f1b46c1a43d35","0x891e8b8b92a394f36653d55725ef514bd2e2a46840a0a2975c76c2a935577f85289026aaa74384da0afe26775cbddfb9","0xb2a3131a5d2fe7c8967047aa66e4524babae941d90552171cc109527f345f42aa0df06dcbb2fa01b33d0043917bbed69","0x80c869469900431f3eeefafdbe07b8afd8cee7739e659e6d0109b397cacff85a88247698f87dc4e2fe39a592f250ac64","0x9091594f488b38f9d2bb5df49fd8b4f8829d9c2f11a197dd1431ed5abbc5c954bbde3387088f9ee3a5a834beb7619bce","0xb472e241e6956146cca57b97a8a204668d050423b4e76f857bad5b47f43b203a04c8391ba9d9c3e95093c071f9d376a1","0xb7dd2de0284844392f7dfb56fe7ca3ede41e27519753ffc579a0a8d2d65ceb8108d06b6b0d4c3c1a2588951297bd1a1e","0x902116ce70d0a079ac190321c1f48701318c05f8e69ee09694754885d33a835a849cafe56f499a2f49f6cda413ddf9a7","0xb18105cc736787fafaf7c3c11c448bce9466e683159dff52723b7951dff429565e466e4841d982e3aaa9ee2066838666","0x97ab9911f3f659691762d568ae0b7faa1047b0aed1009c319fa79d15d0db8db9f808fc385dc9a68fa388c10224985379","0xb2a2cba65f5b927e64d2904ba412e2bac1cf18c9c3eda9c72fb70262497ecf505b640827e2afebecf10eebbcf48ccd3e","0xb36a3fd677baa0d3ef0dac4f1548ff50a1730286b8c99d276a0a45d576e17b39b3cbadd2fe55e003796d370d4be43ce3","0xa5dfec96ca3c272566e89dc453a458909247e3895d3e44831528130bc47cc9d0a0dac78dd3cad680a4351d399d241967","0x8029382113909af6340959c3e61db27392531d62d90f92370a432aec3eb1e4c36ae1d4ef2ba8ec6edb4d7320c7a453f6","0x971d85121ea108e6769d54f9c51299b0381ece8b51d46d49c89f65bedc123bab4d5a8bc14d6f67f4f680077529cbae4c","0x98ff6afc01d0bec80a278f25912e1b1ebff80117adae72e31d5b9fa4d9624db4ba2065b444df49b489b0607c45e26c4c","0x8fa29be10fb3ab30ce25920fec0187e6e91e458947009dabb869aade7136c8ba23602682b71e390c251f3743164cbdaa","0xb3345c89eb1653418fe3940cf3e56a9a9c66526389b98f45ca02dd62bfb37baa69a4baaa7132d7320695f8ea6ad1fd94","0xb72c7f5541c9ac6b60a7ec9f5415e7fb14da03f7164ea529952a29399f3a071576608dbbcc0d45994f21f92ddbeb1e19","0xaa3450bb155a5f9043d0ef95f546a2e6ade167280bfb75c9f09c6f9cdb1fffb7ce8181436161a538433afa3681c7a141","0x92a18fecaded7854b349f441e7102b638ababa75b1b0281dd0bded6541abe7aa37d96693595be0b01fe0a2e2133d50f9","0x980756ddf9d2253cfe6c94960b516c94889d09e612810935150892627d2ecee9a2517e04968eea295d0106850c04ca44","0xae68c6ccc454318cdd92f32b11d89116a3b8350207a36d22a0f626718cad671d960090e054c0c77ac3162ae180ecfd4b","0x99f31f66eaaa551749ad91d48a0d4e3ff4d82ef0e8b28f3184c54e852422ba1bdafd53b1e753f3a070f3b55f3c23b6a2","0xa44eaeaa6589206069e9c0a45ff9fc51c68da38d4edff1d15529b7932e6f403d12b9387019c44a1488a5d5f27782a51f","0xb80b5d54d4b344840e45b79e621bd77a3f83fb4ce6d8796b7d6915107b3f3c34d2e7d95bdafd120f285669e5acf2437a","0xb36c069ec085a612b5908314d6b84c00a83031780261d1c77a0384c406867c9847d5b0845dedd
fa512cc04a8df2046fb","0xb09dbe501583220f640d201acea7ee3e39bf9eda8b91aa07b5c50b7641d86d71acb619b38d27835ce97c3759787f08e9","0x87403d46a2bf63170fff0b857acacf42ee801afe9ccba8e5b4aea967b68eac73a499a65ca46906c2eb4c8f27bc739faa","0x82b93669f42a0a2aa5e250ffe6097269da06a9c02fcd1801abbad415a7729a64f830754bafc702e64600ba47671c2208","0x8e3a3029be7edb8dd3ab1f8216664c8dc50d395f603736061d802cef77627db7b859ef287ed850382c13b4d22d6a2d80","0x968e9ec7194ff424409d182ce0259acd950c384c163c04463bc8700a40b79beba6146d22b7fa7016875a249b7b31c602","0x8b42c984bbe4996e0c20862059167c6bdc5164b1ffcd928f29512664459212d263e89f0f0e30eed4e672ffa5ed0b01b5","0x96bac54062110dada905363211133f1f15dc7e4fd80a4c6e4a83bc9a0bcbbaba11cd2c7a13debcf0985e1a954c1da66b","0xa16dc8a653d67a7cd7ae90b2fffac0bf1ca587005430fe5ba9403edd70ca33e38ba5661d2ed6e9d2864400d997626a62","0xa68ab11a570a27853c8d67e491591dcba746bfbee08a2e75ae0790399130d027ed387f41ef1d7de8df38b472df309161","0x92532b74886874447c0300d07eda9bbe4b41ed25349a3da2e072a93fe32c89d280f740d8ff70d5816793d7f2b97373cc","0x88e35711b471e89218fd5f4d0eadea8a29405af1cd81974427bc4a5fb26ed60798daaf94f726c96e779b403a2cd82820","0xb5c72aa4147c19f8c4f3a0a62d32315b0f4606e0a7025edc5445571eaf4daff64f4b7a585464821574dd50dbe1b49d08","0x9305d9b4095258e79744338683fd93f9e657367b3ab32d78080e51d54eec331edbc224fad5093ebf8ee4bd4286757eb8","0xb2a17abb3f6a05bcb14dc7b98321fa8b46d299626c73d7c6eb12140bf4c3f8e1795250870947af817834f033c88a59d6","0xb3477004837dbd8ba594e4296f960fc91ab3f13551458445e6c232eb04b326da803c4d93e2e8dcd268b4413305ff84da","0x924b4b2ebaafdcfdfedb2829a8bf46cd32e1407d8d725a5bd28bdc821f1bafb3614f030ea4352c671076a63494275a3f","0x8b81b9ef6125c82a9bece6fdcb9888a767ac16e70527753428cc87c56a1236e437da8be4f7ecfe57b9296dc3ae7ba807","0x906e19ec8b8edd58bdf9ae05610a86e4ea2282b1bbc1e8b00b7021d093194e0837d74cf27ac9916bdb8ec308b00da3da","0xb41c5185869071760ac786078a57a2ab4e2af60a890037ac0c0c28d6826f15c2cf028fddd42a9b6de632c3d550bfbc14","0xa646e5dec1b713ae9dfdf7bdc6cd474d5731a320403c7dfcfd666ffc9ae0cff4b5a79530e8df3f4aa9cb80568cb138e9","0xb0efad22827e562bd3c3e925acbd0d9425d19057868608d78c2209a531cccd0f2c43dc5673acf9822247428ffa2bb821","0xa94c19468d14b6f99002fc52ac06bbe59e5c472e4a0cdb225144a62f8870b3f10593749df7a2de0bd3c9476ce682e148","0x803864a91162f0273d49271dafaab632d93d494d1af935aefa522768af058fce52165018512e8d6774976d52bd797e22","0xa08711c2f7d45c68fb340ac23597332e1bcaec9198f72967b9921204b9d48a7843561ff318f87908c05a44fc35e3cc9d","0x91c3cad94a11a3197ae4f9461faab91a669e0dddb0371d3cab3ed9aeb1267badc797d8375181130e461eadd05099b2a2","0x81bdaaf48aae4f7b480fc13f1e7f4dd3023a41439ba231760409ce9292c11128ab2b0bdbbf28b98af4f97b3551f363af","0x8d60f9df9fd303f625af90e8272c4ecb95bb94e6efc5da17b8ab663ee3b3f673e9f6420d890ccc94acf4d2cae7a860d8","0xa7b75901520c06e9495ab983f70b61483504c7ff2a0980c51115d11e0744683ce022d76e3e09f4e99e698cbd21432a0d","0x82956072df0586562fda7e7738226f694e1c73518dd86e0799d2e820d7f79233667192c9236dcb27637e4c65ef19d493","0xa586beb9b6ffd06ad200957490803a7cd8c9bf76e782734e0f55e04a3dc38949de75dc607822ec405736c576cf83bca3","0xa179a30d00def9b34a7e85607a447eea0401e32ab5abeee1a281f2acd1cf6ec81a178020666f641d9492b1bdf66f05a3","0x83e129705c538787ed8e0fdc1275e6466a3f4ee21a1e6abedd239393b1df72244723b92f9d9d9339a0cab6ebf28f5a16","0x811bd8d1e3722b64cd2f5b431167e7f91456e8bba2cc669d3fbbce7d553e29c3c19f629fcedd2498bc26d33a24891d17","0xa243c030c858f1f60cccd26b45b024698cc6d9d9e6198c1ed4964a235d9f8d0baf9cde10c8e63dfaa47f8e74e51a6e85","0xab839eb82e23ca52663281f863b55b0a3d6d4425c33ffb4eeb1d7979488ab068bf99e2a60e82cea4dc42c56c26cbfebe"
,"0x8b896f9bb21d49343e67aec6ad175b58c0c81a3ca73d44d113ae4354a0065d98eb1a5cafedaf232a2bb9cdc62152f309","0xaf6230340cc0b66f5bf845540ed4fc3e7d6077f361d60762e488d57834c3e7eb7eacc1b0ed73a7d134f174a01410e50c","0x88975e1b1af678d1b5179f72300a30900736af580dd748fd9461ef7afccc91ccd9bed33f9da55c8711a7635b800e831f","0xa97486bb9047391661718a54b8dd5a5e363964e495eae6c692730264478c927cf3e66dd3602413189a3699fbeae26e15","0xa5973c161ab38732885d1d2785fd74bf156ba34881980cba27fe239caef06b24a533ffe6dbbbeca5e6566682cc00300a","0xa24776e9a840afda0003fa73b415d5bd6ecd9b5c2cc842b643ee51b8c6087f4eead4d0bfbd987eb174c489a7b952ff2a","0xa8a6ee06e3af053b705a12b59777267c546f33ba8a0f49493af8e6df4e15cf8dd2d4fb4daf7e84c6b5d3a7363118ff03","0xa28e59ce6ad02c2ce725067c0123117e12ac5a52c8f5af13eec75f4a9efc4f696777db18a374fa33bcae82e0734ebd16","0x86dfc3b78e841c708aff677baa8ee654c808e5d257158715097c1025d46ece94993efe12c9d188252ad98a1e0e331fec","0xa88d0275510f242eab11fdb0410ff6e1b9d7a3cbd3658333539815f1b450a84816e6613d15aa8a8eb15d87cdad4b27a2","0x8440acea2931118a5b481268ff9f180ee4ede85d14a52c026adc882410825b8275caa44aff0b50c2b88d39f21b1a0696","0xa7c3182eab25bd6785bacf12079d0afb0a9b165d6ed327814e2177148539f249eb9b5b2554538f54f3c882d37c0a8abe","0x85291fbe10538d7da38efdd55a7acebf03b1848428a2f664c3ce55367aece60039f4f320b1771c9c89a35941797f717c","0xa2c6414eeb1234728ab0de94aa98fc06433a58efa646ca3fcbd97dbfb8d98ae59f7ce6d528f669c8149e1e13266f69c9","0x840c8462785591ee93aee2538d9f1ec44ba2ca61a569ab51d335ac873f5d48099ae8d7a7efa0725d9ff8f9475bfa4f56","0xa7065a9d02fb3673acf7702a488fbc01aa69580964932f6f40b6c2d1c386b19e50b0e104fcac24ea26c4e723611d0238","0xb72db6d141267438279e032c95e6106c2ccb3164b842ba857a2018f3a35f4b040da92680881eb17cd61d0920d5b8f006","0xa8005d6c5960e090374747307ef0be2871a7a43fa4e76a16c35d2baab808e9777b496e9f57a4218b23390887c33a0b55","0x8e152cea1e00a451ca47c20a1e8875873419700af15a5f38ee2268d3fbc974d4bd5f4be38008fa6f404dbdedd6e6e710","0xa3391aed1fcd68761f06a7d1008ec62a09b1cb3d0203cd04e300a0c91adfed1812d8bc1e4a3fd7976dc0aae0e99f52f1","0x967eb57bf2aa503ee0c6e67438098149eac305089c155f1762cf5e84e31f0fbf27c34a9af05621e34645c1ec96afaec8","0x88af97ddc4937a95ec0dcd25e4173127260f91c8db2f6eac84afb789b363705fb3196235af631c70cafd09411d233589","0xa32df75b3f2c921b8767638fd289bcfc61e08597170186637a7128ffedd52c798c434485ac2c7de07014f9e895c2c3d8","0xb0a783832153650aa0d766a3a73ec208b6ce5caeb40b87177ffc035ab03c7705ecdd1090b6456a29f5fb7e90e2fa8930","0xb59c8e803b4c3486777d15fc2311b97f9ded1602fa570c7b0200bada36a49ee9ef4d4c1474265af8e1c38a93eb66b18b","0x982f2c85f83e852022998ff91bafbb6ff093ef22cf9d5063e083a48b29175ccbd51b9c6557151409e439096300981a6c","0x939e3b5989fefebb9d272a954659a4eb125b98c9da6953f5e628d26266bd0525ec38304b8d56f08d65abc4d6da4a8dbb","0x8898212fe05bc8de7d18503cb84a1c1337cc2c09d1eeef2b475aa79185b7322bf1f8e065f1bf871c0c927dd19faf1f6d","0x94b0393a41cd00f724aee2d4bc72103d626a5aecb4b5486dd1ef8ac27528398edf56df9db5c3d238d8579af368afeb09","0x96ac564450d998e7445dd2ea8e3fc7974d575508fa19e1c60c308d83b645864c029f2f6b7396d4ff4c1b24e92e3bac37","0x8adf6638e18aff3eb3b47617da696eb6c4bdfbecbbc3c45d3d0ab0b12cbad00e462fdfbe0c35780d21aa973fc150285e","0xb53f94612f818571b5565bbb295e74bada9b5f9794b3b91125915e44d6ddcc4da25510eab718e251a09c99534d6042d9","0x8b96462508d77ee083c376cd90807aebad8de96bca43983c84a4a6f196d5faf6619a2351f43bfeec101864c3bf255519","0xaeadf34657083fc71df33bd44af73bf5281c9ca6d906b9c745536e1819ea90b56107c55e2178ebad08f3ba75b3f81c86","0x9784ba29b2f0057b5af1d3ab2796d439b8753f1f749c73e791037461bdfc3f7097394283105b8ab01788ea5255a96710","0x8756241bda159d4a
33bf74faba0d4594d963c370fb6a18431f279b4a865b070b0547a6d1613cf45b8cfb5f9236bbf831","0xb03ebfd6b71421dfd49a30460f9f57063eebfe31b9ceaa2a05c37c61522b35bdc09d7db3ad75c76c253c00ba282d3cd2","0xb34e7e6341fa9d854b2d3153bdda0c4ae2b2f442ab7af6f99a0975d45725aa48e36ae5f7011edd249862e91f499687d4","0xb462ee09dc3963a14354244313e3444de5cc37ea5ccfbf14cd9aca8027b59c4cb2a949bc30474497cab8123e768460e6","0xaea753290e51e2f6a21a9a0ee67d3a2713f95c2a5c17fe41116c87d3aa77b1683761264d704df1ac34f8b873bc88ef7b","0x98430592afd414394f98ddfff9f280fcb1c322dbe3510f45e1e9c4bb8ee306b3e0cf0282c0ee73ebb8ba087d4d9e0858","0xb95d3b5aaf54ffca11f4be8d57f76e14afdb20afc859dc7c7471e0b42031e8f3d461b726ecb979bdb2f353498dfe95ea","0x984d17f9b11a683132e0b5a9ee5945e3ff7054c2d5c716be73b29078db1d36f54c6e652fd2f52a19da313112e97ade07","0xab232f756b3fff3262be418a1af61a7e0c95ceebbc775389622a8e10610508cd6784ab7960441917a83cc191c58829ea","0xa28f41678d6e60de76b0e36ab10e4516e53e02e9c77d2b5af3cfeee3ce94cfa30c5797bd1daab20c98e1cad83ad0f633","0xb55395fca84dd3ccc05dd480cb9b430bf8631ff06e24cb51d54519703d667268c2f8afcde4ba4ed16bece8cc7bc8c6e0","0x8a8a5392a0e2ea3c7a8c51328fab11156004e84a9c63483b64e8f8ebf18a58b6ffa8fe8b9d95af0a2f655f601d096396","0xab480000fe194d23f08a7a9ec1c392334e9c687e06851f083845121ce502c06b54dda8c43092bcc1035df45cc752fe9b","0xb265644c29f628d1c7e8e25a5e845cabb21799371814730a41a363e1bda8a7be50fee7c3996a365b7fcba4642add10db","0xb8a915a3c685c2d4728f6931c4d29487cad764c5ce23c25e64b1a3259ac27235e41b23bfe7ae982921b4cb84463097df","0x8efa7338442a4b6318145a5440fc213b97869647eeae41b9aa3c0a27ee51285b73e3ae3b4a9423df255e6add58864aa9","0x9106d65444f74d217f4187dfc8fcf3810b916d1e4275f94f6a86d1c4f3565b131fd6cde1fa708bc05fe183c49f14941a","0x948252dac8026bbbdb0a06b3c9d66ec4cf9532163bab68076fda1bd2357b69e4b514729c15aaa83b5618b1977bbc60c4","0xae6596ccfdf5cbbc5782efe3bb0b101bb132dbe1d568854ca24cacc0b2e0e9fabcb2ca7ab42aecec412efd15cf8cb7a2","0x84a0b6c198ff64fd7958dfd1b40eac9638e8e0b2c4cd8cf5d8cdf80419baee76a05184bce6c5b635f6bf2d30055476a7","0x8893118be4a055c2b3da593dbca51b1ae2ea2469911acfb27ee42faf3e6c3ad0693d3914c508c0b05b36a88c8b312b76","0xb097479e967504deb6734785db7e60d1d8034d6ca5ba9552887e937f5e17bb413fccac2c1d1082154ed76609127860ad","0xa0294e6b9958f244d29943debf24b00b538b3da1116269b6e452bb12dc742226712fd1a15b9c88195afeb5d2415f505c","0xb3cc15f635080bc038f61b615f62b5b5c6f2870586191f59476e8368a73641d6ac2f7d0c1f54621982defdb318020230","0x99856f49b9fe1604d917c94d09cc0ed753d13d015d30587a94e6631ffd964b214e607deb8a69a8b5e349a7edf4309206","0xa8571e113ea22b4b4fce41a094da8c70de37830ae32e62c65c2fa5ad06a9bc29e884b945e73d448c72b176d6ecebfb58","0xa9e9c6e52beb0013273c29844956b3ce291023678107cdc785f7b44eff5003462841ad8780761b86aefc6b734adde7cf","0x80a784b0b27edb51ef2bad3aee80e51778dcaa0f3f5d3dcb5dc5d4f4b2cf7ae35b08de6680ea9dac53f8438b92eb09ef","0x827b543e609ea328e97e373f70ad72d4915a2d1daae0c60d44ac637231070e164c43a2a58db80a64df1c624a042b38f9","0xb449c65e8195202efdcb9bdb4e869a437313b118fef8b510cbbf8b79a4e99376adb749b37e9c20b51b31ed3310169e27","0x8ea3028f4548a79a94c717e1ed28ad4d8725b8d6ab18b021063ce46f665c79da3c49440c6577319dab2d036b7e08f387","0x897798431cfb17fe39f08f5f854005dc37b1c1ec1edba6c24bc8acb3b88838d0534a75475325a5ea98b326ad47dbad75","0x89cf232e6303b0751561960fd4dea5754a28c594daf930326b4541274ffb03c7dd75938e411eb9a375006a70ce38097f","0x9727c6ae7f0840f0b6c8bfb3a1a5582ceee705e0b5c59b97def7a7a2283edd4d3f47b7971e902a3a2079e40b53ff69b8","0xb76ed72b122c48679d221072efc0eeea063cb205cbf5f9ef0101fd10cb1075b8628166c83577cced654e1c001c7882f7","0xae908c42d208759da5ee9b405df85a6532ea
35c6f0f6a1288d22870f59d98edc896841b8ac890a538e6c8d1e8b02d359","0x809d12fe4039a0ec80dc9be6a89acaab7797e5f7f9b163378f52f9a75a1d73b2e9ae6e3dd49e32ced439783c1cabbef5","0xa4149530b7f85d1098ba534d69548c6c612c416e8d35992fc1f64f4deeb41e09e49c6cf7aadbed7e846b91299358fe2d","0xa49342eacd1ec1148b8df1e253b1c015f603c39de11fa0a364ccb86ea32d69c34fd7aa6980a1fadcd8e785a57fa46f60","0x87d43eff5a006dc4dddcf76cc96c656a1f3a68f19f124181feab86c6cc9a52cb9189cdbb423414defdd9bb0ca8ff1ddc","0x861367e87a9aa2f0f68296ba50aa5dbc5713008d260cc2c7e62d407c2063064749324c4e8156dc21b749656cfebce26b","0xb5303c2f72e84e170e66ae1b0fbd51b8c7a6f27476eaf5694b64e8737d5c84b51fe90100b256465a4c4156dd873cddb0","0xb62849a4f891415d74f434cdc1d23c4a69074487659ca96e1762466b2b7a5d8525b056b891d0feea6fe6845cba8bc7fb","0x923dd9e0d6590a9307e8c4c23f13bae3306b580e297a937711a8b13e8de85e41a61462f25b7d352b682e8437bf2b4ab3","0x9147379860cd713cd46c94b8cdf75125d36c37517fbecf81ace9680b98ce6291cd1c3e472f84249cc3b2b445e314b1b6","0xa808a4f17ac21e3fb5cfef404e61fae3693ca3e688d375f99b6116779696059a146c27b06de3ac36da349b0649befd56","0x87787e9322e1b75e66c1f0d9ea0915722a232770930c2d2a95e9478c4b950d15ab767e30cea128f9ed65893bfc2d0743","0x9036a6ee2577223be105defe1081c48ea7319e112fff9110eb9f61110c319da25a6cea0464ce65e858635b079691ef1f","0xaf5548c7c24e1088c23b57ee14d26c12a83484c9fd9296edf1012d8dcf88243f20039b43c8c548c265ef9a1ffe9c1c88","0xa0fff520045e14065965fb8accd17e878d3fcaf9e0af2962c8954e50be6683d31fa0bf4816ab68f08630dbac6bfce52a","0xb4c1b249e079f6ae1781af1d97a60b15855f49864c50496c09c91fe1946266915b799f0406084d7783f5b1039116dd8b","0x8b0ffa5e7c498cb3879dddca34743b41eee8e2dea3d4317a6e961b58adb699ef0c92400c068d5228881a2b08121226bf","0x852ae8b19a1d80aa8ae5382e7ee5c8e7670ceb16640871c56b20b96b66b3b60e00015a3dde039446972e57b49a999ddd","0xa49942f04234a7d8492169da232cfff8051df86e8e1ba3db46aede02422c689c87dc1d99699c25f96cb763f5ca0983e5","0xb04b597b7760cf5dcf411ef896d1661e6d5b0db3257ac2cf64b20b60c6cc18fa10523bb958a48d010b55bac7b02ab3b1","0xa494591b51ea8285daecc194b5e5bd45ae35767d0246ac94fae204d674ee180c8e97ff15f71f28b7aeb175b8aea59710","0x97d2624919e78406e7460730680dea8e71c8571cf988e11441aeea54512b95bd820e78562c99372d535d96f7e200d20d","0xac693ddb00e48f76e667243b9b6a7008424043fb779e4f2252330285232c3fccac4da25cbd6d95fe9ad959ff305a91f6","0x8d20ca0a71a64a3f702a0825bb46bd810d03bebfb227683680d474a52f965716ff99e19a165ebaf6567987f4f9ee3c94","0xa5c516a438f916d1d68ca76996404792e0a66e97b7f18fc54c917bf10cf3211b62387932756e39e67e47b0bd6e88385a","0xb089614d830abc0afa435034cec7f851f2f095d479cacf1a3fb57272da826c499a52e7dcbc0eb85f4166fb94778e18e9","0xa8dacc943765d930848288192f4c69e2461c4b9bc6e79e30eeef9a543318cf9ae9569d6986c65c5668a89d49993f8e07","0xab5a9361fa339eec8c621bdad0a58078983abd8942d4282b22835d7a3a47e132d42414b7c359694986f7db39386c2e19","0x94230517fb57bd8eb26c6f64129b8b2abd0282323bf7b94b8bac7fab27b4ecc2c4290c294275e1a759de19f2216134f3","0xb8f158ea5006bc3b90b285246625faaa6ac9b5f5030dc69701b12f3b79a53ec7e92eeb5a63bbd1f9509a0a3469ff3ffc","0x8b6944fd8cb8540957a91a142fdcda827762aa777a31e8810ca6d026e50370ee1636fc351724767e817ca38804ebe005","0x82d1ee40fe1569c29644f79fa6c4033b7ed45cd2c3b343881f6eb0de2e79548fded4787fae19bed6ee76ed76ff9f2f11","0xa8924c7035e99eaed244ca165607e7e568b6c8085510dcdbaf6ebdbed405af2e6c14ee27d94ffef10d30aa52a60bf66d","0x956f82a6c2ae044635e85812581e4866c5fa2f427b01942047d81f6d79a14192f66fbbe77c9ffeaef4e6147097fdd2b5","0xb1100255a1bcf5e05b6aff1dfeb6e1d55b5d68d43a7457ba10cc76b61885f67f4d0d5179abda786e037ae95deb8eea45","0x99510799025e3e5e8fbf06dedb14c060c6548ba2bda824f687d3999d
c395e794b1fb6514b9013f3892b6cf65cb0d65aa","0x8f9091cebf5e9c809aab415942172258f894e66e625d7388a05289183f01b8d994d52e05a8e69f784fba41db9ea357f0","0xa13d2eeb0776bdee9820ecb6693536720232848c51936bb4ef4fe65588d3f920d08a21907e1fdb881c1ad70b3725e726","0xa68b8f18922d550284c5e5dc2dda771f24c21965a6a4d5e7a71678178f46df4d8a421497aad8fcb4c7e241aba26378a0","0x8b7601f0a3c6ad27f03f2d23e785c81c1460d60100f91ea9d1cab978aa03b523150206c6d52ce7c7769c71d2c8228e9e","0xa8e02926430813caa851bb2b46de7f0420f0a64eb5f6b805401c11c9091d3b6d67d841b5674fa2b1dce0867714124cd8","0xb7968ecba568b8193b3058400af02c183f0a6df995a744450b3f7e0af7a772454677c3857f99c140bbdb2a09e832e8e0","0x8f20b1e9ba87d0a3f35309b985f3c18d2e8800f1ca7f0c52cadef773f1496b6070c936eea48c4a1cae83fd2524e9d233","0x88aef260042db0d641a51f40639dbeeefa9e9811df30bee695f3791f88a2f84d318f04e8926b7f47bf25956cb9e3754f","0x9725345893b647e9ba4e6a29e12f96751f1ae25fcaec2173e9a259921a1a7edb7a47159b3c8767e44d9e2689f5aa0f72","0x8c281e6f72752cb11e239e4df9341c45106eb7993c160e54423c2bffe10bc39d42624b45a1f673936ef2e1a02fc92f1a","0x90aba2f68bddb2fcce6c51430dacdfeec43ea8dc379660c99095df11017691ccf5faa27665cf4b9f0eea7728ae53c327","0xb7022695c16521c5704f49b7ddbdbec9b5f57ce0ceebe537bc0ebb0906d8196cc855a9afeb8950a1710f6a654464d93f","0x8fe1b9dd3c6a258116415d36e08374e094b22f0afb104385a5da48be17123e86fb8327baacc4f0d9ebae923d55d99bb5","0x817e85d8e3d19a4cbc1dec31597142c2daa4871bda89c2177fa719c00eda3344eb08b82eb92d4aa91a9eaacb3fc09783","0xb59053e1081d2603f1ca0ba553804d6fa696e1fd996631db8f62087b26a40dfef02098b0326bb75f99ec83b9267ca738","0x990a173d857d3ba81ff3789b931bfc9f5609cde0169b7f055fa3cb56451748d593d62d46ba33f80f9cafffe02b68dd14","0xb0c538dbba4954b809ab26f9f94a3cf1dcb77ce289eaec1d19f556c0ae4be1fa03af4a9b7057837541c3cc0a80538736","0xac3ba42f5f44f9e1fc453ce49c4ab79d0e1d5c42d3b30b1e098f3ab3f414c4c262fa12fb2be249f52d4aaf3c5224beb9","0xaf47467eb152e59870e21f0d4da2f43e093daf40180ab01438030684b114d025326928eaab12c41b81a066d94fce8436","0x98d1b58ba22e7289b1c45c79a24624f19b1d89e00f778eef327ec4856a9a897278e6f1a9a7e673844b31dde949153000","0x97ccb15dfadc7c59dca08cfe0d22df2e52c684cf97de1d94bc00d7ba24e020025130b0a39c0f4d46e4fc872771ee7875","0xb699e4ed9a000ff96ca296b2f09dce278832bc8ac96851ff3cff99ed3f6f752cfc0fea8571be28cd9b5a7ec36f1a08ee","0xb9f49f0edb7941cc296435ff0a912e3ad16848ee8765ab5f60a050b280d6ea585e5b34051b15f6b8934ef01ceb85f648","0xac3893df7b4ceab23c6b9054e48e8ba40d6e5beda8fbe90b814f992f52494186969b35d8c4cdc3c99890a222c9c09008","0xa41293ad22fae81dea94467bc1488c3707f3d4765059173980be93995fa4fcc3c9340796e3eed0beeb0ba0d9bb4fa3aa","0xa0543e77acd2aeecde13d18d258aeb2c7397b77f17c35a1992e8666ea7abcd8a38ec6c2741bd929abba2f766138618cc","0x92e79b22bc40e69f6527c969500ca543899105837b6b1075fa1796755c723462059b3d1b028e0b3df2559fa440e09175","0xa1fa1eac8f41a5197a6fb4aa1eae1a031c89f9c13ff9448338b222780cf9022e0b0925d930c37501a0ef7b2b00fdaf83","0xb3cb29ff73229f0637335f28a08ad8c5f166066f27c6c175164d0f26766a927f843b987ee9b309ed71cbf0a65d483831","0x84d4ab787f0ac00f104f4a734dc693d62d48c2aeb03913153da62c2ae2c27d11b1110dcef8980368dd84682ea2c1a308","0xab6a8e4bbc78d4a7b291ad3e9a8fe2d65f640524ba3181123b09d2d18a9e300e2509ccf7000fe47e75b65f3e992a2e7e","0xb7805ebe4f1a4df414003dc10bca805f2ab86ca75820012653e8f9b79c405196b0e2cab099f2ab953d67f0d60d31a0f9","0xb12c582454148338ea605d22bd00a754109063e22617f1f8ac8ddf5502c22a181c50c216c3617b9852aa5f26af56b323","0x86333ad9f898947e31ce747728dc8c887479e18d36ff3013f69ebef807d82c6981543b5c3788af93c4d912ba084d3cba","0xb514efa310dc4ad1258add138891e540d8c87142a881b5f46563cc58ecd1488e6d3a2fca54c0
b72a929f3364ca8c333e","0xaa0a30f92843cf2f484066a783a1d75a7aa6f41f00b421d4baf20a6ac7886c468d0eea7ca8b17dd22f4f74631b62b640","0xb3b7dc63baec9a752e8433c0cdee4d0f9bc41f66f2b8d132faf925eef9cf89aae756fc132c45910f057122462605dc10","0xb9b8190dac5bfdeb59fd44f4da41a57e7f1e7d2c21faba9da91fa45cbeca06dcf299c9ae22f0c89ece11ac46352d619f","0x89f8cf36501ad8bdfeab863752a9090e3bfda57cf8fdeca2944864dc05925f501e252c048221bcc57136ab09a64b64b2","0xb0cbfaf317f05f97be47fc9d69eda2dd82500e00d42612f271a1fe24626408c28881f171e855bd5bd67409f9847502b4","0xa7c21a8fcede581bfd9847b6835eda62ba250bea81f1bb17372c800a19c732abe03064e64a2f865d974fb636cab4b859","0x95f9df524ba7a4667351696c4176b505d8ea3659f5ff2701173064acc624af69a0fad4970963736383b979830cb32260","0x856a74fe8b37a2e3afeac858c8632200485d438422a16ae3b29f359e470e8244995c63ad79c7e007ed063f178d0306fd","0xb37faa4d78fdc0bb9d403674dbea0176c2014a171c7be8527b54f7d1a32a76883d3422a3e7a5f5fcc5e9b31b57822eeb","0x8d37234d8594ec3fe75670b5c9cc1ec3537564d4739b2682a75b18b08401869a4264c0f264354219d8d896cded715db4","0xb5289ee5737f0e0bde485d32096d23387d68dab8f01f47821ab4f06cc79a967afe7355e72dc0c751d96b2747b26f6255","0x9085e1fdf9f813e9c3b8232d3c8863cd84ab30d45e8e0d3d6a0abd9ebc6fd70cdf749ff4d04390000e14c7d8c6655fc7","0x93a388c83630331eca4da37ea4a97b3b453238af474817cc0a0727fd3138dcb4a22de38c04783ec829c22cb459cb4e8e","0xa5377116027c5d061dbe24c240b891c08cdd8cd3f0899e848d682c873aff5b8132c1e7cfe76d2e5ed97ee0eb1d42cb68","0xa274c84b04338ed28d74683e2a7519c2591a3ce37c294d6f6e678f7d628be2db8eff253ede21823e2df7183e6552f622","0x8bc201147a842453a50bec3ac97671397bc086d6dfc9377fa38c2124cdc286abda69b7324f47d64da094ae011d98d9d9","0x9842d0c066c524592b76fbec5132bc628e5e1d21c424bec4555efca8619cc1fd8ea3161febcb8b9e8ab54702f4e815e2","0xa19191b713a07efe85c266f839d14e25660ee74452e6c691cd9997d85ae4f732052d802d3deb018bdd847caa298a894b","0xa24f71fc0db504da4e287dd118a4a74301cbcd16033937ba2abc8417956fcb4ae19b8e63b931795544a978137eff51cb","0xa90eec4a6a3a4b8f9a5b93d978b5026fcf812fe65585b008d7e08c4aaf21195a1d0699f12fc16f79b6a18a369af45771","0x8b551cf89737d7d06d9b3b9c4c1c73b41f2ea0af4540999c70b82dabff8580797cf0a3caf34c86c59a7069eb2e38f087","0xb8d312e6c635e7a216a1cda075ae77ba3e1d2fd501dc31e83496e6e81ed5d9c7799f8e578869c2e0e256fb29f5de10a7","0x8d144bdb8cae0b2cdb5b33d44bbc96984a5925202506a8cc65eb67ac904b466f5a7fe3e1cbf04aa785bbb7348c4bb73c","0xa101b3d58b7a98659244b88de0b478b3fb87dc5fc6031f6e689b99edf498abd43e151fd32bd4bbd240e0b3e59c440359","0x907453abca7d8e7151a05cc3d506c988007692fe7401395dc93177d0d07d114ab6cca0cc658eb94c0223fe8658295cad","0x825329ffbe2147ddb68f63a0a67f32d7f309657b8e5d9ab5bb34b3730bfa2c77a23eaaadb05def7d9f94a9e08fdc1e96","0x88ee923c95c1dac99ae7ed6067906d734d793c5dc5d26339c1bb3314abe201c5dccb33b9007351885eb2754e9a8ea06c","0x98bc9798543f5f1adc9f2cfcfa72331989420e9c3f6598c45269f0dc9b7c8607bbeaf03faa0aea2ddde2b8f17fdceff5","0x8ee87877702a79aef923ab970db6fa81561b3c07d5bf1a072af0a7bad765b4cbaec910afe1a91703feacc7822fa38a94","0x8060b9584aa294fe8adc2b22f67e988bc6da768eae91e429dcc43ddc53cfcc5d6753fdc1b420b268c7eb2fb50736a970","0xb344a5524d80a2f051870c7001f74fcf348a70fcf78dbd20c6ff9ca85d81567d2318c8b8089f2c4f195d6aec9fc15fa6","0x8f5a5d893e1936ed062149d20eb73d98b62b7f50ab5d93a6429c03656b36688d1c80cb5010e4977491e51fa0d7dd35d5","0x86fa32ebbf97328c5f5f15564e1238297e289ec3219b9a741724e9f3ae8d5c15277008f555863a478b247ba5dc601d44","0x9557e55377e279f4b6b5e0ffe01eca037cc13aac242d67dfcd0374a1e775c5ed5cb30c25fe21143fee54e3302d34a3ea","0x8cb6bcbc39372d23464a416ea7039f57ba8413cf3f00d9a7a5b356ab20dcb8ed11b3561f7bce372b8534d2870c7ee270
","0xb5d59075cb5abde5391f64b6c3b8b50adc6e1f654e2a580b6d6d6eff3f4fbdd8fffc92e06809c393f5c8eab37f774c4b","0xafcfb6903ef13e493a1f7308675582f15af0403b6553e8c37afb8b2808ad21b88b347dc139464367dc260df075fea1ad","0x810fbbe808375735dd22d5bc7fc3828dc49fdd22cc2d7661604e7ac9c4535c1df578780affb3b895a0831640a945bcad","0x8056b0c678803b416f924e09a6299a33cf9ad7da6fe1ad7accefe95c179e0077da36815fde3716711c394e2c5ea7127f","0x8b67403702d06979be19f1d6dc3ec73cc2e81254d6b7d0cc49cd4fdda8cd51ab0835c1d2d26fc0ecab5df90585c2f351","0x87f97f9e6d4be07e8db250e5dd2bffdf1390665bc5709f2b631a6fa69a7fca958f19bd7cc617183da1f50ee63e9352b5","0xae151310985940471e6803fcf37600d7fa98830613e381e00dab943aec32c14162d51c4598e8847148148000d6e5af5c","0x81eb537b35b7602c45441cfc61b27fa9a30d3998fad35a064e05bc9479e9f10b62eba2b234b348219eea3cadcaac64bb","0x8a441434934180ab6f5bc541f86ebd06eadbee01f438836d797e930fa803a51510e005c9248cecc231a775b74d12b5e9","0x81f3c250a27ba14d8496a5092b145629eb2c2e6a5298438670375363f57e2798207832c8027c3e9238ad94ecdadfc4df","0xa6217c311f2f3db02ceaa5b6096849fe92b6f4b6f1491535ef8525f6ccee6130bed2809e625073ecbaddd4a3eb3df186","0x82d1c396f0388b942cf22b119d7ef1ad03d3dad49a74d9d01649ee284f377c8daddd095d596871669e16160299a210db","0xa40ddf7043c5d72a7246bd727b07f7fff1549f0e443d611de6f9976c37448b21664c5089c57f20105102d935ab82f27b","0xb6c03c1c97adf0c4bf4447ec71366c6c1bff401ba46236cd4a33d39291e7a1f0bb34bd078ba3a18d15c98993b153a279","0x8a94f5f632068399c359c4b3a3653cb6df2b207379b3d0cdace51afdf70d6d5cce6b89a2b0fee66744eba86c98fb21c2","0xb2f19e78ee85073f680c3bba1f07fd31b057c00b97040357d97855b54a0b5accb0d3b05b2a294568fcd6a4be6f266950","0xa74632d13bbe2d64b51d7a9c3ae0a5a971c19f51cf7596a807cea053e6a0f3719700976d4e394b356c0329a2dced9aa2","0xafef616d341a9bc94393b8dfba68ff0581436aa3a3adb7c26a1bbf2cf19fa877066191681f71f17f3cd6f9cf6bf70b5a","0x8ce96d93ae217408acf7eb0f9cbb9563363e5c7002e19bbe1e80760bc9d449daee2118f3878b955163ed664516b97294","0x8414f79b496176bc8b8e25f8e4cfee28f4f1c2ddab099d63d2aca1b6403d26a571152fc3edb97794767a7c4686ad557c","0xb6c61d01fd8ce087ef9f079bf25bf10090db483dd4f88c4a786d31c1bdf52065651c1f5523f20c21e75cea17df69ab73","0xa5790fd629be70545093631efadddc136661f63b65ec682609c38ef7d3d7fa4e56bdf94f06e263bc055b90cb1c6bcefe","0xb515a767e95704fb7597bca9e46f1753abacdc0e56e867ee3c6f4cd382643c2a28e65312c05ad040eaa3a8cbe7217a65","0x8135806a02ead6aa92e9adb6fefb91349837ab73105aaa7be488ef966aa8dfaafdfa64bbae30fcbfa55dd135a036a863","0x8f22435702716d76b1369750694540742d909d5e72b54d0878245fab7c269953b1c6f2b29c66f08d5e0263ca3a731771","0x8e0f8a8e8753e077dac95848212aeffd51c23d9b6d611df8b102f654089401954413ecbedc6367561ca599512ae5dda7","0x815a9084e3e2345f24c5fa559deec21ee1352fb60f4025c0779be65057f2d528a3d91593bd30d3a185f5ec53a9950676","0x967e6555ccba395b2cc1605f8484c5112c7b263f41ce8439a99fd1c71c5ed14ad02684d6f636364199ca48afbbde13be","0x8cd0ccf17682950b34c796a41e2ea7dd5367aba5e80a907e01f4cdc611e4a411918215e5aebf4292f8b24765d73314a6","0xa58bf1bbb377e4b3915df6f058a0f53b8fb8130fdec8c391f6bc82065694d0be59bb67ffb540e6c42cc8b380c6e36359","0x92af3151d9e6bfb3383d85433e953c0160859f759b0988431ec5893542ba40288f65db43c78a904325ef8d324988f09d","0x8011bbb05705167afb47d4425065630f54cb86cd462095e83b81dfebf348f846e4d8fbcf1c13208f5de1931f81da40b9","0x81c743c104fc3cb047885c9fa0fb9705c3a83ee24f690f539f4985509c3dafd507af3f6a2128276f45d5939ef70c167f","0xa2c9679b151c041aaf5efeac5a737a8f70d1631d931609fca16be1905682f35e291292874cb3b03f14994f98573c6f44","0xa4949b86c4e5b1d5c82a337e5ce6b2718b1f7c215148c8bfb7e7c44ec86c5c9476048fc5c01f57cb0920876478c41ad6","0x86c2495088bd177
2152e527a1da0ef473f924ea9ab0e5b8077df859c28078f73c4e22e3a906b507fdf217c3c80808b5c","0x892e0a910dcf162bcea379763c3e2349349e4cda9402949255ac4a78dd5a47e0bf42f5bd0913951576b1d206dc1e536a","0xa7009b2c6b396138afe4754b7cc10dee557c51c7f1a357a11486b3253818531f781ea8107360c8d4c3b1cd96282353c0","0x911763ef439c086065cc7b4e57484ed6d693ea44acee4b18c9fd998116da55fbe7dcb8d2a0f0f9b32132fca82d73dff6","0xa722000b95a4a2d40bed81870793f15ba2af633f9892df507f2842e52452e02b5ea8dea6a043c2b2611d82376e33742a","0x9387ac49477bd719c2f92240d0bdfcf9767aad247ca93dc51e56106463206bc343a8ec855eb803471629a66fffb565d6","0x92819a1fa48ab4902939bb72a0a4e6143c058ea42b42f9bc6cea5df45f49724e2530daf3fc4f097cceefa2a8b9db0076","0x98eac7b04537653bc0f4941aae732e4b1f84bd276c992c64a219b8715eb1fb829b5cbd997d57feb15c7694c468f95f70","0xb275e7ba848ce21bf7996e12dbeb8dadb5d0e4f1cb5a0248a4f8f9c9fe6c74e3c93f4b61edbcb0a51af5a141e1c14bc7","0x97243189285aba4d49c53770c242f2faf5fd3914451da4931472e3290164f7663c726cf86020f8f181e568c72fd172d1","0x839b0b3c25dd412bee3dc24653b873cc65454f8f16186bb707bcd58259c0b6765fa4c195403209179192a4455c95f3b8","0x8689d1a870514568a074a38232e2ceb4d7df30fabeb76cff0aed5b42bf7f02baea12c5fadf69f4713464dbd52aafa55f","0x8958ae7b290f0b00d17c3e9fdb4dbf168432b457c7676829299dd428984aba892de1966fc106cfc58a772862ecce3976","0xa422bc6bd68b8870cfa5bc4ce71781fd7f4368b564d7f1e0917f6013c8bbb5b240a257f89ecfdbecb40fe0f3aa31d310","0xaa61f78130cebe09bc9a2c0a37f0dd57ed2d702962e37d38b1df7f17dc554b1d4b7a39a44182a452ce4c5eb31fa4cfcc","0xb7918bd114f37869bf1a459023386825821bfadce545201929d13ac3256d92a431e34f690a55d944f77d0b652cefeffc","0x819bba35fb6ace1510920d4dcff30aa682a3c9af9022e287751a6a6649b00c5402f14b6309f0aeef8fce312a0402915e","0x8b7c9ad446c6f63c11e1c24e24014bd570862b65d53684e107ba9ad381e81a2eaa96731b4b33536efd55e0f055071274","0x8fe79b53f06d33386c0ec7d6d521183c13199498594a46d44a8a716932c3ec480c60be398650bbfa044fa791c4e99b65","0x9558e10fb81250b9844c99648cf38fa05ec1e65d0ccbb18aa17f2d1f503144baf59d802c25be8cc0879fff82ed5034ad","0xb538a7b97fbd702ba84645ca0a63725be1e2891c784b1d599e54e3480e4670d0025526674ef5cf2f87dddf2290ba09f0","0x92eafe2e869a3dd8519bbbceb630585c6eb21712b2f31e1b63067c0acb5f9bdbbcbdb612db4ea7f9cc4e7be83d31973f","0xb40d21390bb813ab7b70a010dff64c57178418c62685761784e37d327ba3cb9ef62df87ecb84277c325a637fe3709732","0xb349e6fbf778c4af35fbed33130bd8a7216ed3ba0a79163ebb556e8eb8e1a7dad3456ddd700dad9d08d202491c51b939","0xa8fdaedecb251f892b66c669e34137f2650509ade5d38fbe8a05d9b9184bb3b2d416186a3640429bd1f3e4b903c159dd","0xac6167ebfee1dbab338eff7642f5e785fc21ef0b4ddd6660333fe398068cbd6c42585f62e81e4edbb72161ce852a1a4f","0x874b1fbf2ebe140c683bd7e4e0ab017afa5d4ad38055aaa83ee6bbef77dbc88a6ce8eb0dcc48f0155244af6f86f34c2d","0x903c58e57ddd9c446afab8256a6bb6c911121e6ccfb4f9b4ed3e2ed922a0e500a5cb7fa379d5285bc16e11dac90d1fda","0x8dae7a0cffa2fd166859cd1bf10ff82dd1932e488af377366b7efc0d5dec85f85fe5e8150ff86a79a39cefc29631733a","0xaa047857a47cc4dfc08585f28640420fcf105b881fd59a6cf7890a36516af0644d143b73f3515ab48faaa621168f8c31","0x864508f7077c266cc0cb3f7f001cb6e27125ebfe79ab57a123a8195f2e27d3799ff98413e8483c533b46a816a3557f1f","0x8bcd45ab1f9cbab36937a27e724af819838f66dfeb15923f8113654ff877bd8667c54f6307aaf0c35027ca11b6229bfd","0xb21aa34da9ab0a48fcfdd291df224697ce0c1ebc0e9b022fdee8750a1a4b5ba421c419541ed5c98b461eecf363047471","0xa9a18a2ab2fae14542dc336269fe612e9c1af6cf0c9ac933679a2f2cb77d3c304114f4d219ca66fe288adde30716775b","0xb5205989b92c58bdda71817f9a897e84100b5c4e708de1fced5c286f7a6f01ae96b1c8d845f3a320d77c8e2703c0e8b1","0xa364059412bbcc17b8907d43ac8e5df90bc
87fd1724b5f99832d0d24559fae6fa76a74cff1d1eac8cbac6ec80b44af20","0xae709f2c339886b31450834cf29a38b26eb3b0779bd77c9ac269a8a925d1d78ea3837876c654b61a8fe834b3b6940808","0x8802581bba66e1952ac4dab36af371f66778958f4612901d95e5cac17f59165e6064371d02de8fb6fccf89c6dc8bd118","0xa313252df653e29c672cbcfd2d4f775089cb77be1077381cf4dc9533790e88af6cedc8a119158e7da5bf6806ad9b91a1","0x992a065b4152c7ef11515cd54ba9d191fda44032a01aed954acff3443377ee16680c7248d530b746b8c6dee2d634e68c","0xb627b683ee2b32c1ab4ccd27b9f6cce2fe097d96386fa0e5c182ad997c4c422ab8dfc03870cd830b8c774feb66537282","0xb823cf8a9aee03dadd013eb9efe40a201b4b57ef67efaae9f99683005f5d1bf55e950bf4af0774f50859d743642d3fea","0xb8a7449ffac0a3f206677097baf7ce00ca07a4d2bd9b5356fbcb83f3649b0fda07cfebad220c1066afba89e5a52abf4b","0xb2dd1a2f986395bb4e3e960fbbe823dbb154f823284ebc9068502c19a7609790ec0073d08bfa63f71e30c7161b6ef966","0x98e5236de4281245234f5d40a25b503505af140b503a035fc25a26159a9074ec81512b28f324c56ea2c9a5aa7ce90805","0x89070847dc8bbf5bc4ed073aa2e2a1f699cf0c2ca226f185a0671cecc54e7d3e14cd475c7752314a7a8e7476829da4bc","0xa9402dc9117fdb39c4734c0688254f23aed3dce94f5f53f5b7ef2b4bf1b71a67f85ab1a38ec224a59691f3bee050aeb3","0x957288f9866a4bf56a4204218ccc583f717d7ce45c01ea27142a7e245ad04a07f289cc044f8cf1f21d35e67e39299e9c","0xb2fb31ccb4e69113763d7247d0fc8edaae69b550c5c56aecacfd780c7217dc672f9fb7496edf4aba65dacf3361268e5b","0xb44a4526b2f1d6eb2aa8dba23bfa385ff7634572ab2afddd0546c3beb630fbfe85a32f42dd287a7fec069041411537f7","0x8db5a6660c3ac7fd7a093573940f068ee79a82bc17312af900b51c8c439336bc86ca646c6b7ab13aaaa008a24ca508ab","0x8f9899a6d7e8eb4367beb5c060a1f8e94d8a21099033ae582118477265155ba9e72176a67f7f25d7bad75a152b56e21a","0xa67de0e91ade8d69a0e00c9ff33ee2909b8a609357095fa12319e6158570c232e5b6f4647522efb7345ce0052aa9d489","0x82eb2414898e9c3023d57907a2b17de8e7eea5269029d05a94bfd7bf5685ac4a799110fbb375eb5e0e2bd16acf6458ae","0x94451fc7fea3c5a89ba701004a9693bab555cb622caf0896b678faba040409fdfd14a978979038b2a81e8f0abc4994d2","0xac879a5bb433998e289809a4a966bd02b4bf6a9c1cc276454e39c886efcf4fc68baebed575826bde577ab5aa71d735a9","0x880c0f8f49c875dfd62b4ddedde0f5c8b19f5687e693717f7e5c031bc580e58e13ab497d48b4874130a18743c59fdce3","0xb582af8d8ff0bf76f0a3934775e0b54c0e8fed893245d7d89cae65b03c8125b7237edc29dc45b4fe1a3fe6db45d280ee","0x89f337882ed3ae060aaee98efa20d79b6822bde9708c1c5fcee365d0ec9297f694cae37d38fd8e3d49717c1e86f078e7","0x826d2c1faea54061848b484e288a5f4de0d221258178cf87f72e14baaa4acc21322f8c9eab5dde612ef497f2d2e1d60b","0xa5333d4f227543e9cd741ccf3b81db79f2f03ca9e649e40d6a6e8ff9073e06da83683566d3b3c8d7b258c62970fb24d1","0xa28f08c473db06aaf4c043a2fae82b3c8cfaa160bce793a4c208e4e168fb1c65115ff8139dea06453c5963d95e922b94","0x8162546135cc5e124e9683bdfaa45833c18553ff06a0861c887dc84a5b12ae8cd4697f6794c7ef6230492c32faba7014","0xb23f0d05b74c08d6a7df1760792be83a761b36e3f8ae360f3c363fb196e2a9dd2de2e492e49d36561366e14daa77155c","0xb6f70d6c546722d3907c708d630dbe289771d2c8bf059c2e32b77f224696d750b4dda9b3a014debda38e7d02c9a77585","0x83bf4c4a9f3ca022c631017e7a30ea205ba97f7f5927cba8fc8489a4646eac6712cb821c5668c9ffe94d69d524374a27","0xb0371475425a8076d0dd5f733f55aabbe42d20a7c8ea7da352e736d4d35a327b2beb370dfcb05284e22cfd69c5f6c4cc","0xa0031ba7522c79211416c2cca3aa5450f96f8fee711552a30889910970ba13608646538781a2c08b834b140aadd7166f","0x99d273c80c7f2dc6045d4ed355d9fc6f74e93549d961f4a3b73cd38683f905934d359058cd1fc4da8083c7d75070487f","0xb0e4b0efa3237793e9dcce86d75aafe9879c5fa23f0d628649aef2130454dcf72578f9bf227b9d2b9e05617468e82588","0xa5ab076fa2e1c5c51f3ae101afdd596ad9d106bba7882b359c43d85
48b64f528af19afa76cd6f40da1e6c5fca4def3fa","0x8ce2299e570331d60f6a6eff1b271097cd5f1c0e1113fc69b89c6a0f685dabea3e5bc2ac6bd789aa492ab189f89be494","0x91b829068874d911a310a5f9dee001021f97471307b5a3de9ec336870ec597413e1d92010ce320b619f38bed7c4f7910","0xb14fe91f4b07bf33b046e9285b66cb07927f3a8da0af548ac2569b4c4fb1309d3ced76d733051a20814e90dd5b75ffd1","0xabaab92ea6152d40f82940277c725aa768a631ee0b37f5961667f82fb990fc11e6d3a6a2752b0c6f94563ed9bb28265c","0xb7fe28543eca2a716859a76ab9092f135337e28109544f6bd2727728d0a7650428af5713171ea60bfc273d1c821d992c","0x8a4917b2ab749fc7343fc64bdf51b6c0698ff15d740cc7baf248c030475c097097d5a473bcc00d8c25817563fe0447b4","0xaa96156d1379553256350a0a3250166add75948fb9cde62aa555a0a9dc0a9cb7f2f7b8428aff66097bf6bfedaf14bbe2","0xae4ffeb9bdc76830d3eca2b705f30c1bdede6412fa064260a21562c8850c7fb611ec62bc68479fe48f692833e6f66d8d","0xb96543caaba9d051600a14997765d49e4ab10b07c7a92cccf0c90b309e6da334fdd6d18c96806cbb67a7801024fbd3c7","0x97b2b9ad76f19f500fcc94ca8e434176249f542ac66e5881a3dccd07354bdab6a2157018b19f8459437a68d8b86ba8e0","0xa8d206f6c5a14c80005849474fde44b1e7bcf0b2d52068f5f97504c3c035b09e65e56d1cf4b5322791ae2c2fdbd61859","0x936bad397ad577a70cf99bf9056584a61bd7f02d2d5a6cf219c05d770ae30a5cd902ba38366ce636067fc1dd10108d31","0xa77e30195ee402b84f3882e2286bf5380c0ed374a112dbd11e16cef6b6b61ab209d4635e6f35cdaaa72c1a1981d5dabe","0xa46ba4d3947188590a43c180757886a453a0503f79cc435322d92490446f37419c7b999fdf868a023601078070e03346","0x80d8d4c5542f223d48240b445d4d8cf6a75d120b060bc08c45e99a13028b809d910b534d2ac47fb7068930c54efd8da9","0x803be9c68c91b42b68e1f55e58917a477a9a6265e679ca44ee30d3eb92453f8c89c64eafc04c970d6831edd33d066902","0xb14b2b3d0dfe2bb57cee4cd72765b60ac33c1056580950be005790176543826c1d4fbd737f6cfeada6c735543244ab57","0xa9e480188bba1b8fb7105ff12215706665fd35bf1117bacfb6ab6985f4dbc181229873b82e5e18323c2b8f5de03258e0","0xa66a0f0779436a9a3999996d1e6d3000f22c2cac8e0b29cddef9636393c7f1457fb188a293b6c875b05d68d138a7cc4a","0x848397366300ab40c52d0dbbdafbafef6cd3dadf1503bb14b430f52bb9724188928ac26f6292a2412bc7d7aa620763c8","0x95466cc1a78c9f33a9aaa3829a4c8a690af074916b56f43ae46a67a12bb537a5ac6dbe61590344a25b44e8512355a4a7","0x8b5f7a959f818e3baf0887f140f4575cac093d0aece27e23b823cf421f34d6e4ff4bb8384426e33e8ec7b5eed51f6b5c","0x8d5e1368ec7e3c65640d216bcc5d076f3d9845924c734a34f3558ac0f16e40597c1a775a25bf38b187213fbdba17c93b","0xb4647c1b823516880f60d20c5cc38c7f80b363c19d191e8992226799718ee26b522a12ecb66556ed3d483aa4824f3326","0xac3abaea9cd283eb347efda4ed9086ea3acf495043e08d0d19945876329e8675224b685612a6badf8fd72fb6274902b1","0x8eae1ce292d317aaa71bcf6e77e654914edd5090e2e1ebab78b18bb41b9b1bc2e697439f54a44c0c8aa0d436ebe6e1a9","0x94dc7d1aec2c28eb43d93b111fa59aaa0d77d5a09501220bd411768c3e52208806abf973c6a452fd8292ff6490e0c9e2","0x8fd8967f8e506fef27d17b435d6b86b232ec71c1036351f12e6fb8a2e12daf01d0ee04451fb944d0f1bf7fd20e714d02","0x824e6865be55d43032f0fec65b3480ea89b0a2bf860872237a19a54bc186a85d2f8f9989cc837fbb325b7c72d9babe2c","0x8bd361f5adb27fd6f4e3f5de866e2befda6a8454efeb704aacc606f528c03f0faae888f60310e49440496abd84083ce2","0xb098a3c49f2aaa28b6b3e85bc40ce6a9cdd02134ee522ae73771e667ad7629c8d82c393fba9f27f5416986af4c261438","0xb385f5ca285ff2cfe64dcaa32dcde869c28996ed091542600a0b46f65f3f5a38428cca46029ede72b6cf43e12279e3d3","0x8196b03d011e5be5288196ef7d47137d6f9237a635ab913acdf9c595fa521d9e2df722090ec7eb0203544ee88178fc5f","0x8ed1270211ef928db18e502271b7edf24d0bbd11d97f2786aee772d70c2029e28095cf8f650b0328cc8a4c38d045316d","0xa52ab60e28d69b333d597a445884d44fd2a7e1923dd60f763951e1e45f83e27a4dac745f3b9
eff75977b3280e132c15d","0x91e9fe78cdac578f4a4687f71b800b35da54b824b1886dafec073a3c977ce7a25038a2f3a5b1e35c2c8c9d1a7312417c","0xa42832173f9d9491c7bd93b21497fbfa4121687cd4d2ab572e80753d7edcbb42cfa49f460026fbde52f420786751a138","0x97b947126d84dcc70c97be3c04b3de3f239b1c4914342fa643b1a4bb8c4fe45c0fcb585700d13a7ed50784790c54bef9","0x860e407d353eac070e2418ef6cb80b96fc5f6661d6333e634f6f306779651588037be4c2419562c89c61f9aa2c4947f5","0xb2c9d93c3ba4e511b0560b55d3501bf28a510745fd666b3cb532db051e6a8617841ea2f071dda6c9f15619c7bfd2737f","0x8596f4d239aeeac78311207904d1bd863ef68e769629cc379db60e019aaf05a9d5cd31dc8e630b31e106a3a93e47cbc5","0x8b26e14e2e136b65c5e9e5c2022cee8c255834ea427552f780a6ca130a6446102f2a6f334c3f9a0308c53df09e3dba7e","0xb54724354eb515a3c8bed0d0677ff1db94ac0a07043459b4358cb90e3e1aa38ac23f2caa3072cf9647275d7cd61d0e80","0xb7ce9fe0e515e7a6b2d7ddcb92bc0196416ff04199326aea57996eef8c5b1548bd8569012210da317f7c0074691d01b7","0xa1a13549c82c877253ddefa36a29ea6a23695ee401fdd48e65f6f61e5ebd956d5e0edeff99484e9075cb35071fec41e2","0x838ba0c1e5bd1a6da05611ff1822b8622457ebd019cb065ece36a2d176bd2d889511328120b8a357e44569e7f640c1e6","0xb916eccff2a95519400bbf76b5f576cbe53cf200410370a19d77734dc04c05b585cfe382e8864e67142d548cd3c4c2f4","0xa610447cb7ca6eea53a6ff1f5fe562377dcb7f4aaa7300f755a4f5e8eba61e863c51dc2aa9a29b35525b550fbc32a0fe","0x9620e8f0f0ee9a4719aa9685eeb1049c5c77659ba6149ec4c158f999cfd09514794b23388879931fe26fea03fa471fd3","0xa9dcf8b679e276583cf5b9360702a185470d09aea463dc474ee9c8aee91ef089dacb073e334e47fbc78ec5417c90465c","0x8c9adee8410bdd99e5b285744cee61e2593b6300ff31a8a83b0ec28da59475a5c6fb9346fe43aadea2e6c3dad2a8e30a","0x97d5afe9b3897d7b8bb628b7220cf02d8ee4e9d0b78f5000d500aaf4c1df9251aaaabfd1601626519f9d66f00a821d4e","0x8a382418157b601ce4c3501d3b8409ca98136a4ef6abcbf62885e16e215b76b035c94d149cc41ff92e42ccd7c43b9b3d","0xb64b8d11fb3b01abb2646ac99fdb9c02b804ce15d98f9fe0fbf1c9df8440c71417487feb6cdf51e3e81d37104b19e012","0x849d7d044f9d8f0aab346a9374f0b3a5d14a9d1faa83dbacccbdc629ad1ef903a990940255564770537f8567521d17f0","0x829dbb0c76b996c2a91b4cbbe93ba455ca0d5729755e5f0c92aaee37dff7f36fcdc06f33aca41f1b609c784127b67d88","0x85a7c0069047b978422d264d831ab816435f63938015d2e977222b6b5746066c0071b7f89267027f8a975206ed25c1b0","0x84b9fbc1cfb302df1acdcf3dc5d66fd1edfe7839f7a3b2fb3a0d5548656249dd556104d7c32b73967bccf0f5bdcf9e3b","0x972220ac5b807f53eac37dccfc2ad355d8b21ea6a9c9b011c09fe440ddcdf7513e0b43d7692c09ded80d7040e26aa28f","0x855885ed0b21350baeca890811f344c553cf9c21024649c722453138ba29193c6b02c4b4994cd414035486f923472e28","0x841874783ae6d9d0e59daea03e96a01cbbe4ecaced91ae4f2c8386e0d87b3128e6d893c98d17c59e4de1098e1ad519dd","0x827e50fc9ce56f97a4c3f2f4cbaf0b22f1c3ce6f844ff0ef93a9c57a09b8bf91ebfbd2ba9c7f83c442920bffdaf288cc","0xa441f9136c7aa4c08d5b3534921b730e41ee91ab506313e1ba5f7c6f19fd2d2e1594e88c219834e92e6fb95356385aa7","0x97d75b144471bf580099dd6842b823ec0e6c1fb86dd0da0db195e65524129ea8b6fd4a7a9bbf37146269e938a6956596","0xa4b6fa87f09d5a29252efb2b3aaab6b3b6ea9fab343132a651630206254a25378e3e9d6c96c3d14c150d01817d375a8e","0xa31a671876d5d1e95fe2b8858dc69967231190880529d57d3cab7f9f4a2b9b458ac9ee5bdaa3289158141bf18f559efb","0x90bee6fff4338ba825974021b3b2a84e36d617e53857321f13d2b3d4a28954e6de3b3c0e629d61823d18a9763313b3bf","0x96b622a63153f393bb419bfcf88272ea8b3560dbd46b0aa07ada3a6223990d0abdd6c2adb356ef4be5641688c8d83941","0x84c202adeaff9293698022bc0381adba2cd959f9a35a4e8472288fd68f96f6de8be9da314c526d88e291c96b1f3d6db9","0x8ca01a143b8d13809e5a8024d03e6bc9492e22226073ef6e327edf1328ef4aff82d0bcccee92cb8e212831fa35fe120
4","0xb2f970dbad15bfbefb38903c9bcc043d1367055c55dc1100a850f5eb816a4252c8c194b3132c929105511e14ea10a67d","0xa5e36556472a95ad57eb90c3b6623671b03eafd842238f01a081997ffc6e2401f76e781d049bb4aa94d899313577a9cf","0x8d1057071051772f7c8bedce53a862af6fd530dd56ae6321eaf2b9fc6a68beff5ed745e1c429ad09d5a118650bfd420a","0x8aadc4f70ace4fcb8d93a78610779748dcffc36182d45b932c226dc90e48238ea5daa91f137c65ed532352c4c4d57416","0xa2ea05ae37e673b4343232ae685ee14e6b88b867aef6dfac35db3589cbcd76f99540fed5c2641d5bb5a4a9f808e9bf0d","0x947f1abad982d65648ae4978e094332b4ecb90f482c9be5741d5d1cf5a28acf4680f1977bf6e49dd2174c37f11e01296","0xa27b144f1565e4047ba0e3f4840ef19b5095d1e281eaa463c5358f932114cbd018aa6dcf97546465cf2946d014d8e6d6","0x8574e1fc3acade47cd4539df578ce9205e745e161b91e59e4d088711a7ab5aa3b410d517d7304b92109924d9e2af8895","0xa48ee6b86b88015d6f0d282c1ae01d2a5b9e8c7aa3d0c18b35943dceb1af580d08a65f54dc6903cde82fd0d73ce94722","0x8875650cec543a7bf02ea4f2848a61d167a66c91ffaefe31a9e38dc8511c6a25bde431007eefe27a62af3655aca208dc","0x999b0a6e040372e61937bf0d68374e230346b654b5a0f591a59d33a4f95bdb2f3581db7c7ccb420cd7699ed709c50713","0x878c9e56c7100c5e47bbe77dc8da5c5fe706cec94d37fa729633bca63cace7c40102eee780fcdabb655f5fa47a99600e","0x865006fb5b475ada5e935f27b96f9425fc2d5449a3c106aa366e55ebed3b4ee42adc3c3f0ac19fd129b40bc7d6bc4f63","0xb7a7da847f1202e7bc1672553e68904715e84fd897d529243e3ecda59faa4e17ba99c649a802d53f6b8dfdd51f01fb74","0x8b2fb4432c05653303d8c8436473682933a5cb604da10c118ecfcd2c8a0e3132e125afef562bdbcc3df936164e5ce4f2","0x808d95762d33ddfa5d0ee3d7d9f327de21a994d681a5f372e2e3632963ea974da7f1f9e5bac8ccce24293509d1f54d27","0x932946532e3c397990a1df0e94c90e1e45133e347a39b6714c695be21aeb2d309504cb6b1dde7228ff6f6353f73e1ca2","0x9705e7c93f0cdfaa3fa96821f830fe53402ad0806036cd1b48adc2f022d8e781c1fbdab60215ce85c653203d98426da3","0xaa180819531c3ec1feb829d789cb2092964c069974ae4faad60e04a6afcce5c3a59aec9f11291e6d110a788d22532bc6","0x88f755097f7e25cb7dd3c449520c89b83ae9e119778efabb54fbd5c5714b6f37c5f9e0346c58c6ab09c1aef2483f895d","0x99fc03ab7810e94104c494f7e40b900f475fde65bdec853e60807ffd3f531d74de43335c3b2646b5b8c26804a7448898","0xaf2dea9683086bed1a179110efb227c9c00e76cd00a2015b089ccbcee46d1134aa18bda5d6cab6f82ae4c5cd2461ac21","0xa500f87ba9744787fdbb8e750702a3fd229de6b8817594348dec9a723b3c4240ddfa066262d002844b9e38240ce55658","0x924d0e45c780f5bc1c1f35d15dfc3da28036bdb59e4c5440606750ecc991b85be18bc9a240b6c983bc5430baa4c68287","0x865b11e0157b8bf4c5f336024b016a0162fc093069d44ac494723f56648bc4ded13dfb3896e924959ea11c96321afefc","0x93672d8607d4143a8f7894f1dcca83fb84906dc8d6dd7dd063bb0049cfc20c1efd933e06ca7bd03ea4cb5a5037990bfe","0x826891efbdff0360446825a61cd1fa04326dd90dae8c33dfb1ed97b045e165766dd070bd7105560994d0b2044bdea418","0x93c4a4a8bcbc8b190485cc3bc04175b7c0ed002c28c98a540919effd6ed908e540e6594f6db95cd65823017258fb3b1c","0xaeb2a0af2d2239fda9aa6b8234b019708e8f792834ff0dd9c487fa09d29800ddceddd6d7929faa9a3edcb9e1b3aa0d6b","0x87f11de7236d387863ec660d2b04db9ac08143a9a2c4dfff87727c95b4b1477e3bc473a91e5797313c58754905079643","0x80dc1db20067a844fe8baceca77f80db171a5ca967acb24e2d480eae9ceb91a3343c31ad1c95b721f390829084f0eae6","0x9825c31f1c18da0de3fa84399c8b40f8002c3cae211fb6a0623c76b097b4d39f5c50058f57a16362f7a575909d0a44a2","0xa99fc8de0c38dbf7b9e946de83943a6b46a762167bafe2a603fb9b86f094da30d6de7ed55d639aafc91936923ee414b3","0xad594678b407db5d6ea2e90528121f84f2b96a4113a252a30d359a721429857c204c1c1c4ff71d8bb5768c833f82e80e","0xb33d985e847b54510b9b007e31053732c8a495e43be158bd2ffcea25c6765bcbc7ca815f7c60b36ad088b955dd6e9350","0x815f8dfc6f90b3
342ca3fbd968c67f324dae8f74245cbf8bc3bef10e9440c65d3a2151f951e8d18959ba01c1b50b0ec1","0x94c608a362dd732a1abc56e338637c900d59013db8668e49398b3c7a0cae3f7e2f1d1bf94c0299eeafe6af7f76c88618","0x8ebd8446b23e5adfcc393adc5c52fe172f030a73e63cd2d515245ca0dd02782ceed5bcdd9ccd9c1b4c5953dfac9c340c","0x820437f3f6f9ad0f5d7502815b221b83755eb8dc56cd92c29e9535eb0b48fb8d08c9e4fcc26945f9c8cca60d89c44710","0x8910e4e8a56bf4be9cc3bbf0bf6b1182a2f48837a2ed3c2aaec7099bfd7f0c83e14e608876b17893a98021ff4ab2f20d","0x9633918fde348573eec15ce0ad53ac7e1823aac86429710a376ad661002ae6d049ded879383faaa139435122f64047c6","0xa1f5e3fa558a9e89318ca87978492f0fb4f6e54a9735c1b8d2ecfb1d1c57194ded6e0dd82d077b2d54251f3bee1279e1","0xb208e22d04896abfd515a95c429ff318e87ff81a5d534c8ac2c33c052d6ffb73ef1dccd39c0bbe0734b596c384014766","0x986d5d7d2b5bde6d16336f378bd13d0e671ad23a8ec8a10b3fc09036faeeb069f60662138d7a6df3dfb8e0d36180f770","0xa2d4e6c5f5569e9cef1cddb569515d4b6ace38c8aed594f06da7434ba6b24477392cc67ba867c2b079545ca0c625c457","0xb5ac32b1d231957d91c8b7fc43115ce3c5c0d8c13ca633374402fa8000b6d9fb19499f9181844f0c10b47357f3f757ce","0x96b8bf2504b4d28fa34a4ec378e0e0b684890c5f44b7a6bb6e19d7b3db2ab27b1e2686389d1de9fbd981962833a313ea","0x953bfd7f6c3a0469ad432072b9679a25486f5f4828092401eff494cfb46656c958641a4e6d0d97d400bc59d92dba0030","0x876ab3cea7484bbfd0db621ec085b9ac885d94ab55c4bb671168d82b92e609754b86aaf472c55df3d81421d768fd108a","0x885ff4e67d9ece646d02dd425aa5a087e485c3f280c3471b77532b0db6145b69b0fbefb18aa2e3fa5b64928b43a94e57","0xb91931d93f806d0b0e6cc62a53c718c099526140f50f45d94b8bbb57d71e78647e06ee7b42aa5714aed9a5c05ac8533f","0xa0313eeadd39c720c9c27b3d671215331ab8d0a794e71e7e690f06bcd87722b531d6525060c358f35f5705dbb7109ccb","0x874c0944b7fedc6701e53344100612ddcb495351e29305c00ec40a7276ea5455465ffb7bded898886c1853139dfb1fc7","0x8dc31701a01ee8137059ca1874a015130d3024823c0576aa9243e6942ec99d377e7715ed1444cd9b750a64b85dcaa3e5","0x836d2a757405e922ec9a2dfdcf489a58bd48b5f9683dd46bf6047688f778c8dee9bc456de806f70464df0b25f3f3d238","0xb30b0a1e454a503ea3e2efdec7483eaf20b0a5c3cefc42069e891952b35d4b2c955cf615f3066285ed8fafd9fcfbb8f6","0x8e6d4044b55ab747e83ec8762ea86845f1785cc7be0279c075dadf08aca3ccc5a096c015bb3c3f738f647a4eadea3ba5","0xad7735d16ab03cbe09c029610aa625133a6daecfc990b297205b6da98eda8c136a7c50db90f426d35069708510d5ae9c","0x8d62d858bbb59ec3c8cc9acda002e08addab4d3ad143b3812098f3d9087a1b4a1bb255dcb1635da2402487d8d0249161","0x805beec33238b832e8530645a3254aeef957e8f7ea24bcfc1054f8b9c69421145ebb8f9d893237e8a001c857fedfc77e","0xb1005644be4b085e3f5775aa9bd3e09a283e87ddada3082c04e7a62d303dcef3b8cf8f92944c200c7ae6bb6bdf63f832","0xb4ba0e0790dc29063e577474ffe3b61f5ea2508169f5adc1e394934ebb473e356239413a17962bc3e5d3762d72cce8c2","0xa157ba9169c9e3e6748d9f1dd67fbe08b9114ade4c5d8fc475f87a764fb7e6f1d21f66d7905cd730f28a1c2d8378682a","0x913e52b5c93989b5d15e0d91aa0f19f78d592bc28bcfdfddc885a9980c732b1f4debb8166a7c4083c42aeda93a702898","0x90fbfc1567e7cd4e096a38433704d3f96a2de2f6ed3371515ccc30bc4dd0721a704487d25a97f3c3d7e4344472702d8d","0x89646043028ffee4b69d346907586fd12c2c0730f024acb1481abea478e61031966e72072ff1d5e65cb8c64a69ad4eb1","0xb125a45e86117ee11d2fb42f680ab4a7894edd67ff927ae2c808920c66c3e55f6a9d4588eee906f33a05d592e5ec3c04","0xaad47f5b41eae9be55fb4f67674ff1e4ae2482897676f964a4d2dcb6982252ee4ff56aac49578b23f72d1fced707525e","0xb9ddff8986145e33851b4de54d3e81faa3352e8385895f357734085a1616ef61c692d925fe62a5ed3be8ca49f5d66306","0xb3cb0963387ed28c0c0adf7fe645f02606e6e1780a24d6cecef5b7c642499109974c81a7c2a198b19862eedcea2c2d8c","0xac9c53c885457aaf5cb36c717a6f4077af
701e0098eebd7aa600f5e4b14e6c1067255b3a0bc40e4a552025231be7de60","0x8e1a8d823c4603f6648ec21d064101094f2a762a4ed37dd2f0a2d9aa97b2d850ce1e76f4a4b8cae58819b058180f7031","0xb268b73bf7a179b6d22bd37e5e8cb514e9f5f8968c78e14e4f6d5700ca0d0ca5081d0344bb73b028970eebde3cb4124e","0xa7f57d71940f0edbd29ed8473d0149cae71d921dd15d1ff589774003e816b54b24de2620871108cec1ab9fa956ad6ce6","0x8053e6416c8b120e2b999cc2fc420a6a55094c61ac7f2a6c6f0a2c108a320890e389af96cbe378936132363c0d551277","0xb3823f4511125e5aa0f4269e991b435a0d6ceb523ebd91c04d7add5534e3df5fc951c504b4fd412a309fd3726b7f940b","0xae6eb04674d04e982ca9a6add30370ab90e303c71486f43ed3efbe431af1b0e43e9d06c11c3412651f304c473e7dbf39","0x96ab55e641ed2e677591f7379a3cd126449614181fce403e93e89b1645d82c4af524381ff986cae7f9cebe676878646d","0xb52423b4a8c37d3c3e2eca8f0ddbf7abe0938855f33a0af50f117fab26415fb0a3da5405908ec5fdc22a2c1f2ca64892","0x82a69ce1ee92a09cc709d0e3cd22116c9f69d28ea507fe5901f5676000b5179b9abe4c1875d052b0dd42d39925e186bb","0xa84c8cb84b9d5cfb69a5414f0a5283a5f2e90739e9362a1e8c784b96381b59ac6c18723a4aa45988ee8ef5c1f45cc97d","0xafd7efce6b36813082eb98257aae22a4c1ae97d51cac7ea9c852d4a66d05ef2732116137d8432e3f117119725a817d24","0xa0f5fe25af3ce021b706fcff05f3d825384a272284d04735574ce5fb256bf27100fad0b1f1ba0e54ae9dcbb9570ecad3","0x8751786cb80e2e1ff819fc7fa31c2833d25086534eb12b373d31f826382430acfd87023d2a688c65b5e983927e146336","0x8cf5c4b17fa4f3d35c78ce41e1dc86988fd1135cd5e6b2bb0c108ee13538d0d09ae7102609c6070f39f937b439b31e33","0xa9108967a2fedd7c322711eca8159c533dd561bedcb181b646de98bf5c3079449478eab579731bee8d215ae8852c7e21","0xb54c5171704f42a6f0f4e70767cdb3d96ffc4888c842eece343a01557da405961d53ffdc34d2f902ea25d3e1ed867cad","0xae8d4b764a7a25330ba205bf77e9f46182cd60f94a336bbd96773cf8064e3d39caf04c310680943dc89ed1fbad2c6e0d","0xaa5150e911a8e1346868e1b71c5a01e2a4bb8632c195861fb6c3038a0e9b85f0e09b3822e9283654a4d7bb17db2fc5f4","0x9685d3756ce9069bf8bb716cf7d5063ebfafe37e15b137fc8c3159633c4e006ff4887ddd0ae90360767a25c3f90cba7f","0x82155fd70f107ab3c8e414eadf226c797e07b65911508c76c554445422325e71af8c9a8e77fd52d94412a6fc29417cd3","0xabfae52f53a4b6e00760468d973a267f29321997c3dbb5aee36dc1f20619551229c0c45b9d9749f410e7f531b73378e8","0x81a76d921f8ef88e774fd985e786a4a330d779b93fad7def718c014685ca0247379e2e2a007ad63ee7f729cd9ed6ce1b","0x81947c84bc5e28e26e2e533af5ae8fe10407a7b77436dbf8f1d5b0bbe86fc659eae10f974659dc7c826c6dabd03e3a4b","0x92b8c07050d635b8dd4fd09df9054efe4edae6b86a63c292e73cc819a12a21dd7d104ce51fa56af6539dedf6dbe6f7b6","0xb44c579e3881f32b32d20c82c207307eca08e44995dd2aac3b2692d2c8eb2a325626c80ac81c26eeb38c4137ff95add5","0x97efab8941c90c30860926dea69a841f2dcd02980bf5413b9fd78d85904588bf0c1021798dbc16c8bbb32cce66c82621","0x913363012528b50698e904de0588bf55c8ec5cf6f0367cfd42095c4468fcc64954fbf784508073e542fee242d0743867","0x8ed203cf215148296454012bd10fddaf119203db1919a7b3d2cdc9f80e66729464fdfae42f1f2fc5af1ed53a42b40024","0xab84312db7b87d711e9a60824f4fe50e7a6190bf92e1628688dfcb38930fe87b2d53f9e14dd4de509b2216856d8d9188","0x880726def069c160278b12d2258eac8fa63f729cd351a710d28b7e601c6712903c3ac1e7bbd0d21e4a15f13ca49db5aa","0x980699cd51bac6283959765f5174e543ed1e5f5584b5127980cbc2ef18d984ecabba45042c6773b447b8e694db066028","0xaeb019cb80dc4cb4207430d0f2cd24c9888998b6f21d9bf286cc638449668d2eec0018a4cf3fe6448673cd6729335e2b","0xb29852f6aa6c60effdffe96ae88590c88abae732561d35cc19e82d3a51e26cb35ea00986193e07f90060756240f5346e","0xa0fa855adc5ba469f35800c48414b8921455950a5c0a49945d1ef6e8f2a1881f2e2dfae47de6417270a6bf49deeb091d","0xb6c7332e3b14813641e7272d4f69ecc7e09081df0037d6dab97ce1
3a9e58510f5c930d300633f208181d9205c5534001","0x85a6c050f42fce560b5a8d54a11c3bbb8407abbadd859647a7b0c21c4b579ec65671098b74f10a16245dc779dff7838e","0x8f3eb34bb68759d53c6677de4de78a6c24dd32c8962a7fb355ed362572ef8253733e6b52bc21c9f92ecd875020a9b8de","0xa17dd44181e5dab4dbc128e1af93ec22624b57a448ca65d2d9e246797e4af7d079e09c6e0dfb62db3a9957ce92f098d5","0xa56a1b854c3183082543a8685bb34cae1289f86cfa8123a579049dbd059e77982886bfeb61bf6e05b4b1fe4e620932e7","0xaedae3033cb2fb7628cb4803435bdd7757370a86f808ae4cecb9a268ad0e875f308c048c80cbcac523de16b609683887","0x9344905376aa3982b1179497fac5a1d74b14b7038fd15e3b002db4c11c8bfc7c39430db492cdaf58b9c47996c9901f28","0xa3bfafdae011a19f030c749c3b071f83580dee97dd6f949e790366f95618ca9f828f1daaeabad6dcd664fcef81b6556d","0x81c03d8429129e7e04434dee2c529194ddb01b414feda3adee2271eb680f6c85ec872a55c9fa9d2096f517e13ed5abcc","0x98205ef3a72dff54c5a9c82d293c3e45d908946fa74bb749c3aabe1ab994ea93c269bcce1a266d2fe67a8f02133c5985","0x85a70aeed09fda24412fadbafbbbf5ba1e00ac92885df329e147bfafa97b57629a3582115b780d8549d07d19b7867715","0xb0fbe81c719f89a57d9ea3397705f898175808c5f75f8eb81c2193a0b555869ba7bd2e6bc54ee8a60cea11735e21c68c","0xb03a0bd160495ee626ff3a5c7d95bc79d7da7e5a96f6d10116600c8fa20bedd1132f5170f25a22371a34a2d763f2d6d0","0xa90ab04091fbca9f433b885e6c1d60ab45f6f1daf4b35ec22b09909d493a6aab65ce41a6f30c98239cbca27022f61a8b","0xb66f92aa3bf2549f9b60b86f99a0bd19cbdd97036d4ae71ca4b83d669607f275260a497208f6476cde1931d9712c2402","0xb08e1fdf20e6a9b0b4942f14fa339551c3175c1ffc5d0ab5b226b6e6a322e9eb0ba96adc5c8d59ca4259e2bdd04a7eb0","0xa2812231e92c1ce74d4f5ac3ab6698520288db6a38398bb38a914ac9326519580af17ae3e27cde26607e698294022c81","0xabfcbbcf1d3b9e84c02499003e490a1d5d9a2841a9e50c7babbef0b2dd20d7483371d4dc629ba07faf46db659459d296","0xb0fe9f98c3da70927c23f2975a9dc4789194d81932d2ad0f3b00843dd9cbd7fb60747a1da8fe5a79f136a601becf279d","0xb130a6dba7645165348cb90f023713bed0eefbd90a976b313521c60a36d34f02032e69a2bdcf5361e343ed46911297ec","0x862f0cffe3020cea7a5fd4703353aa1eb1be335e3b712b29d079ff9f7090d1d8b12013011e1bdcbaa80c44641fd37c9f","0x8c6f11123b26633e1abb9ed857e0bce845b2b3df91cc7b013b2fc77b477eee445da0285fc6fc793e29d5912977f40916","0x91381846126ea819d40f84d3005e9fb233dc80071d1f9bb07f102bf015f813f61e5884ffffb4f5cd333c1b1e38a05a58","0x8add7d908de6e1775adbd39c29a391f06692b936518db1f8fde74eb4f533fc510673a59afb86e3a9b52ade96e3004c57","0x8780e086a244a092206edcde625cafb87c9ab1f89cc3e0d378bc9ee776313836160960a82ec397bc3800c0a0ec3da283","0xa6cb4cd9481e22870fdd757fae0785edf4635e7aacb18072fe8dc5876d0bab53fb99ce40964a7d3e8bcfff6f0ab1332f","0xaf30ff47ecc5b543efba1ba4706921066ca8bb625f40e530fb668aea0551c7647a9d126e8aba282fbcce168c3e7e0130","0x91b0bcf408ce3c11555dcb80c4410b5bc2386d3c05caec0b653352377efdcb6bab4827f2018671fc8e4a0e90d772acc1","0xa9430b975ef138b6b2944c7baded8fe102d31da4cfe3bd3d8778bda79189c99d38176a19c848a19e2d1ee0bddd9a13c1","0xaa5a4eef849d7c9d2f4b018bd01271c1dd83f771de860c4261f385d3bdcc130218495860a1de298f14b703ec32fa235f","0xb0ce79e7f9ae57abe4ff366146c3b9bfb38b0dee09c28c28f5981a5d234c6810ad4d582751948affb480d6ae1c8c31c4","0xb75122748560f73d15c01a8907d36d06dc068e82ce22b84b322ac1f727034493572f7907dec34ebc3ddcc976f2f89ed7","0xb0fc7836369a3e4411d34792d6bd5617c14f61d9bba023dda64e89dc5fb0f423244e9b48ee64869258931daa9753a56f","0x8956d7455ae9009d70c6e4a0bcd7610e55f37494cf9897a8f9e1b904cc8febc3fd2d642ebd09025cfff4609ad7e3bc52","0xad741efe9e472026aa49ae3d9914cb9c1a6f37a54f1a6fe6419bebd8c7d68dca105a751c7859f4389505ede40a0de786","0xb52f418797d719f0d0d0ffb0846788b5cba5d0454a69a2925de4b0b80fa4dd7e8c445e5eac
40afd92897ed28ca650566","0xa0ab65fb9d42dd966cd93b1de01d7c822694669dd2b7a0c04d99cd0f3c3de795f387b9c92da11353412f33af5c950e9a","0xa0052f44a31e5741a331f7cac515a08b3325666d388880162d9a7b97598fde8b61f9ff35ff220df224eb5c4e40ef0567","0xa0101cfdc94e42b2b976c0d89612a720e55d145a5ef6ef6f1f78cf6de084a49973d9b5d45915349c34ce712512191e3c","0xa0dd99fcf3f5cead5aaf08e82212df3a8bb543c407a4d6fab88dc5130c1769df3f147e934a46f291d6c1a55d92b86917","0xa5939153f0d1931bbda5cf6bdf20562519ea55fbfa978d6dbc6828d298260c0da7a50c37c34f386e59431301a96c2232","0x9568269f3f5257200f9ca44afe1174a5d3cf92950a7f553e50e279c239e156a9faaa2a67f288e3d5100b4142efe64856","0xb746b0832866c23288e07f24991bbf687cad794e7b794d3d3b79367566ca617d38af586cdc8d6f4a85a34835be41d54f","0xa871ce28e39ab467706e32fec1669fda5a4abba2f8c209c6745df9f7a0fa36bbf1919cf14cb89ea26fa214c4c907ae03","0xa08dacdd758e523cb8484f6bd070642c0c20e184abdf8e2a601f61507e93952d5b8b0c723c34fcbdd70a8485eec29db2","0x85bdb78d501382bb95f1166b8d032941005661aefd17a5ac32df9a3a18e9df2fc5dc2c1f07075f9641af10353cecc0c9","0x98d730c28f6fa692a389e97e368b58f4d95382fad8f0baa58e71a3d7baaea1988ead47b13742ce587456f083636fa98e","0xa557198c6f3d5382be9fb363feb02e2e243b0c3c61337b3f1801c4a0943f18e38ce1a1c36b5c289c8fa2aa9d58742bab","0x89174f79201742220ac689c403fc7b243eed4f8e3f2f8aba0bf183e6f5d4907cb55ade3e238e3623d9885f03155c4d2b","0xb891d600132a86709e06f3381158db300975f73ea4c1f7c100358e14e98c5fbe792a9af666b85c4e402707c3f2db321e","0xb9e5b2529ef1043278c939373fc0dbafe446def52ddd0a8edecd3e4b736de87e63e187df853c54c28d865de18a358bb6","0x8589b2e9770340c64679062c5badb7bbef68f55476289b19511a158a9a721f197da03ece3309e059fc4468b15ac33aa3","0xaad8c6cd01d785a881b446f06f1e9cd71bca74ba98674c2dcddc8af01c40aa7a6d469037498b5602e76e9c91a58d3dbd","0xabaccb1bd918a8465f1bf8dbe2c9ad4775c620b055550b949a399f30cf0d9eb909f3851f5b55e38f9e461e762f88f499","0xae62339d26db46e85f157c0151bd29916d5cc619bd4b832814b3fd2f00af8f38e7f0f09932ffe5bba692005dab2d9a74","0x93a6ff30a5c0edf8058c89aba8c3259e0f1b1be1b80e67682de651e5346f7e1b4b4ac3d87cbaebf198cf779524aff6bf","0x8980a2b1d8f574af45b459193c952400b10a86122b71fca2acb75ee0dbd492e7e1ef5b959baf609a5172115e371f3177","0x8c2f49f3666faee6940c75e8c7f6f8edc3f704cca7a858bbb7ee5e96bba3b0cf0993996f781ba6be3b0821ef4cb75039","0xb14b9e348215b278696018330f63c38db100b0542cfc5be11dc33046e3bca6a13034c4ae40d9cef9ea8b34fef0910c4e","0xb59bc3d0a30d66c16e6a411cb641f348cb1135186d5f69fda8b0a0934a5a2e7f6199095ba319ec87d3fe8f1ec4a06368","0x8874aca2a3767aa198e4c3fec2d9c62d496bc41ff71ce242e9e082b7f38cdf356089295f80a301a3cf1182bde5308c97","0xb1820ebd61376d91232423fc20bf008b2ba37e761199f4ef0648ea2bd70282766799b4de814846d2f4d516d525c8daa7","0xa6b202e5dedc16a4073e04a11af3a8509b23dfe5a1952f899adeb240e75c3f5bde0c424f811a81ea48d343591faffe46","0xa69becee9c93734805523b92150a59a62eed4934f66056b645728740d42223f2925a1ad38359ba644da24d9414f4cdda","0xad72f0f1305e37c7e6b48c272323ee883320994cb2e0d850905d6655fafc9f361389bcb9c66b3ff8d2051dbb58c8aa96","0xb563600bd56fad7c8853af21c6a02a16ed9d8a8bbeea2c31731d63b976d83cb05b9779372d898233e8fd597a75424797","0xb0abb78ce465bf7051f563c62e8be9c57a2cc997f47c82819300f36e301fefd908894bb2053a9d27ce2d0f8c46d88b5b","0xa071a85fb8274bac2202e0cb8e0e2028a5e138a82d6e0374d39ca1884a549c7c401312f00071b91f455c3a2afcfe0cda","0xb931c271513a0f267b9f41444a5650b1918100b8f1a64959c552aff4e2193cc1b9927906c6fa7b8a8c68ef13d79aaa52","0xa6a1bb9c7d32cb0ca44d8b75af7e40479fbce67d216b48a2bb680d3f3a772003a49d3cd675fc64e9e0f8fabeb86d6d61","0xb98d609858671543e1c3b8564162ad828808bb50ded261a9f8690ded5b665ed8368c58f947365ed6e84e5a12e27b42
3d","0xb3dca58cd69ec855e2701a1d66cad86717ff103ef862c490399c771ad28f675680f9500cb97be48de34bcdc1e4503ffd","0xb34867c6735d3c49865e246ddf6c3b33baf8e6f164db3406a64ebce4768cb46b0309635e11be985fee09ab7a31d81402","0xacb966c554188c5b266624208f31fab250b3aa197adbdd14aee5ab27d7fb886eb4350985c553b20fdf66d5d332bfd3fe","0x943c36a18223d6c870d54c3b051ef08d802b85e9dd6de37a51c932f90191890656c06adfa883c87b906557ae32d09da0","0x81bca7954d0b9b6c3d4528aadf83e4bc2ef9ea143d6209bc45ae9e7ae9787dbcd8333c41f12c0b6deee8dcb6805e826a","0xaba176b92256efb68f574e543479e5cf0376889fb48e3db4ebfb7cba91e4d9bcf19dcfec444c6622d9398f06de29e2b9","0xb9f743691448053216f6ece7cd699871fff4217a1409ceb8ab7bdf3312d11696d62c74b0664ba0a631b1e0237a8a0361","0xa383c2b6276fa9af346b21609326b53fb14fdf6f61676683076e80f375b603645f2051985706d0401e6fbed7eb0666b6","0xa9ef2f63ec6d9beb8f3d04e36807d84bda87bdd6b351a3e4a9bf7edcb5618c46c1f58cfbf89e64b40f550915c6988447","0xa141b2d7a82f5005eaea7ae7d112c6788b9b95121e5b70b7168d971812f3381de8b0082ac1f0a82c7d365922ebd2d26a","0xb1b76ef8120e66e1535c17038b75255a07849935d3128e3e99e56567b842fb1e8d56ef932d508d2fb18b82f7868fe1a9","0x8e2e234684c81f21099f5c54f6bbe2dd01e3b172623836c77668a0c49ce1fe218786c3827e4d9ae2ea25c50a8924fb3c","0xa5caf5ff948bfd3c4ca3ffbdfcd91eec83214a6c6017235f309a0bbf7061d3b0b466307c00b44a1009cf575163898b43","0x986415a82ca16ebb107b4c50b0c023c28714281db0bcdab589f6cb13d80e473a3034b7081b3c358e725833f6d845cb14","0xb94836bf406ac2cbacb10e6df5bcdfcc9d9124ae1062767ca4e322d287fd5e353fdcebd0e52407cb3cd68571258a8900","0x83c6d70a640b33087454a4788dfd9ef3ed00272da084a8d36be817296f71c086b23b576f98178ab8ca6a74f04524b46b","0xad4115182ad784cfe11bcfc5ce21fd56229cc2ce77ac82746e91a2f0aa53ca6593a22efd2dc4ed8d00f84542643d9c58","0xab1434c5e5065da826d10c2a2dba0facccab0e52b506ce0ce42fbe47ced5a741797151d9ecc99dc7d6373cfa1779bbf6","0x8a8b591d82358d55e6938f67ea87a89097ab5f5496f7260adb9f649abb289da12b498c5b2539c2f9614fb4e21b1f66b0","0x964f355d603264bc1f44c64d6d64debca66f37dff39c971d9fc924f2bc68e6c187b48564a6dc82660a98b035f8addb5d","0xb66235eaaf47456bc1dc4bde454a028e2ce494ece6b713a94cd6bf27cf18c717fd0c57a5681caaa2ad73a473593cdd7a","0x9103e3bb74304186fa4e3e355a02da77da4aca9b7e702982fc2082af67127ebb23a455098313c88465bc9b7d26820dd5","0xb6a42ff407c9dd132670cdb83cbad4b20871716e44133b59a932cd1c3f97c7ac8ff7f61acfaf8628372508d8dc8cad7c","0x883a9c21c16a167a4171b0f084565c13b6f28ba7c4977a0de69f0a25911f64099e7bbb4da8858f2e93068f4155d04e18","0x8dbb3220abc6a43220adf0331e3903d3bfd1d5213aadfbd8dfcdf4b2864ce2e96a71f35ecfb7a07c3bbabf0372b50271","0xb4ad08aee48e176bda390b7d9acf2f8d5eb008f30d20994707b757dc6a3974b2902d29cd9b4d85e032810ad25ac49e97","0x865bb0f33f7636ec501bb634e5b65751c8a230ae1fa807a961a8289bbf9c7fe8c59e01fbc4c04f8d59b7f539cf79ddd5","0x86a54d4c12ad1e3605b9f93d4a37082fd26e888d2329847d89afa7802e815f33f38185c5b7292293d788ad7d7da1df97","0xb26c8615c5e47691c9ff3deca3021714662d236c4d8401c5d27b50152ce7e566266b9d512d14eb63e65bc1d38a16f914","0x827639d5ce7db43ba40152c8a0eaad443af21dc92636cc8cc2b35f10647da7d475a1e408901cd220552fddad79db74df","0xa2b79a582191a85dbe22dc384c9ca3de345e69f6aa370aa6d3ff1e1c3de513e30b72df9555b15a46586bd27ea2854d9d","0xae0d74644aba9a49521d3e9553813bcb9e18f0b43515e4c74366e503c52f47236be92dfbd99c7285b3248c267b1de5a0","0x80fb0c116e0fd6822a04b9c25f456bdca704e2be7bdc5d141dbf5d1c5eeb0a2c4f5d80db583b03ef3e47517e4f9a1b10","0xac3a1fa3b4a2f30ea7e0a114cdc479eb51773573804c2a158d603ad9902ae8e39ffe95df09c0d871725a5d7f9ba71a57","0xb56b2b0d601cba7f817fa76102c68c2e518c6f20ff693aad3ff2e07d6c4c76203753f7f91686b1801e8c4659e4d45c48","0x89d50c1fc56e6
56fb9d3915964ebce703cb723fe411ab3c9eaa88ccc5d2b155a9b2e515363d9c600d3c0cee782c43f41","0xb24207e61462f6230f3cd8ccf6828357d03e725769f7d1de35099ef9ee4dca57dbce699bb49ed994462bee17059d25ce","0xb886f17fcbcbfcd08ac07f04bb9543ef58510189decaccea4b4158c9174a067cb67d14b6be3c934e6e2a18c77efa9c9c","0xb9c050ad9cafd41c6e2e192b70d080076eed59ed38ea19a12bd92fa17b5d8947d58d5546aaf5e8e27e1d3b5481a6ce51","0xaaf7a34d3267e3b1ddbc54c641e3922e89303f7c86ebebc7347ebca4cffad5b76117dac0cbae1a133053492799cd936f","0xa9ee604ada50adef82e29e893070649d2d4b7136cc24fa20e281ce1a07bd736bf0de7c420369676bcbcecff26fb6e900","0x9855315a12a4b4cf80ab90b8bd13003223ba25206e52fd4fe6a409232fbed938f30120a3db23eab9c53f308bd8b9db81","0x8cd488dd7a24f548a3cf03c54dec7ff61d0685cb0f6e5c46c2d728e3500d8c7bd6bba0156f4bf600466fda53e5b20444","0x890ad4942ebac8f5b16c777701ab80c68f56fa542002b0786f8fea0fb073154369920ac3dbfc07ea598b82f4985b8ced","0x8de0cf9ddc84c9b92c59b9b044387597799246b30b9f4d7626fc12c51f6e423e08ee4cbfe9289984983c1f9521c3e19d","0xb474dfb5b5f4231d7775b3c3a8744956b3f0c7a871d835d7e4fd9cc895222c7b868d6c6ce250de568a65851151fac860","0x86433b6135d9ed9b5ee8cb7a6c40e5c9d30a68774cec04988117302b8a02a11a71a1e03fd8e0264ef6611d219f103007","0x80b9ed4adbe9538fb1ef69dd44ec0ec5b57cbfea820054d8d445b4261962624b4c70ac330480594bc5168184378379c3","0x8b2e83562ccd23b7ad2d17f55b1ab7ef5fbef64b3a284e6725b800f3222b8bdf49937f4a873917ada9c4ddfb090938c2","0xabe78cebc0f5a45d754140d1f685e387489acbfa46d297a8592aaa0d676a470654f417a4f7d666fc0b2508fab37d908e","0xa9c5f8ff1f8568e252b06d10e1558326db9901840e6b3c26bbd0cd5e850cb5fb3af3f117dbb0f282740276f6fd84126f","0x975f8dc4fb55032a5df3b42b96c8c0ffecb75456f01d4aef66f973cb7270d4eff32c71520ceefc1adcf38d77b6b80c67","0xb043306ed2c3d8a5b9a056565afd8b5e354c8c4569fda66b0d797a50a3ce2c08cffbae9bbe292da69f39e89d5dc7911e","0x8d2afc36b1e44386ba350c14a6c1bb31ff6ea77128a0c5287584ac3584282d18516901ce402b4644a53db1ed8e7fa581","0x8c294058bed53d7290325c363fe243f6ec4f4ea2343692f4bac8f0cb86f115c069ccb8334b53d2e42c067691ad110dba","0xb92157b926751aaf7ef82c1aa8c654907dccab6376187ee8b3e8c0c82811eae01242832de953faa13ebaff7da8698b3e","0xa780c4bdd9e4ba57254b09d745075cecab87feda78c88ffee489625c5a3cf96aa6b3c9503a374a37927d9b78de9bd22b","0x811f548ef3a2e6a654f7dcb28ac9378de9515ed61e5a428515d9594a83e80b35c60f96a5cf743e6fab0d3cb526149f49","0x85a4dccf6d90ee8e094731eec53bd00b3887aec6bd81a0740efddf812fd35e3e4fe4f983afb49a8588691c202dabf942","0xb152c2da6f2e01c8913079ae2b40a09b1f361a80f5408a0237a8131b429677c3157295e11b365b1b1841924b9efb922e","0x849b9efee8742502ffd981c4517c88ed33e4dd518a330802caff168abae3cd09956a5ee5eda15900243bc2e829016b74","0x955a933f3c18ec0f1c0e38fa931e4427a5372c46a3906ebe95082bcf878c35246523c23f0266644ace1fa590ffa6d119","0x911989e9f43e580c886656377c6f856cdd4ff1bd001b6db3bbd86e590a821d34a5c6688a29b8d90f28680e9fdf03ba69","0xb73b8b4f1fd6049fb68d47cd96a18fcba3f716e0a1061aa5a2596302795354e0c39dea04d91d232aec86b0bf2ba10522","0x90f87456d9156e6a1f029a833bf3c7dbed98ca2f2f147a8564922c25ae197a55f7ea9b2ee1f81bf7383197c4bad2e20c","0x903cba8b1e088574cb04a05ca1899ab00d8960580c884bd3c8a4c98d680c2ad11410f2b75739d6050f91d7208cac33a5","0x9329987d42529c261bd15ecedd360be0ea8966e7838f32896522c965adfc4febf187db392bd441fb43bbd10c38fdf68b","0x8178ee93acf5353baa349285067b20e9bb41aa32d77b5aeb7384fe5220c1fe64a2461bd7a83142694fe673e8bbf61b7c","0xa06a8e53abcff271b1394bcc647440f81fb1c1a5f29c27a226e08f961c3353f4891620f2d59b9d1902bf2f5cc07a4553","0xaaf5fe493b337810889e777980e6bbea6cac39ac66bc0875c680c4208807ac866e9fda9b5952aa1d04539b9f4a4bec57","0xaa058abb1953eceac14ccfa7c0cc482a1
46e1232905dcecc86dd27f75575285f06bbae16a8c9fe8e35d8713717f5f19f","0x8f15dd732799c879ca46d2763453b359ff483ca33adb1d0e0a57262352e0476c235987dc3a8a243c74bc768f93d3014c","0xa61cc8263e9bc03cce985f1663b8a72928a607121005a301b28a278e9654727fd1b22bc8a949af73929c56d9d3d4a273","0x98d6dc78502d19eb9f921225475a6ebcc7b44f01a2df6f55ccf6908d65b27af1891be2a37735f0315b6e0f1576c1f8d8","0x8bd258b883f3b3793ec5be9472ad1ff3dc4b51bc5a58e9f944acfb927349ead8231a523cc2175c1f98e7e1e2b9f363b8","0xaeacc2ecb6e807ad09bedd99654b097a6f39840e932873ace02eabd64ccfbb475abdcb62939a698abf17572d2034c51e","0xb8ccf78c08ccd8df59fd6eda2e01de328bc6d8a65824d6f1fc0537654e9bc6bf6f89c422dd3a295cce628749da85c864","0x8f91fd8cb253ba2e71cc6f13da5e05f62c2c3b485c24f5d68397d04665673167fce1fc1aec6085c69e87e66ec555d3fd","0xa254baa10cb26d04136886073bb4c159af8a8532e3fd36b1e9c3a2e41b5b2b6a86c4ebc14dbe624ee07b7ccdaf59f9ab","0x94e3286fe5cd68c4c7b9a7d33ae3d714a7f265cf77cd0e9bc19fc51015b1d1c34ad7e3a5221c459e89f5a043ee84e3a9","0xa279da8878af8d449a9539bec4b17cea94f0242911f66fab275b5143ab040825f78c89cb32a793930609415cfa3a1078","0xac846ceb89c9e5d43a2991c8443079dc32298cd63e370e64149cec98cf48a6351c09c856f2632fd2f2b3d685a18bbf8b","0xa847b27995c8a2e2454aaeb983879fb5d3a23105c33175839f7300b7e1e8ec3efd6450e9fa3f10323609dee7b98c6fd5","0xa2f432d147d904d185ff4b2de8c6b82fbea278a2956bc406855b44c18041854c4f0ecccd472d1d0dff1d8aa8e281cb1d","0x94a48ad40326f95bd63dff4755f863a1b79e1df771a1173b17937f9baba57b39e651e7695be9f66a472f098b339364fc","0xa12a0ccd8f96e96e1bc6494341f7ebce959899341b3a084aa1aa87d1c0d489ac908552b7770b887bb47e7b8cbc3d8e66","0x81a1f1681bda923bd274bfe0fbb9181d6d164fe738e54e25e8d4849193d311e2c4253614ed673c98af2c798f19a93468","0xabf71106a05d501e84cc54610d349d7d5eae21a70bd0250f1bebbf412a130414d1c8dbe673ffdb80208fd72f1defa4d4","0x96266dc2e0df18d8136d79f5b59e489978eee0e6b04926687fe389d4293c14f36f055c550657a8e27be4118b64254901","0x8df5dcbefbfb4810ae3a413ca6b4bf08619ca53cd50eb1dde2a1c035efffc7b7ac7dff18d403253fd80104bd83dc029e","0x9610b87ff02e391a43324a7122736876d5b3af2a137d749c52f75d07b17f19900b151b7f439d564f4529e77aa057ad12","0xa90a5572198b40fe2fcf47c422274ff36c9624df7db7a89c0eb47eb48a73a03c985f4ac5016161c76ca317f64339bce1","0x98e5e61a6ab6462ba692124dba7794b6c6bde4249ab4fcc98c9edd631592d5bc2fb5e38466691a0970a38e48d87c2e43","0x918cefb8f292f78d4db81462c633daf73b395e772f47b3a7d2cea598025b1d8c3ec0cbff46cdb23597e74929981cde40","0xa98918a5dc7cf610fe55f725e4fd24ce581d594cb957bb9b4e888672e9c0137003e1041f83e3f1d7b9caab06462c87d4","0xb92b74ac015262ca66c33f2d950221e19d940ba3bf4cf17845f961dc1729ae227aa9e1f2017829f2135b489064565c29","0xa053ee339f359665feb178b4e7ee30a85df37debd17cacc5a27d6b3369d170b0114e67ad1712ed26d828f1df641bcd99","0x8c3c8bad510b35da5ce5bd84b35c958797fbea024ad1c97091d2ff71d9b962e9222f65a9b776e5b3cc29c36e1063d2ee","0xaf99dc7330fe7c37e850283eb47cc3257888e7c197cb0d102edf94439e1e02267b6a56306d246c326c4c79f9dc8c6986","0xafecb2dc34d57a725efbd7eb93d61eb29dbe8409b668ab9ea040791f5b796d9be6d4fc10d7f627bf693452f330cf0435","0x93334fedf19a3727a81a6b6f2459db859186227b96fe7a391263f69f1a0884e4235de64d29edebc7b99c44d19e7c7d7a","0x89579c51ac405ad7e9df13c904061670ce4b38372492764170e4d3d667ed52e5d15c7cd5c5991bbfa3a5e4e3fa16363e","0x9778f3e8639030f7ef1c344014f124e375acb8045bd13d8e97a92c5265c52de9d1ffebaa5bc3e1ad2719da0083222991","0x88f77f34ee92b3d36791bdf3326532524a67d544297dcf1a47ff00b47c1b8219ff11e34034eab7d23b507caa2fd3c6b9","0xa699c1e654e7c484431d81d90657892efeb4adcf72c43618e71ca7bd7c7a7ebbb1db7e06e75b75dc4c74efd306b5df3f","0x81d13153baebb2ef672b5bdb069d3cd669ce0be96b742c94e0403
8f689ff92a61376341366b286eee6bf3ae85156f694","0x81efb17de94400fdacc1deec2550cbe3eecb27c7af99d8207e2f9be397e26be24a40446d2a09536bb5172c28959318d9","0x989b21ebe9ceab02488992673dc071d4d5edec24bff0e17a4306c8cb4b3c83df53a2063d1827edd8ed16d6e837f0d222","0x8d6005d6536825661b13c5fdce177cb37c04e8b109b7eb2b6d82ea1cb70efecf6a0022b64f84d753d165edc2bba784a3","0xa32607360a71d5e34af2271211652d73d7756d393161f4cf0da000c2d66a84c6826e09e759bd787d4fd0305e2439d342","0xaaad8d6f6e260db45d51b2da723be6fa832e76f5fbcb77a9a31e7f090dd38446d3b631b96230d78208cae408c288ac4e","0xabcfe425255fd3c5cffd3a818af7650190c957b6b07b632443f9e33e970a8a4c3bf79ac9b71f4d45f238a04d1c049857","0xaeabf026d4c783adc4414b5923dbd0be4b039cc7201219f7260d321f55e9a5b166d7b5875af6129c034d0108fdc5d666","0xaf49e740c752d7b6f17048014851f437ffd17413c59797e5078eaaa36f73f0017c3e7da020310cfe7d3c85f94a99f203","0x8854ca600d842566e3090040cd66bb0b3c46dae6962a13946f0024c4a8aca447e2ccf6f240045f1ceee799a88cb9210c","0xb6c03b93b1ab1b88ded8edfa1b487a1ed8bdce8535244dddb558ffb78f89b1c74058f80f4db2320ad060d0c2a9c351cc","0xb5bd7d17372faff4898a7517009b61a7c8f6f0e7ed4192c555db264618e3f6e57fb30a472d169fea01bf2bf0362a19a8","0x96eb1d38319dc74afe7e7eb076fcd230d19983f645abd14a71e6103545c01301b31c47ae931e025f3ecc01fb3d2f31fa","0xb55a8d30d4403067def9b65e16f867299f8f64c9b391d0846d4780bc196569622e7e5b64ce799b5aefac8f965b2a7a7b","0x8356d199a991e5cbbff608752b6291731b6b6771aed292f8948b1f41c6543e4ab1bedc82dd26d10206c907c03508df06","0x97f4137445c2d98b0d1d478049de952610ad698c91c9d0f0e7227d2aae690e9935e914ec4a2ea1fbf3fc1dddfeeacebb","0xaf5621707e0938320b15ddfc87584ab325fbdfd85c30efea36f8f9bd0707d7ec12c344eff3ec21761189518d192df035","0x8ac7817e71ea0825b292687928e349da7140285d035e1e1abff0c3704fa8453faaae343a441b7143a74ec56539687cc4","0x8a5e0a9e4758449489df10f3386029ada828d1762e4fb0a8ffe6b79e5b6d5d713cb64ed95960e126398b0cdb89002bc9","0x81324be4a71208bbb9bca74b77177f8f1abb9d3d5d9db195d1854651f2cf333cd618d35400da0f060f3e1b025124e4b2","0x849971d9d095ae067525b3cbc4a7dfae81f739537ade6d6cec1b42fb692d923176197a8770907c58069754b8882822d6","0x89f830825416802477cc81fdf11084885865ee6607aa15aa4eb28e351c569c49b8a1b9b5e95ddc04fa0ebafe20071313","0x9240aeeaff37a91af55f860b9badd466e8243af9e8c96a7aa8cf348cd270685ab6301bc135b246dca9eda696f8b0e350","0xacf74db78cc33138273127599eba35b0fb4e7b9a69fe02dae18fc6692d748ca332bd00b22afa8e654ed587aab11833f3","0xb091e6d37b157b50d76bd297ad752220cd5c9390fac16dc838f8557aed6d9833fc920b61519df21265406216315e883f","0xa6446c429ebf1c7793c622250e23594c836b2fbcaf6c5b3d0995e1595a37f50ea643f3e549b0be8bbdadd69044d72ab9","0x93e675353bd60e996bf1c914d5267eeaa8a52fc3077987ccc796710ef9becc6b7a00e3d82671a6bdfb8145ee3c80245a","0xa2f731e43251d04ed3364aa2f072d05355f299626f2d71a8a38b6f76cf08c544133f7d72dd0ab4162814b674b9fc7fa6","0x97a8b791a5a8f6e1d0de192d78615d73d0c38f1e557e4e15d15adc663d649e655bc8da3bcc499ef70112eafe7fb45c7a","0x98cd624cbbd6c53a94469be4643c13130916b91143425bcb7d7028adbbfede38eff7a21092af43b12d4fab703c116359","0x995783ce38fd5f6f9433027f122d4cf1e1ff3caf2d196ce591877f4a544ce9113ead60de2de1827eaff4dd31a20d79a8","0x8cf251d6f5229183b7f3fe2f607a90b4e4b6f020fb4ba2459d28eb8872426e7be8761a93d5413640a661d73e34a5b81f","0xb9232d99620652a3aa7880cad0876f153ff881c4ed4c0c2e7b4ea81d5d42b70daf1a56b869d752c3743c6d4c947e6641","0x849716f938f9d37250cccb1bf77f5f9fde53096cdfc6f2a25536a6187029a8f1331cdbed08909184b201f8d9f04b792f","0x80c7c4de098cbf9c6d17b14eba1805e433b5bc905f6096f8f63d34b94734f2e4ebf4bce8a177efd1186842a61204a062","0xb790f410cf06b9b8daadceeb4fd5ff40a2deda820c8df2537e0a7554613ae3948e149504e
3e79aa84889df50c8678eeb","0x813aab8bd000299cd37485b73cd7cba06e205f8efb87f1efc0bae8b70f6db2bc7702eb39510ad734854fb65515fe9d0f","0x94f0ab7388ac71cdb67f6b85dfd5945748afb2e5abb622f0b5ad104be1d4d0062b651f134ba22385c9e32c2dfdcccce1","0xab6223dca8bd6a4f969e21ccd9f8106fc5251d321f9e90cc42cea2424b3a9c4e5060a47eeef6b23c7976109b548498e8","0x859c56b71343fce4d5c5b87814c47bf55d581c50fd1871a17e77b5e1742f5af639d0e94d19d909ec7dfe27919e954e0c","0xaae0d632b6191b8ad71b027791735f1578e1b89890b6c22e37de0e4a6074886126988fe8319ae228ac9ef3b3bcccb730","0x8ca9f32a27a024c3d595ecfaf96b0461de57befa3b331ab71dc110ec3be5824fed783d9516597537683e77a11d334338","0xa061df379fb3f4b24816c9f6cd8a94ecb89b4c6dc6cd81e4b8096fa9784b7f97ab3540259d1de9c02eb91d9945af4823","0x998603102ac63001d63eb7347a4bb2bf4cf33b28079bb48a169076a65c20d511ccd3ef696d159e54cc8e772fb5d65d50","0x94444d96d39450872ac69e44088c252c71f46be8333a608a475147752dbb99db0e36acfc5198f158509401959c12b709","0xac1b51b6c09fe055c1d7c9176eea9adc33f710818c83a1fbfa073c8dc3a7eb3513cbdd3f5960b7845e31e3e83181e6ba","0x803d530523fc9e1e0f11040d2412d02baef3f07eeb9b177fa9bfa396af42eea898a4276d56e1db998dc96ae47b644cb2","0x85a3c9fc7638f5bf2c3e15ba8c2fa1ae87eb1ceb44c6598c67a2948667a9dfa41e61f66d535b4e7fda62f013a5a8b885","0xa961cf5654c46a1a22c29baf7a4e77837a26b7f138f410e9d1883480ed5fa42411d522aba32040b577046c11f007388e","0xad1154142344f494e3061ef45a34fab1aaacf5fdf7d1b26adbb5fbc3d795655fa743444e39d9a4119b4a4f82a6f30441","0xb1d6c30771130c77806e7ab893b73d4deb590b2ff8f2f8b5e54c2040c1f3e060e2bd99afc668cf706a2df666a508bbf6","0xa00361fd440f9decabd98d96c575cd251dc94c60611025095d1201ef2dedde51cb4de7c2ece47732e5ed9b3526c2012c","0xa85c5ab4d17d328bda5e6d839a9a6adcc92ff844ec25f84981e4f44a0e8419247c081530f8d9aa629c7eb4ca21affba6","0xa4ddd3eab4527a2672cf9463db38bc29f61460e2a162f426b7852b7a7645fbd62084fd39a8e4d60e1958cce436dd8f57","0x811648140080fe55b8618f4cf17f3c5a250adb0cd53d885f2ddba835d2b4433188e41fc0661faac88e4ff910b16278c0","0xb85c7f1cfb0ed29addccf7546023a79249e8f15ac2d14a20accbfef4dd9dc11355d599815fa09d2b6b4e966e6ea8cff1","0xa10b5d8c260b159043b020d5dd62b3467df2671afea6d480ca9087b7e60ed170c82b121819d088315902842d66c8fb45","0x917e191df1bcf3f5715419c1e2191da6b8680543b1ba41fe84ed07ef570376e072c081beb67b375fca3565a2565bcabb","0x881fd967407390bfd7badc9ab494e8a287559a01eb07861f527207c127eadea626e9bcc5aa9cca2c5112fbac3b3f0e9c","0x959fd71149af82cc733619e0e5bf71760ca2650448c82984b3db74030d0e10f8ab1ce1609a6de6f470fe8b5bd90df5b3","0xa3370898a1c5f33d15adb4238df9a6c945f18b9ada4ce2624fc32a844f9ece4c916a64e9442225b6592afa06d2e015f2","0x817efb8a791435e4236f7d7b278181a5fa34587578c629dbc14fbf9a5c26772290611395eecd20222a4c58649fc256d8","0xa04c9876acf2cfdc8ef96de4879742709270fa1d03fe4c8511fbef2d59eb0aaf0336fa2c7dfe41a651157377fa217813","0x81e15875d7ea7f123e418edf14099f2e109d4f3a6ce0eb65f67fe9fb10d2f809a864a29f60ad3fc949f89e2596b21783","0xb49f529975c09e436e6bc202fdc16e3fdcbe056db45178016ad6fdece9faad4446343e83aed096209690b21a6910724f","0x879e8eda589e1a279f7f49f6dd0580788c040d973748ec4942dbe51ea8fbd05983cc919b78f0c6b92ef3292ae29db875","0x81a2b74b2118923f34139a102f3d95e7eee11c4c2929c2576dee200a5abfd364606158535a6c9e4178a6a83dbb65f3c4","0x8913f281d8927f2b45fc815d0f7104631cb7f5f7278a316f1327d670d15868daadd2a64e3eb98e1f53fe7e300338cc80","0xa6f815fba7ef9af7fbf45f93bc952e8b351f5de6568a27c7c47a00cb39a254c6b31753794f67940fc7d2e9cc581529f4","0xb3722a15c66a0014ce4d082de118def8d39190c15678a472b846225585f3a83756ae1b255b2e3f86a26168878e4773b2","0x817ae61ab3d0dd5b6e24846b5a5364b1a7dc2e77432d9fed587727520ae2f307264ea0948c91ad29f0aea3a11ff38
624","0xb3db467464415fcad36dc1de2d6ba7686772a577cc2619242ac040d6734881a45d3b40ed4588db124e4289cfeec4bbf6","0xad66a14f5a54ac69603b16e5f1529851183da77d3cc60867f10aea41339dd5e06a5257982e9e90a352cdd32750f42ee4","0xadafa3681ef45d685555601a25a55cf23358319a17f61e2179e704f63df83a73bdd298d12cf6cef86db89bd17119e11d","0xa379dc44cb6dd3b9d378c07b2ec654fec7ca2f272de6ba895e3d00d20c9e4c5550498a843c8ac67e4221db2115bedc1c","0xb7bf81c267a78efc6b9e5a904574445a6487678d7ef70054e3e93ea6a23f966c2b68787f9164918e3b16d2175459ed92","0xb41d66a13a4afafd5760062b77f79de7e6ab8ccacde9c6c5116a6d886912fb491dc027af435b1b44aacc6af7b3c887f2","0x9904d23a7c1c1d2e4bab85d69f283eb0a8e26d46e8b7b30224438015c936729b2f0af7c7c54c03509bb0500acb42d8a4","0xae30d65e9e20c3bfd603994ae2b175ff691d51f3e24b2d058b3b8556d12ca4c75087809062dddd4aaac81c94d15d8a17","0x9245162fab42ac01527424f6013310c3eb462982518debef6c127f46ba8a06c705d7dc9f0a41e796ba8d35d60ae6cc64","0x87fab853638d7a29a20f3ba2b1a7919d023e9415bfa78ebb27973d8cbc7626f584dc5665d2e7ad71f1d760eba9700d88","0x85aac46ecd330608e5272430970e6081ff02a571e8ea444f1e11785ea798769634a22a142d0237f67b75369d3c484a8a","0x938c85ab14894cc5dfce3d80456f189a2e98eddbc8828f4ff6b1df1dcb7b42b17ca2ff40226a8a1390a95d63dca698dd","0xa18ce1f846e3e3c4d846822f60271eecf0f5d7d9f986385ac53c5ace9589dc7c0188910448c19b91341a1ef556652fa9","0x8611608a9d844f0e9d7584ad6ccf62a5087a64f764caf108db648a776b5390feb51e5120f0ef0e9e11301af3987dd7dc","0x8106333ba4b4de8d1ae43bc9735d3fea047392e88efd6a2fa6f7b924a18a7a265ca6123c3edc0f36307dd7fb7fe89257","0xa91426fa500951ff1b051a248c050b7139ca30dde8768690432d597d2b3c4357b11a577be6b455a1c5d145264dcf81fc","0xb7f9f90e0e450f37b081297f7f651bad0496a8b9afd2a4cf4120a2671aaaa8536dce1af301258bfbfdb122afa44c5048","0x84126da6435699b0c09fa4032dec73d1fca21d2d19f5214e8b0bea43267e9a8dd1fc44f8132d8315e734c8e2e04d7291","0xaff064708103884cb4f1a3c1718b3fc40a238d35cf0a7dc24bdf9823693b407c70da50df585bf5bc4e9c07d1c2d203e8","0xa8b40fc6533752983a5329c31d376c7a5c13ce6879cc7faee648200075d9cd273537001fb4c86e8576350eaac6ba60c2","0xa02db682bdc117a84dcb9312eb28fcbde12d49f4ce915cc92c610bb6965ec3cc38290f8c5b5ec70afe153956692cda95","0x86decd22b25d300508472c9ce75d3e465b737e7ce13bc0fcce32835e54646fe12322ba5bc457be18bfd926a1a6ca4a38","0xa18666ef65b8c2904fd598791f5627207165315a85ee01d5fb0e6b2e10bdd9b00babc447da5bd63445e3337de33b9b89","0x89bb0c06effadefdaf34ffe4b123e1678a90d4451ee856c863df1e752eef41fd984689ded8f0f878bf8916d5dd8e8024","0x97cfcba08ebec05d0073992a66b1d7d6fb9d95871f2cdc36db301f78bf8069294d1c259efef5c93d20dc937eedae3a1a","0xac2643b14ece79dcb2e289c96776a47e2bebd40dd6dc74fd035df5bb727b5596f40e3dd2d2202141e69b0993717ede09","0xa5e6fd88a2f9174d9bd4c6a55d9c30974be414992f22aa852f552c7648f722ed8077acf5aba030abd47939bb451b2c60","0x8ad40a612824a7994487731a40b311b7349038c841145865539c6ada75c56de6ac547a1c23df190e0caaafecddd80ccc","0x953a7cea1d857e09202c438c6108060961f195f88c32f0e012236d7a4b39d840c61b162ec86436e8c38567328bea0246","0x80d8b47a46dae1868a7b8ccfe7029445bbe1009dad4a6c31f9ef081be32e8e1ac1178c3c8fb68d3e536c84990cc035b1","0x81ecd99f22b3766ce0aca08a0a9191793f68c754fdec78b82a4c3bdc2db122bbb9ebfd02fc2dcc6e1567a7d42d0cc16a","0xb1dd0446bccc25846fb95d08c1c9cc52fb51c72c4c5d169ffde56ecfe800f108dc1106d65d5c5bd1087c656de3940b63","0xb87547f0931e164e96de5c550ca5aa81273648fe34f6e193cd9d69cf729cb432e17aa02e25b1c27a8a0d20a3b795e94e","0x820a94e69a927e077082aae66f6b292cfbe4589d932edf9e68e268c9bd3d71ef76cf7d169dd445b93967c25db11f58f1","0xb0d07ddf2595270c39adfa0c8cf2ab1322979b0546aa4d918f641be53cd97f36c879bb75d205e457c011aca3bbd9f731","0x8700b876b35b
4b10a8a9372c5230acecd39539c1bb87515640293ad4464a9e02929d7d6a6a11112e8a29564815ac0de4","0xa61a601c5bb27dcb97e37c8e2b9ce479c6b192a5e04d9ed5e065833c5a1017ee5f237b77d1a17be5d48f8e7cc0bcacf6","0x92fb88fe774c1ba1d4a08cae3c0e05467ad610e7a3f1d2423fd47751759235fe0a3036db4095bd6404716aa03820f484","0xb274f140d77a3ce0796f5e09094b516537ccaf27ae1907099bff172e6368ba85e7c3ef8ea2a07457cac48ae334da95b3","0xb2292d9181f16581a9a9142490b2bdcdfb218ca6315d1effc8592100d792eb89d5356996c890441f04f2b4a95763503e","0x8897e73f576d86bc354baa3bd96e553107c48cf5889dcc23c5ba68ab8bcd4e81f27767be2233fdfa13d39f885087e668","0xa29eac6f0829791c728d71abc49569df95a4446ecbfc534b39f24f56c88fe70301838dfc1c19751e7f3c5c1b8c6af6a0","0x9346dc3720adc5df500a8df27fd9c75ef38dc5c8f4e8ed66983304750e66d502c3c59b8e955be781b670a0afc70a2167","0x9566d534e0e30a5c5f1428665590617e95fd05d45f573715f58157854ad596ece3a3cfec61356aee342308d623e029d5","0xa464fb8bffe6bd65f71938c1715c6e296cc6d0311a83858e4e7eb5873b7f2cf0c584d2101e3407b85b64ca78b2ac93ce","0xb54088f7217987c87e9498a747569ac5b2f8afd5348f9c45bf3fd9fbf713a20f495f49c8572d087efe778ac7313ad6d3","0x91fa9f5f8000fe050f5b224d90b59fcce13c77e903cbf98ded752e5b3db16adb2bc1f8c94be48b69f65f1f1ad81d6264","0x92d04a5b0ac5d8c8e313709b432c9434ecd3e73231f01e9b4e7952b87df60cbfa97b5dedd2200bd033b4b9ea8ba45cc1","0xa94b90ad3c3d6c4bbe169f8661a790c40645b40f0a9d1c7220f01cf7fc176e04d80bab0ced9323fcafb93643f12b2760","0x94d86149b9c8443b46196f7e5a3738206dd6f3be7762df488bcbb9f9ee285a64c997ed875b7b16b26604fa59020a8199","0x82efe4ae2c50a2d7645240c173a047f238536598c04a2c0b69c96e96bd18e075a99110f1206bc213f39edca42ba00cc1","0xab8667685f831bc14d4610f84a5da27b4ea5b133b4d991741a9e64dceb22cb64a3ce8f1b6e101d52af6296df7127c9ad","0x83ba433661c05dcc5d562f4a9a261c8110dac44b8d833ae1514b1fc60d8b4ee395b18804baea04cb10adb428faf713c3","0xb5748f6f660cc5277f1211d2b8649493ed8a11085b871cd33a5aea630abd960a740f08c08be5f9c21574600ac9bf5737","0xa5c8dd12af48fb710642ad65ebb97ca489e8206741807f7acfc334f8035d3c80593b1ff2090c9bb7bd138f0c48714ca8","0xa2b382fd5744e3babf454b1d806cc8783efeb4761bc42b6914ea48a46a2eae835efbe0a18262b6bc034379e03cf1262b","0xb3145ffaf603f69f15a64936d32e3219eea5ed49fdfd2f5bf40ea0dfd974b36fb6ff12164d4c2282d892db4cf3ff3ce1","0x87a316fb213f4c5e30c5e3face049db66be4f28821bd96034714ec23d3e97849d7b301930f90a4323c7ccf53de23050c","0xb9de09a919455070fed6220fc179c8b7a4c753062bcd27acf28f5b9947a659c0b364298daf7c85c4ca6fca7f945add1f","0x806fbd98d411b76979464c40ad88bc07a151628a27fcc1012ba1dfbaf5b5cc9d962fb9b3386008978a12515edce934bc","0xa15268877fae0d21610ae6a31061ed7c20814723385955fac09fdc9693a94c33dea11db98bb89fdfe68f933490f5c381","0x8d633fb0c4da86b2e0b37d8fad5972d62bff2ac663c5ec815d095cd4b7e1fe66ebef2a2590995b57eaf941983c7ad7a4","0x8139e5dd9cf405e8ef65f11164f0440827d98389ce1b418b0c9628be983a9ddd6cf4863036ccb1483b40b8a527acd9ed","0x88b15fa94a08eac291d2b94a2b30eb851ff24addf2cc30b678e72e32cfcb3424cf4b33aa395d741803f3e578ddf524de","0xb5eaf0c8506e101f1646bcf049ee38d99ea1c60169730da893fd6020fd00a289eb2f415947e44677af49e43454a7b1be","0x8489822ad0647a7e06aa2aa5595960811858ddd4542acca419dd2308a8c5477648f4dd969a6740bb78aa26db9bfcc555","0xb1e9a7b9f3423c220330d45f69e45fa03d7671897cf077f913c252e3e99c7b1b1cf6d30caad65e4228d5d7b80eb86e5e","0xb28fe9629592b9e6a55a1406903be76250b1c50c65296c10c5e48c64b539fb08fe11f68cf462a6edcbba71b0cee3feb2","0xa41acf96a02c96cd8744ff6577c244fc923810d17ade133587e4c223beb7b4d99fa56eae311a500d7151979267d0895c","0x880798938fe4ba70721be90e666dfb62fcab4f3556fdb7b0dc8ec5bc34f6b4513df965eae78527136eb391889fe2caf9","0x98d4d89d358e0fb7e212498c73447d94
a83c1b66e98fc81427ab13acddb17a20f52308983f3a5a8e0aaacec432359604","0x81430b6d2998fc78ba937a1639c6020199c52da499f68109da227882dc26d005b73d54c5bdcac1a04e8356a8ca0f7017","0xa8d906a4786455eb74613aba4ce1c963c60095ffb8658d368df9266fdd01e30269ce10bf984e7465f34b4fd83beba26a","0xaf54167ac1f954d10131d44a8e0045df00d581dd9e93596a28d157543fbe5fb25d213806ed7fb3cba6b8f5b5423562db","0x8511e373a978a12d81266b9afbd55035d7bc736835cfa921903a92969eeba3624437d1346b55382e61415726ab84a448","0x8cf43eea93508ae586fa9a0f1354a1e16af659782479c2040874a46317f9e8d572a23238efa318fdfb87cc63932602b7","0xb0bdd3bacff077173d302e3a9678d1d37936188c7ecc34950185af6b462b7c679815176f3cce5db19aac8b282f2d60ad","0xa355e9b87f2f2672052f5d4d65b8c1c827d24d89b0d8594641fccfb69aef1b94009105f3242058bb31c8bf51caae5a41","0xb8baa9e4b950b72ff6b88a6509e8ed1304bc6fd955748b2e59a523a1e0c5e99f52aec3da7fa9ff407a7adf259652466c","0x840bc3dbb300ea6f27d1d6dd861f15680bd098be5174f45d6b75b094d0635aced539fa03ddbccb453879de77fb5d1fe9","0xb4bc7e7e30686303856472bae07e581a0c0bfc815657c479f9f5931cff208d5c12930d2fd1ff413ebd8424bcd7a9b571","0x89b5d514155d7999408334a50822508b9d689add55d44a240ff2bdde2eee419d117031f85e924e2a2c1ca77db9b91eea","0xa8604b6196f87a04e1350302e8aa745bba8dc162115d22657b37a1d1a98cb14876ddf7f65840b5dbd77e80cd22b4256c","0x83cb7acdb9e03247515bb2ce0227486ccf803426717a14510f0d59d45e998b245797d356f10abca94f7a14e1a2f0d552","0xaeb3266a9f16649210ab2df0e1908ac259f34ce1f01162c22b56cf1019096ee4ea5854c36e30bb2feb06c21a71e8a45c","0x89e72e86edf2aa032a0fc9acf4d876a40865fbb2c8f87cb7e4d88856295c4ac14583e874142fd0c314a49aba68c0aa3c","0x8c3576eba0583c2a7884976b4ed11fe1fda4f6c32f6385d96c47b0e776afa287503b397fa516a455b4b8c3afeedc76db","0xa31e5b633bda9ffa174654fee98b5d5930a691c3c42fcf55673d927dbc8d91c58c4e42e615353145431baa646e8bbb30","0x89f2f3f7a8da1544f24682f41c68114a8f78c86bd36b066e27da13acb70f18d9f548773a16bd8e24789420e17183f137","0xada27fa4e90a086240c9164544d2528621a415a5497badb79f8019dc3dce4d12eb6b599597e47ec6ac39c81efda43520","0x90dc1eb21bf21c0187f359566fc4bf5386abea52799306a0e5a1151c0817c5f5bc60c86e76b1929c092c0f3ff48cedd2","0xb702a53ebcc17ae35d2e735a347d2c700e9cbef8eadbece33cac83df483b2054c126593e1f462cfc00a3ce9d737e2af5","0x9891b06455ec925a6f8eafffba05af6a38cc5e193acaaf74ffbf199df912c5197106c5e06d72942bbb032ce277b6417f","0x8c0ee71eb01197b019275bcf96cae94e81d2cdc3115dbf2d8e3080074260318bc9303597e8f72b18f965ad601d31ec43","0x8aaf580aaf75c1b7a5f99ccf60503506e62058ef43b28b02f79b8536a96be3f019c9f71caf327b4e6730134730d1bef5","0xae6f9fc21dd7dfa672b25a87eb0a41644f7609fab5026d5cedb6e43a06dbbfd6d6e30322a2598c8dedde88c52eaed626","0x8159b953ffece5693edadb2e906ebf76ff080ee1ad22698950d2d3bfc36ac5ea78f58284b2ca180664452d55bd54716c","0xab7647c32ca5e9856ac283a2f86768d68de75ceeba9e58b74c5324f8298319e52183739aba4340be901699d66ac9eb3f","0xa4d85a5701d89bcfaf1572db83258d86a1a0717603d6f24ac2963ffcf80f1265e5ab376a4529ca504f4396498791253c","0x816080c0cdbfe61b4d726c305747a9eb58ac26d9a35f501dd32ba43c098082d20faf3ccd41aad24600aa73bfa453dfac","0x84f3afac024f576b0fd9acc6f2349c2fcefc3f77dbe5a2d4964d14b861b88e9b1810334b908cf3427d9b67a8aee74b18","0x94b390655557b1a09110018e9b5a14490681ade275bdc83510b6465a1218465260d9a7e2a6e4ec700f58c31dc3659962","0xa8c66826b1c04a2dd4c682543242e7a57acae37278bd09888a3d17747c5b5fec43548101e6f46d703638337e2fd3277b","0x86e6f4608a00007fa533c36a5b054c5768ccafe41ad52521d772dcae4c8a4bcaff8f7609be30d8fab62c5988cbbb6830","0x837da4cf09ae8aa0bceb16f8b3bfcc3b3367aecac9eed6b4b56d7b65f55981ef066490764fb4c108792623ecf8cad383","0x941ff3011462f9b5bf97d8cbdb0b6f5d37a1b1295b622f5485b7
d69f2cb2bcabc83630dae427f0259d0d9539a77d8424","0xb99e5d6d82aa9cf7d5970e7f710f4039ac32c2077530e4c2779250c6b9b373bc380adb0a03b892b652f649720672fc8c","0xa791c78464b2d65a15440b699e1e30ebd08501d6f2720adbc8255d989a82fcded2f79819b5f8f201bed84a255211b141","0x84af7ad4a0e31fcbb3276ab1ad6171429cf39adcf78dc03750dc5deaa46536d15591e26d53e953dfb31e1622bc0743ab","0xa833e62fe97e1086fae1d4917fbaf09c345feb6bf1975b5cb863d8b66e8d621c7989ab3dbecda36bc9eaffc5eaa6fa66","0xb4ef79a46a2126f53e2ebe62770feb57fd94600be29459d70a77c5e9cc260fa892be06cd60f886bf48459e48eb50d063","0xb43b8f61919ea380bf151c294e54d3a3ff98e20d1ee5efbfe38aa2b66fafbc6a49739793bd5cb1c809f8b30466277c3a","0xab37735af2412d2550e62df9d8b3b5e6f467f20de3890bf56faf1abf2bf3bd1d98dc3fa0ad5e7ab3fce0fa20409eb392","0x82416b74b1551d484250d85bb151fabb67e29cce93d516125533df585bc80779ab057ea6992801a3d7d5c6dcff87a018","0x8145d0787f0e3b5325190ae10c1d6bee713e6765fb6a0e9214132c6f78f4582bb2771aaeae40d3dad4bafb56bf7e36d8","0xb6935886349ecbdd5774e12196f4275c97ec8279fdf28ccf940f6a022ebb6de8e97d6d2173c3fe402cbe9643bed3883b","0x87ef9b4d3dc71ac86369f8ed17e0dd3b91d16d14ae694bc21a35b5ae37211b043d0e36d8ff07dcc513fb9e6481a1f37f","0xae1d0ded32f7e6f1dc8fef495879c1d9e01826f449f903c1e5034aeeabc5479a9e323b162b688317d46d35a42d570d86","0xa40d16497004db4104c6794e2f4428d75bdf70352685944f3fbe17526df333e46a4ca6de55a4a48c02ecf0bde8ba03c0","0x8d45121efba8cc308a498e8ee39ea6fa5cae9fb2e4aab1c2ff9d448aa8494ccbec9a078f978a86fcd97b5d5e7be7522a","0xa8173865c64634ba4ac2fa432740f5c05056a9deaf6427cb9b4b8da94ca5ddbc8c0c5d3185a89b8b28878194de9cdfcd","0xb6ec06a74d690f6545f0f0efba236e63d1fdfba54639ca2617408e185177ece28901c457d02b849fd00f1a53ae319d0a","0xb69a12df293c014a40070e3e760169b6f3c627caf9e50b35a93f11ecf8df98b2bc481b410eecb7ab210bf213bbe944de","0x97e7dc121795a533d4224803e591eef3e9008bab16f12472210b73aaf77890cf6e3877e0139403a0d3003c12c8f45636","0xacdfa6fdd4a5acb7738cc8768f7cba84dbb95c639399b291ae8e4e63df37d2d4096900a84d2f0606bf534a9ccaa4993f","0x86ee253f3a9446a33e4d1169719b7d513c6b50730988415382faaf751988c10a421020609f7bcdef91be136704b906e2","0xaac9438382a856caf84c5a8a234282f71b5fc5f65219103b147e7e6cf565522285fbfd7417b513bdad8277a00f652ca1","0x83f3799d8e5772527930f5dc071a2e0a65471618993ec8990a96ccdeee65270e490bda9d26bb877612475268711ffd80","0x93f28a81ac8c0ec9450b9d762fae9c7f8feaace87a6ee6bd141ef1d2d0697ef1bbd159fe6e1de640dbdab2b0361fca8a","0xa0825c95ba69999b90eac3a31a3fd830ea4f4b2b7409bde5f202b61d741d6326852ce790f41de5cb0eccec7af4db30c1","0x83924b0e66233edd603c3b813d698daa05751fc34367120e3cf384ea7432e256ccee4d4daf13858950549d75a377107d","0x956fd9fa58345277e06ba2ec72f49ed230b8d3d4ff658555c52d6cddeb84dd4e36f1a614f5242d5ca0192e8daf0543c2","0x944869912476baae0b114cced4ff65c0e4c90136f73ece5656460626599051b78802df67d7201c55d52725a97f5f29fe","0x865cb25b64b4531fb6fe4814d7c8cd26b017a6c6b72232ff53defc18a80fe3b39511b23f9e4c6c7249d06e03b2282ed2","0x81e09ff55214960775e1e7f2758b9a6c4e4cd39edf7ec1adfaad51c52141182b79fe2176b23ddc7df9fd153e5f82d668","0xb31006896f02bc90641121083f43c3172b1039334501fbaf1672f7bf5d174ddd185f945adf1a9c6cf77be34c5501483d","0x88b92f6f42ae45e9f05b16e52852826e933efd0c68b0f2418ac90957fd018df661bc47c8d43c2a7d7bfcf669dab98c3c","0x92fc68f595853ee8683930751789b799f397135d002eda244fe63ecef2754e15849edde3ba2f0cc8b865c9777230b712","0x99ca06a49c5cd0bb097c447793fcdd809869b216a34c66c78c7e41e8c22f05d09168d46b8b1f3390db9452d91bc96dea","0xb48b9490a5d65296802431852d548d81047bbefc74fa7dc1d4e2a2878faacdfcb365ae59209cb0ade01901a283cbd15d","0xaff0fdbef7c188b120a02bc9085d7b808e88f73973773fef54707bf2cd772cd066740b1b
6f4127b5c349f657bd97e738","0x966fd4463b4f43dd8ccba7ad50baa42292f9f8b2e70da23bb6780e14155d9346e275ef03ddaf79e47020dcf43f3738bd","0x9330c3e1fadd9e08ac85f4839121ae20bbeb0a5103d84fa5aadbd1213805bdcda67bf2fb75fc301349cbc851b5559d20","0x993bb99867bd9041a71a55ad5d397755cfa7ab6a4618fc526179bfc10b7dc8b26e4372fe9a9b4a15d64f2b63c1052dda","0xa29b59bcfab51f9b3c490a3b96f0bf1934265c315349b236012adbd64a56d7f6941b2c8cc272b412044bc7731f71e1dc","0xa65c9cefe1fc35d089fe8580c2e7671ebefdb43014ac291528ff4deefd4883fd4df274af83711dad610dad0d615f9d65","0x944c78c56fb227ae632805d448ca3884cd3d2a89181cead3d2b7835e63297e6d740aa79a112edb1d4727824991636df5","0xa73d782da1db7e4e65d7b26717a76e16dd9fab4df65063310b8e917dc0bc24e0d6755df5546c58504d04d9e68c3b474a","0xaf80f0b87811ae3124f68108b4ca1937009403f87928bbc53480e7c5408d072053ace5eeaf5a5aba814dab8a45502085","0x88aaf1acfc6e2e19b8387c97da707cb171c69812fefdd4650468e9b2c627bd5ccfb459f4d8e56bdfd84b09ddf87e128f","0x92c97276ff6f72bab6e9423d02ad6dc127962dbce15a0dd1e4a393b4510c555df6aa27be0f697c0d847033a9ca8b8dfd","0xa0e07d43d96e2d85b6276b3c60aadb48f0aedf2de8c415756dc597249ea64d2093731d8735231dadc961e5682ac59479","0xadc9e6718a8f9298957d1da3842a7751c5399bbdf56f8de6c1c4bc39428f4aee6f1ba6613d37bf46b9403345e9d6fc81","0x951da434da4b20d949b509ceeba02e24da7ed2da964c2fcdf426ec787779c696b385822c7dbea4df3e4a35921f1e912c","0xa04cbce0d2b2e87bbf038c798a12ec828423ca6aca08dc8d481cf6466e3c9c73d4d4a7fa47df9a7e2e15aae9e9f67208","0x8f855cca2e440d248121c0469de1f94c2a71b8ee2682bbad3a78243a9e03da31d1925e6760dbc48a1957e040fae9abe8","0xb642e5b17c1df4a4e101772d73851180b3a92e9e8b26c918050f51e6dd3592f102d20b0a1e96f0e25752c292f4c903ff","0xa92454c300781f8ae1766dbbb50a96192da7d48ef4cbdd72dd8cbb44c6eb5913c112cc38e9144615fdc03684deb99420","0x8b74f7e6c2304f8e780df4649ef8221795dfe85fdbdaa477a1542d135b75c8be45bf89adbbb6f3ddf54ca40f02e733e9","0x85cf66292cbb30cec5fd835ab10c9fcb3aea95e093aebf123e9a83c26f322d76ebc89c4e914524f6c5f6ee7d74fc917d","0xae0bfe0cdc97c09542a7431820015f2d16067b30dca56288013876025e81daa8c519e5e347268e19aa1a85fa1dc28793","0x921322fc6a47dc091afa0ad6df18ed14cde38e48c6e71550aa513918b056044983aee402de21051235eecf4ce8040fbe","0x96c030381e97050a45a318d307dcb3c8377b79b4dd5daf6337cded114de26eb725c14171b9b8e1b3c08fe1f5ea6b49e0","0x90c23b86b6111818c8baaf53a13eaee1c89203b50e7f9a994bf0edf851919b48edbac7ceef14ac9414cf70c486174a77","0x8bf6c301240d2d1c8d84c71d33a6dfc6d9e8f1cfae66d4d0f7a256d98ae12b0bcebfa94a667735ee89f810bcd7170cff","0xa41a4ffbbea0e36874d65c009ee4c3feffff322f6fc0e30d26ee4dbc1f46040d05e25d9d0ecb378cef0d24a7c2c4b850","0xa8d4cdd423986bb392a0a92c12a8bd4da3437eec6ef6af34cf5310944899287452a2eb92eb5386086d5063381189d10e","0xa81dd26ec057c4032a4ed7ad54d926165273ed51d09a1267b2e477535cf6966835a257c209e4e92d165d74fa75695fa3","0x8d7f708c3ee8449515d94fc26b547303b53d8dd55f177bc3b25d3da2768accd9bc8e9f09546090ebb7f15c66e6c9c723","0x839ba65cffcd24cfffa7ab3b21faabe3c66d4c06324f07b2729c92f15cad34e474b0f0ddb16cd652870b26a756b731d3","0x87f1a3968afec354d92d77e2726b702847c6afcabb8438634f9c6f7766de4c1504317dc4fa9a4a735acdbf985e119564","0x91a8a7fd6542f3e0673f07f510d850864b34ac087eb7eef8845a1d14b2b1b651cbdc27fa4049bdbf3fea54221c5c8549","0xaef3cf5f5e3a2385ead115728d7059e622146c3457d266c612e778324b6e06fbfb8f98e076624d2f3ce1035d65389a07","0x819915d6232e95ccd7693fdd78d00492299b1983bc8f96a08dcb50f9c0a813ed93ae53c0238345d5bea0beda2855a913","0x8e9ba68ded0e94935131b392b28218315a185f63bf5e3c1a9a9dd470944509ca0ba8f6122265f8da851b5cc2abce68f1","0xb28468e9b04ee9d69003399a3cf4457c9bf9d59f36ab6ceeb8e964672433d06b58beeea198fedc7edbaa1948577e
9fa2","0xa633005e2c9f2fd94c8bce2dd5bb708fe946b25f1ec561ae65e54e15cdd88dc339f1a083e01f0d39610c8fe24151aaf0","0x841d0031e22723f9328dd993805abd13e0c99b0f59435d2426246996b08d00ce73ab906f66c4eab423473b409e972ce0","0x85758d1b084263992070ec8943f33073a2d9b86a8606672550c17545507a5b3c88d87382b41916a87ee96ff55a7aa535","0x8581b06b0fc41466ef94a76a1d9fb8ae0edca6d018063acf6a8ca5f4b02d76021902feba58972415691b4bdbc33ae3b4","0x83539597ff5e327357ee62bc6bf8c0bcaec2f227c55c7c385a4806f0d37fb461f1690bad5066b8a5370950af32fafbef","0xaee3557290d2dc10827e4791d00e0259006911f3f3fce4179ed3c514b779160613eca70f720bff7804752715a1266ffa","0xb48d2f0c4e90fc307d5995464e3f611a9b0ef5fe426a289071f4168ed5cc4f8770c9332960c2ca5c8c427f40e6bb389f","0x847af8973b4e300bb06be69b71b96183fd1a0b9d51b91701bef6fcfde465068f1eb2b1503b07afda380f18d69de5c9e1","0xa70a6a80ce407f07804c0051ac21dc24d794b387be94eb24e1db94b58a78e1bcfb48cd0006db8fc1f9bedaece7a44fbe","0xb40e942b8fa5336910ff0098347df716bff9d1fa236a1950c16eeb966b3bc1a50b8f7b0980469d42e75ae13ced53cead","0xb208fabaa742d7db3148515330eb7a3577487845abdb7bd9ed169d0e081db0a5816595c33d375e56aeac5b51e60e49d3","0xb7c8194b30d3d6ef5ab66ec88ad7ebbc732a3b8a41731b153e6f63759a93f3f4a537eab9ad369705bd730184bdbbdc34","0x9280096445fe7394d04aa1bc4620c8f9296e991cc4d6c131bd703cb1cc317510e6e5855ac763f4d958c5edfe7eebeed7","0xabc2aa4616a521400af1a12440dc544e3c821313d0ab936c86af28468ef8bbe534837e364598396a81cf8d06274ed5a6","0xb18ca8a3325adb0c8c18a666d4859535397a1c3fe08f95eebfac916a7a99bbd40b3c37b919e8a8ae91da38bc00fa56c0","0x8a40c33109ecea2a8b3558565877082f79121a432c45ec2c5a5e0ec4d1c203a6788e6b69cb37f1fd5b8c9a661bc5476d","0x88c47301dd30998e903c84e0b0f2c9af2e1ce6b9f187dab03528d44f834dc991e4c86d0c474a2c63468cf4020a1e24a0","0x920c832853e6ab4c851eecfa9c11d3acc7da37c823be7aa1ab15e14dfd8beb5d0b91d62a30cec94763bd8e4594b66600","0x98e1addbe2a6b8edc7f12ecb9be81c3250aeeca54a1c6a7225772ca66549827c15f3950d01b8eb44aecb56fe0fff901a","0x8cfb0fa1068be0ec088402f5950c4679a2eb9218c729da67050b0d1b2d7079f3ddf4bf0f57d95fe2a8db04bc6bcdb20c","0xb70f381aafe336b024120453813aeab70baac85b9c4c0f86918797b6aee206e6ed93244a49950f3d8ec9f81f4ac15808","0xa4c8edf4aa33b709a91e1062939512419711c1757084e46f8f4b7ed64f8e682f4e78b7135920c12f0eb0422fe9f87a6a","0xb4817e85fd0752d7ebb662d3a51a03367a84bac74ebddfba0e5af5e636a979500f72b148052d333b3dedf9edd2b4031b","0xa87430169c6195f5d3e314ff2d1c2f050e766fd5d2de88f5207d72dba4a7745bb86d0baca6e9ae156582d0d89e5838c7","0x991b00f8b104566b63a12af4826b61ce7aa40f4e5b8fff3085e7a99815bdb4471b6214da1e480214fac83f86a0b93cc5","0xb39966e3076482079de0678477df98578377a094054960ee518ef99504d6851f8bcd3203e8da5e1d4f6f96776e1fe6eb","0xa448846d9dc2ab7a0995fa44b8527e27f6b3b74c6e03e95edb64e6baa4f1b866103f0addb97c84bef1d72487b2e21796","0x894bec21a453ae84b592286e696c35bc30e820e9c2fd3e63dd4fbe629e07df16439c891056070faa490155f255bf7187","0xa9ec652a491b11f6a692064e955f3f3287e7d2764527e58938571469a1e29b5225b9415bd602a45074dfbfe9c131d6ca","0xb39d37822e6cbe28244b5f42ce467c65a23765bd16eb6447c5b3e942278069793763483dafd8c4dd864f8917aad357fe","0x88dba51133f2019cb266641c56101e3e5987d3b77647a2e608b5ff9113dfc5f85e2b7c365118723131fbc0c9ca833c9c","0xb566579d904b54ecf798018efcb824dccbebfc6753a0fd2128ac3b4bd3b038c2284a7c782b5ca6f310eb7ea4d26a3f0a","0xa97a55c0a492e53c047e7d6f9d5f3e86fb96f3dddc68389c0561515343b66b4bc02a9c0d5722dff1e3445308240b27f7","0xa044028ab4bcb9e1a2b9b4ca4efbf04c5da9e4bf2fff0e8bd57aa1fc12a71e897999c25d9117413faf2f45395dee0f13","0xa78dc461decbeaeed8ebd0909369b491a5e764d6a5645a7dac61d3140d7dc0062526f777b0eb866bff27608429ebbdde","0xb2c2a8991f9
4c39ca35fea59f01a92cb3393e0eccb2476dfbf57261d406a68bd34a6cff33ed80209991688c183609ef4","0x84189eefb521aff730a4fd3fd5b10ddfd29f0d365664caef63bb015d07e689989e54c33c2141dd64427805d37a7e546e","0x85ac80bd734a52235da288ff042dea9a62e085928954e8eacd2c751013f61904ed110e5b3afe1ab770a7e6485efb7b5e","0x9183a560393dcb22d0d5063e71182020d0fbabb39e32493eeffeb808df084aa243eb397027f150b55a247d1ed0c8513e","0x81c940944df7ecc58d3c43c34996852c3c7915ed185d7654627f7af62abae7e0048dd444a6c09961756455000bd96d09","0xaa8c34e164019743fd8284b84f06c3b449aae7996e892f419ee55d82ad548cb300fd651de329da0384243954c0ef6a60","0x89a7b7bdfc7e300d06a14d463e573d6296d8e66197491900cc9ae49504c4809ff6e61b758579e9091c61085ba1237b83","0x878d21809ba540f50bd11f4c4d9590fb6f3ab9de5692606e6e2ef4ed9d18520119e385be5e1f4b3f2e2b09c319f0e8fc","0x8eb248390193189cf0355365e630b782cd15751e672dc478b39d75dc681234dcd9309df0d11f4610dbb249c1e6be7ef9","0xa1d7fb3aecb896df3a52d6bd0943838b13f1bd039c936d76d03de2044c371d48865694b6f532393b27fd10a4cf642061","0xa34bca58a24979be442238cbb5ece5bee51ae8c0794dd3efb3983d4db713bc6f28a96e976ac3bd9a551d3ed9ba6b3e22","0x817c608fc8cacdd178665320b5a7587ca21df8bdd761833c3018b967575d25e3951cf3d498a63619a3cd2ad4406f5f28","0x86c95707db0495689afd0c2e39e97f445f7ca0edffad5c8b4cacd1421f2f3cc55049dfd504f728f91534e20383955582","0x99c3b0bb15942c301137765d4e19502f65806f3b126dc01a5b7820c87e8979bce6a37289a8f6a4c1e4637227ad5bf3bf","0x8aa1518a80ea8b074505a9b3f96829f5d4afa55a30efe7b4de4e5dbf666897fdd2cf31728ca45921e21a78a80f0e0f10","0x8d74f46361c79e15128ac399e958a91067ef4cec8983408775a87eca1eed5b7dcbf0ddf30e66f51780457413496c7f07","0xa41cde4a786b55387458a1db95171aca4fd146507b81c4da1e6d6e495527c3ec83fc42fad1dfe3d92744084a664fd431","0x8c352852c906fae99413a84ad11701f93f292fbf7bd14738814f4c4ceab32db02feb5eb70bc73898b0bc724a39d5d017","0xa5993046e8f23b71ba87b7caa7ace2d9023fb48ce4c51838813174880d918e9b4d2b0dc21a2b9c6f612338c31a289df8","0x83576d3324bf2d8afbfb6eaecdc5d767c8e22e7d25160414924f0645491df60541948a05e1f4202e612368e78675de8a","0xb43749b8df4b15bc9a3697e0f1c518e6b04114171739ef1a0c9c65185d8ec18e40e6954d125cbc14ebc652cf41ad3109","0xb4eebd5d80a7327a040cafb9ccdb12b2dfe1aa86e6bc6d3ac8a57fadfb95a5b1a7332c66318ff72ba459f525668af056","0x9198be7f1d413c5029b0e1c617bcbc082d21abe2c60ec8ce9b54ca1a85d3dba637b72fda39dae0c0ae40d047eab9f55a","0x8d96a0232832e24d45092653e781e7a9c9520766c3989e67bbe86b3a820c4bf621ea911e7cd5270a4bfea78b618411f6","0x8d7160d0ea98161a2d14d46ef01dff72d566c330cd4fabd27654d300e1bc7644c68dc8eabf2a20a59bfe7ba276545f9b","0xabb60fce29dec7ba37e3056e412e0ec3e05538a1fc0e2c68877378c867605966108bc5742585ab6a405ce0c962b285b6","0x8fabffa3ed792f05e414f5839386f6449fd9f7b41a47595c5d71074bd1bb3784cc7a1a7e1ad6b041b455035957e5b2dc","0x90ff017b4804c2d0533b72461436b10603ab13a55f86fd4ec11b06a70ef8166f958c110519ca1b4cc7beba440729fe2d","0xb340cfd120f6a4623e3a74cf8c32bfd7cd61a280b59dfd17b15ca8fae4d82f64a6f15fbde4c02f424debc72b7db5fe67","0x871311c9c7220c932e738d59f0ecc67a34356d1429fe570ca503d340c9996cb5ee2cd188fad0e3bd16e4c468ec1dbebd","0xa772470262186e7b94239ba921b29f2412c148d6f97c4412e96d21e55f3be73f992f1ad53c71008f0558ec3f84e2b5a7","0xb2a897dcb7ffd6257f3f2947ec966f2077d57d5191a88840b1d4f67effebe8c436641be85524d0a21be734c63ab5965d","0xa044f6eacc48a4a061fa149500d96b48cbf14853469aa4d045faf3dca973be1bd4b4ce01646d83e2f24f7c486d03205d","0x981af5dc2daa73f7fa9eae35a93d81eb6edba4a7f673b55d41f6ecd87a37685d31bb40ef4f1c469b3d72f2f18b925a17","0x912d2597a07864de9020ac77083eff2f15ceb07600f15755aba61251e8ce3c905a758453b417f04d9c38db040954eb65","0x9642b7f6f09394ba5e0805734ef6702
c3eddf9eea187ba98c676d5bbaec0e360e3e51dc58433aaa1e2da6060c8659cb7","0x8ab3836e0a8ac492d5e707d056310c4c8e0489ca85eb771bff35ba1d658360084e836a6f51bb990f9e3d2d9aeb18fbb5","0x879e058e72b73bb1f4642c21ffdb90544b846868139c6511f299aafe59c2d0f0b944dffc7990491b7c4edcd6a9889250","0xb9e60b737023f61479a4a8fd253ed0d2a944ea6ba0439bbc0a0d3abf09b0ad1f18d75555e4a50405470ae4990626f390","0xb9c2535d362796dcd673640a9fa2ebdaec274e6f8b850b023153b0a7a30fffc87f96e0b72696f647ebe7ab63099a6963","0x94aeff145386a087b0e91e68a84a5ede01f978f9dd9fe7bebca78941938469495dc30a96bba9508c0d017873aeea9610","0x98b179f8a3d9f0d0a983c30682dd425a2ddc7803be59bd626c623c8951a5179117d1d2a68254c95c9952989877d0ee55","0x889ecf5f0ee56938273f74eb3e9ecfb5617f04fb58e83fe4c0e4aef51615cf345bc56f3f61b17f6eed3249d4afd54451","0xa0f2b2c39bcea4b50883e2587d16559e246248a66ecb4a4b7d9ab3b51fb39fe98d83765e087eee37a0f86b0ba4144c02","0xb2a61e247ed595e8a3830f7973b07079cbda510f28ad8c78c220b26cb6acde4fbb5ee90c14a665f329168ee951b08cf0","0x95bd0fcfb42f0d6d8a8e73d7458498a85bcddd2fb132fd7989265648d82ac2707d6d203fac045504977af4f0a2aca4b7","0x843e5a537c298666e6cf50fcc044f13506499ef83c802e719ff2c90e85003c132024e04711be7234c04d4b0125512d5d","0xa46d1797c5959dcd3a5cfc857488f4d96f74277c3d13b98b133620192f79944abcb3a361d939a100187f1b0856eae875","0xa1c7786736d6707a48515c38660615fcec67eb8a2598f46657855215f804fd72ab122d17f94fcffad8893f3be658dca7","0xb23dc9e610abc7d8bd21d147e22509a0fa49db5be6ea7057b51aae38e31654b3aa044df05b94b718153361371ba2f622","0xb00cc8f257d659c22d30e6d641f79166b1e752ea8606f558e4cad6fc01532e8319ea4ee12265ba4140ac45aa4613c004","0xac7019af65221b0cc736287b32d7f1a3561405715ba9a6a122342e04e51637ba911c41573de53e4781f2230fdcb2475f","0x81a630bc41b3da8b3eb4bf56cba10cd9f93153c3667f009dc332287baeb707d505fb537e6233c8e53d299ec0f013290c","0xa6b7aea5c545bb76df0f230548539db92bc26642572cb7dd3d5a30edca2b4c386f44fc8466f056b42de2a452b81aff5b","0x8271624ff736b7b238e43943c81de80a1612207d32036d820c11fc830c737972ccc9c60d3c2359922b06652311e3c994","0x8a684106458cb6f4db478170b9ad595d4b54c18bf63b9058f095a2fa1b928c15101472c70c648873d5887880059ed402","0xa5cc3c35228122f410184e4326cf61a37637206e589fcd245cb5d0cec91031f8f7586b80503070840fdfd8ce75d3c88b","0x9443fc631aed8866a7ed220890911057a1f56b0afe0ba15f0a0e295ab97f604b134b1ed9a4245e46ee5f9a93aa74f731","0x984b6f7d79835dffde9558c6bb912d992ca1180a2361757bdba4a7b69dc74b056e303adc69fe67414495dd9c2dd91e64","0xb15a5c8cba5de080224c274d31c68ed72d2a7126d347796569aef0c4e97ed084afe3da4d4b590b9dda1a07f0c2ff3dfb","0x991708fe9650a1f9a4e43938b91d45dc68c230e05ee999c95dbff3bf79b1c1b2bb0e7977de454237c355a73b8438b1d9","0xb4f7edc7468b176a4a7c0273700c444fa95c726af6697028bed4f77eee887e3400f9c42ee15b782c0ca861c4c3b8c98a","0x8c60dcc16c51087eb477c13e837031d6c6a3dc2b8bf8cb43c23f48006bc7173151807e866ead2234b460c2de93b31956","0x83ad63e9c910d1fc44bc114accfb0d4d333b7ebe032f73f62d25d3e172c029d5e34a1c9d547273bf6c0fead5c8801007","0x85de73213cc236f00777560756bdbf2b16841ba4b55902cf2cad9742ecaf5d28209b012ceb41f337456dfeca93010cd7","0xa7561f8827ccd75b6686ba5398bb8fc3083351c55a589b18984e186820af7e275af04bcd4c28e1dc11be1e8617a0610b","0x88c0a4febd4068850557f497ea888035c7fc9f404f6cc7794e7cc8722f048ad2f249e7dc62743e7a339eb7473ad3b0cd","0x932b22b1d3e6d5a6409c34980d176feb85ada1bf94332ef5c9fc4d42b907dabea608ceef9b5595ef3feee195151f18d8","0xa2867bb3f5ab88fbdae3a16c9143ab8a8f4f476a2643c505bb9f37e5b1fd34d216cab2204c9a017a5a67b7ad2dda10e8","0xb573d5f38e4e9e8a3a6fd82f0880dc049efa492a946d00283019bf1d5e5516464cf87039e80aef667cb86fdea5075904","0xb948f1b5ab755f3f5f36af27d94f503b070696d793b1240c1bd
fd2e8e56890d69e6904688b5f8ff5a4bdf5a6abfe195f","0x917eae95ebc4109a2e99ddd8fec7881d2f7aaa0e25fda44dec7ce37458c2ee832f1829db7d2dcfa4ca0f06381c7fe91d","0x95751d17ed00a3030bce909333799bb7f4ab641acf585807f355b51d6976dceee410798026a1a004ef4dcdff7ec0f5b8","0xb9b7bd266f449a79bbfe075e429613e76c5a42ac61f01c8f0bbbd34669650682efe01ff9dbbc400a1e995616af6aa278","0xac1722d097ce9cd7617161f8ec8c23d68f1fb1c9ca533e2a8b4f78516c2fd8fb38f23f834e2b9a03bb06a9d655693ca9","0xa7ad9e96ffd98db2ecdb6340c5d592614f3c159abfd832fe27ee9293519d213a578e6246aae51672ee353e3296858873","0x989b8814d5de7937c4acafd000eec2b4cd58ba395d7b25f98cafd021e8efa37029b29ad8303a1f6867923f5852a220eb","0xa5bfe6282c771bc9e453e964042d44eff4098decacb89aecd3be662ea5b74506e1357ab26f3527110ba377711f3c9f41","0x8900a7470b656639721d2abbb7b06af0ac4222ab85a1976386e2a62eb4b88bfb5b72cf7921ddb3cf3a395d7eeb192a2e","0x95a71b55cd1f35a438cf5e75f8ff11c5ec6a2ebf2e4dba172f50bfad7d6d5dca5de1b1afc541662c81c858f7604c1163","0x82b5d62fea8db8d85c5bc3a76d68dedd25794cf14d4a7bc368938ffca9e09f7e598fdad2a5aac614e0e52f8112ae62b9","0x997173f07c729202afcde3028fa7f52cefc90fda2d0c8ac2b58154a5073140683e54c49ed1f254481070d119ce0ce02a","0xaeffb91ccc7a72bbd6ffe0f9b99c9e66e67d59cec2e02440465e9636a613ab3017278cfa72ea8bc4aba9a8dc728cb367","0x952743b06e8645894aeb6440fc7a5f62dd3acf96dab70a51e20176762c9751ea5f2ba0b9497ccf0114dc4892dc606031","0x874c63baeddc56fbbca2ff6031f8634b745f6e34ea6791d7c439201aee8f08ef5ee75f7778700a647f3b21068513fce6","0x85128fec9c750c1071edfb15586435cc2f317e3e9a175bb8a9697bcda1eb9375478cf25d01e7fed113483b28f625122d","0x85522c9576fd9763e32af8495ae3928ed7116fb70d4378448926bc9790e8a8d08f98cf47648d7da1b6e40d6a210c7924","0x97d0f37a13cfb723b848099ca1c14d83e9aaf2f7aeb71829180e664b7968632a08f6a85f557d74b55afe6242f2a36e7c","0xabaa472d6ad61a5fccd1a57c01aa1bc081253f95abbcba7f73923f1f11c4e79b904263890eeb66926de3e2652f5d1c70","0xb3c04945ba727a141e5e8aec2bf9aa3772b64d8fd0e2a2b07f3a91106a95cbcb249adcd074cbe498caf76fffac20d4ef","0x82c46781a3d730d9931bcabd7434a9171372dde57171b6180e5516d4e68db8b23495c8ac3ab96994c17ddb1cf249b9fb","0xa202d8b65613c42d01738ccd68ed8c2dbc021631f602d53f751966e04182743ebc8e0747d600b8a8676b1da9ae7f11ab","0xae73e7256e9459db04667a899e0d3ea5255211fb486d084e6550b6dd64ca44af6c6b2d59d7aa152de9f96ce9b58d940d","0xb67d87b176a9722945ec7593777ee461809861c6cfd1b945dde9ee4ff009ca4f19cf88f4bbb5c80c9cbab2fe25b23ac8","0x8f0b7a317a076758b0dac79959ee4a06c08b07d0f10538a4b53d3da2eda16e2af26922feb32c090330dc4d969cf69bd3","0x90b36bf56adbd8c4b6cb32febc3a8d5f714370c2ac3305c10fa6d168dffb2a026804517215f9a2d4ec8310cdb6bb459b","0xaa80c19b0682ead69934bf18cf476291a0beddd8ef4ed75975d0a472e2ab5c70f119722a8574ae4973aceb733d312e57","0xa3fc9abb12574e5c28dcb51750b4339b794b8e558675eef7d26126edf1de920c35e992333bcbffcbf6a5f5c0d383ce62","0xa1573ff23ab972acdcd08818853b111fc757fdd35aa070186d3e11e56b172fb49d840bf297ac0dd222e072fc09f26a81","0x98306f2be4caa92c2b4392212d0cbf430b409b19ff7d5b899986613bd0e762c909fc01999aa94be3bd529d67f0113d7f","0x8c1fc42482a0819074241746d17dc89c0304a2acdae8ed91b5009e9e3e70ff725ba063b4a3e68fdce05b74f5180c545e","0xa6c6113ebf72d8cf3163b2b8d7f3fa24303b13f55752522c660a98cd834d85d8c79214d900fa649499365e2e7641f77a","0xab95eea424f8a2cfd9fb1c78bb724e5b1d71a0d0d1e4217c5d0f98b0d8bbd3f8400a2002abc0a0e4576d1f93f46fefad","0x823c5a4fd8cf4a75fdc71d5f2dd511b6c0f189b82affeacd2b7cfcad8ad1a5551227dcc9bfdb2e34b2097eaa00efbb51","0xb97314dfff36d80c46b53d87a61b0e124dc94018a0bb680c32765b9a2d457f833a7c42bbc90b3b1520c33a182580398d","0xb17566ee3dcc6bb3b004afe4c0136dfe7dd27df9045ae896dca49fb36987501ae069eb7
45af81ba3fc19ff037e7b1406","0xb0bdc0f55cfd98d331e3a0c4fbb776a131936c3c47c6bffdc3aaf7d8c9fa6803fbc122c2fefbb532e634228687d52174","0xaa5d9e60cc9f0598559c28bb9bdd52aa46605ab4ffe3d192ba982398e72cec9a2a44c0d0d938ce69935693cabc0887ea","0x802b6459d2354fa1d56c592ac1346c428dadea6b6c0a87bf7d309bab55c94e1cf31dd98a7a86bd92a840dd51f218b91b","0xa526914efdc190381bf1a73dd33f392ecf01350b9d3f4ae96b1b1c3d1d064721c7d6eec5788162c933245a3943f5ee51","0xb3b8fcf637d8d6628620a1a99dbe619eabb3e5c7ce930d6efd2197e261bf394b74d4e5c26b96c4b8009c7e523ccfd082","0x8f7510c732502a93e095aba744535f3928f893f188adc5b16008385fb9e80f695d0435bfc5b91cdad4537e87e9d2551c","0x97b90beaa56aa936c3ca45698f79273a68dd3ccd0076eab48d2a4db01782665e63f33c25751c1f2e070f4d1a8525bf96","0xb9fb798324b1d1283fdc3e48288e3861a5449b2ab5e884b34ebb8f740225324af86e4711da6b5cc8361c1db15466602f","0xb6d52b53cea98f1d1d4c9a759c25bf9d8a50b604b144e4912acbdbdc32aab8b9dbb10d64a29aa33a4f502121a6fb481c","0x9174ffff0f2930fc228f0e539f5cfd82c9368d26b074467f39c07a774367ff6cccb5039ac63f107677d77706cd431680","0xa33b6250d4ac9e66ec51c063d1a6a31f253eb29bbaed12a0d67e2eccfffb0f3a52750fbf52a1c2aaba8c7692346426e7","0xa97025fd5cbcebe8ef865afc39cd3ea707b89d4e765ec817fd021d6438e02fa51e3544b1fd45470c58007a08efac6edd","0xb32a78480edd9ff6ba2f1eec4088db5d6ceb2d62d7e59e904ecaef7bb4a2e983a4588e51692b3be76e6ffbc0b5f911a5","0xb5ab590ef0bb77191f00495b33d11c53c65a819f7d0c1f9dc4a2caa147a69c77a4fff7366a602d743ee1f395ce934c1e","0xb3fb0842f9441fb1d0ee0293b6efbc70a8f58d12d6f769b12872db726b19e16f0f65efbc891cf27a28a248b0ef9c7e75","0x9372ad12856fefb928ccb0d34e198df99e2f8973b07e9d417a3134d5f69e12e79ff572c4e03ccd65415d70639bc7c73e","0xaa8d6e83d09ce216bfe2009a6b07d0110d98cf305364d5529c170a23e693aabb768b2016befb5ada8dabdd92b4d012bb","0xa954a75791eeb0ce41c85200c3763a508ed8214b5945a42c79bfdcfb1ec4f86ad1dd7b2862474a368d4ac31911a2b718","0x8e2081cfd1d062fe3ab4dab01f68062bac802795545fede9a188f6c9f802cb5f884e60dbe866710baadbf55dc77c11a4","0xa2f06003b9713e7dd5929501ed485436b49d43de80ea5b15170763fd6346badf8da6de8261828913ee0dacd8ff23c0e1","0x98eecc34b838e6ffd1931ca65eec27bcdb2fdcb61f33e7e5673a93028c5865e0d1bf6d3bec040c5e96f9bd08089a53a4","0x88cc16019741b341060b95498747db4377100d2a5bf0a5f516f7dec71b62bcb6e779de2c269c946d39040e03b3ae12b7","0xad1135ccbc3019d5b2faf59a688eef2500697642be8cfbdf211a1ab59abcc1f24483e50d653b55ff1834675ac7b4978f","0xa946f05ed9972f71dfde0020bbb086020fa35b482cce8a4cc36dd94355b2d10497d7f2580541bb3e81b71ac8bba3c49f","0xa83aeed488f9a19d8cfd743aa9aa1982ab3723560b1cd337fc2f91ad82f07afa412b3993afb845f68d47e91ba4869840","0x95eebe006bfc316810cb71da919e5d62c2cebb4ac99d8e8ef67be420302320465f8b69873470982de13a7c2e23516be9","0xa55f8961295a11e91d1e5deadc0c06c15dacbfc67f04ccba1d069cba89d72aa3b3d64045579c3ea8991b150ac29366ae","0xb321991d12f6ac07a5de3c492841d1a27b0d3446082fbce93e7e1f9e8d8fe3b45d41253556261c21b70f5e189e1a7a6f","0xa0b0822f15f652ce7962a4f130104b97bf9529797c13d6bd8e24701c213cc37f18157bd07f3d0f3eae6b7cd1cb40401f","0x96e2fa4da378aa782cc2d5e6e465fc9e49b5c805ed01d560e9b98abb5c0de8b74a2e7bec3aa5e2887d25cccb12c66f0c","0x97e4ab610d414f9210ed6f35300285eb3ccff5b0b6a95ed33425100d7725e159708ea78704497624ca0a2dcabce3a2f9","0x960a375b17bdb325761e01e88a3ea57026b2393e1d887b34b8fa5d2532928079ce88dc9fd06a728b26d2bb41b12b9032","0x8328a1647398e832aadc05bd717487a2b6fcdaa0d4850d2c4da230c6a2ed44c3e78ec4837b6094f3813f1ee99414713f","0xaa283834ebd18e6c99229ce4b401eda83f01d904f250fedd4e24f1006f8fa0712a6a89a7296a9bf2ce8de30e28d1408e","0xb29e097f2caadae3e0f0ae3473c072b0cd0206cf6d2e9b22c1a5ad3e07d433e32bd09ed1f4e4276a2da42686333
57b7f","0x9539c5cbba14538b2fe077ecf67694ef240da5249950baaabea0340718b882a966f66d97f08556b08a4320ceb2cc2629","0xb4529f25e9b42ae8cf8338d2eface6ba5cd4b4d8da73af502d081388135c654c0b3afb3aa779ffc80b8c4c8f4425dd2b","0x95be0739c4330619fbe7ee2249c133c91d6c07eab846c18c5d6c85fc21ac5528c5d56dcb0145af68ed0c6a79f68f2ccd","0xac0c83ea802227bfc23814a24655c9ff13f729619bcffdb487ccbbf029b8eaee709f8bddb98232ef33cd70e30e45ca47","0xb503becb90acc93b1901e939059f93e671900ca52c6f64ae701d11ac891d3a050b505d89324ce267bc43ab8275da6ffe","0x98e3811b55b1bacb70aa409100abb1b870f67e6d059475d9f278c751b6e1e2e2d6f2e586c81a9fb6597fda06e7923274","0xb0b0f61a44053fa6c715dbb0731e35d48dba257d134f851ee1b81fd49a5c51a90ebf5459ec6e489fce25da4f184fbdb1","0xb1d2117fe811720bb997c7c93fe9e4260dc50fca8881b245b5e34f724aaf37ed970cdad4e8fcb68e05ac8cf55a274a53","0xa10f502051968f14b02895393271776dee7a06db9de14effa0b3471825ba94c3f805302bdddac4d397d08456f620999d","0xa3dbad2ef060ae0bb7b02eaa4a13594f3f900450faa1854fc09620b01ac94ab896321dfb1157cf2374c27e5718e8026a","0xb550fdec503195ecb9e079dcdf0cad559d64d3c30818ef369b4907e813e689da316a74ad2422e391b4a8c2a2bef25fc0","0xa25ba865e2ac8f28186cea497294c8649a201732ecb4620c4e77b8e887403119910423df061117e5f03fc5ba39042db1","0xb3f88174e03fdb443dd6addd01303cf88a4369352520187c739fc5ae6b22fa99629c63c985b4383219dab6acc5f6f532","0x97a7503248e31e81b10eb621ba8f5210c537ad11b539c96dfb7cf72b846c7fe81bd7532c5136095652a9618000b7f8d3","0xa8bcdc1ce5aa8bfa683a2fc65c1e79de8ff5446695dcb8620f7350c26d2972a23da22889f9e2b1cacb3f688c6a2953dc","0x8458c111df2a37f5dd91a9bee6c6f4b79f4f161c93fe78075b24a35f9817da8dde71763218d627917a9f1f0c4709c1ed","0xac5f061a0541152b876cbc10640f26f1cc923c9d4ae1b6621e4bb3bf2cec59bbf87363a4eb72fb0e5b6d4e1c269b52d5","0xa9a25ca87006e8a9203cbb78a93f50a36694aa4aad468b8d80d3feff9194455ca559fcc63838128a0ab75ad78c07c13a","0xa450b85f5dfffa8b34dfd8bc985f921318efacf8857cf7948f93884ba09fb831482ee90a44224b1a41e859e19b74962f","0x8ed91e7f92f5c6d7a71708b6132f157ac226ecaf8662af7d7468a4fa25627302efe31e4620ad28719318923e3a59bf82","0xab524165fd4c71b1fd395467a14272bd2b568592deafa039d8492e9ef36c6d3f96927c95c72d410a768dc0b6d1fbbc9b","0xb662144505aa8432c75ffb8d10318526b6d5777ac7af9ebfad87d9b0866c364f7905a6352743bd8fd79ffd9d5dd4f3e6","0xa48f1677550a5cd40663bb3ba8f84caaf8454f332d0ceb1d94dbea52d0412fe69c94997f7749929712fd3995298572f7","0x8391cd6e2f6b0c242de1117a612be99776c3dc95cb800b187685ea5bf7e2722275eddb79fd7dfc8be8e389c4524cdf70","0x875d3acb9af47833b72900bc0a2448999d638f153c5e97e8a14ec02d0c76f6264353a7e275e1f1a5855daced523d243b","0x91f1823657d30b59b2f627880a9a9cb530f5aca28a9fd217fe6f2f5133690dfe7ad5a897872e400512db2e788b3f7628","0xad3564332aa56cea84123fc7ca79ea70bb4fef2009fa131cb44e4b15e8613bd11ca1d83b9d9bf456e4b7fee9f2e8b017","0x8c530b84001936d5ab366c84c0b105241a26d1fb163669f17c8f2e94776895c2870edf3e1bc8ccd04d5e65531471f695","0x932d01fa174fdb0c366f1230cffde2571cc47485f37f23ba5a1825532190cc3b722aeb1f15aed62cf83ccae9403ba713","0x88b28c20585aca50d10752e84b901b5c2d58efef5131479fbbe53de7bce2029e1423a494c0298e1497669bd55be97a5d","0xb914148ca717721144ebb3d3bf3fcea2cd44c30c5f7051b89d8001502f3856fef30ec167174d5b76265b55d70f8716b5","0x81d0173821c6ddd2a068d70766d9103d1ee961c475156e0cbd67d54e668a796310474ef698c7ab55abe6f2cf76c14679","0x8f28e8d78e2fe7fa66340c53718e0db4b84823c8cfb159c76eac032a62fb53da0a5d7e24ca656cf9d2a890cb2a216542","0x8a26360335c73d1ab51cec3166c3cf23b9ea51e44a0ad631b0b0329ef55aaae555420348a544e18d5760969281759b61","0x94f326a32ed287545b0515be9e08149eb0a565025074796d72387cc3a237e87979776410d78339e23ef3172ca43b2544","0xa785d2961a
2fa5e70bffa137858a92c48fe749fee91b02599a252b0cd50d311991a08efd7fa5e96b78d07e6e66ffe746","0x94af9030b5ac792dd1ce517eaadcec1482206848bea4e09e55cc7f40fd64d4c2b3e9197027c5636b70d6122c51d2235d","0x9722869f7d1a3992850fe7be405ec93aa17dc4d35e9e257d2e469f46d2c5a59dbd504056c85ab83d541ad8c13e8bcd54","0xb13c4088b61a06e2c03ac9813a75ff1f68ffdfee9df6a8f65095179a475e29cc49119cad2ce05862c3b1ac217f3aace9","0x8c64d51774753623666b10ca1b0fe63ae42f82ed6aa26b81dc1d48c86937c5772eb1402624c52a154b86031854e1fb9f","0xb47e4df18002b7dac3fee945bf9c0503159e1b8aafcce2138818e140753011b6d09ef1b20894e08ba3006b093559061b","0x93cb5970076522c5a0483693f6a35ffd4ea2aa7aaf3730c4eccd6af6d1bebfc1122fc4c67d53898ae13eb6db647be7e2","0xa68873ef80986795ea5ed1a597d1cd99ed978ec25e0abb57fdcc96e89ef0f50aeb779ff46e3dce21dc83ada3157a8498","0x8cab67f50949cc8eee6710e27358aea373aae3c92849f8f0b5531c080a6300cdf2c2094fe6fecfef6148de0d28446919","0x993e932bcb616dbaa7ad18a4439e0565211d31071ef1b85a0627db74a05d978c60d507695eaeea5c7bd9868a21d06923","0xacdadff26e3132d9478a818ef770e9fa0d2b56c6f5f48bd3bd674436ccce9bdfc34db884a73a30c04c5f5e9764cb2218","0xa0d3e64c9c71f84c0eef9d7a9cb4fa184224b969db5514d678e93e00f98b41595588ca802643ea225512a4a272f5f534","0x91c9140c9e1ba6e330cb08f6b2ce4809cd0d5a0f0516f70032bf30e912b0ed684d07b413b326ab531ee7e5b4668c799b","0x87bc2ee7a0c21ba8334cd098e35cb703f9af57f35e091b8151b9b63c3a5b0f89bd7701dbd44f644ea475901fa6d9ef08","0x9325ccbf64bf5d71b303e31ee85d486298f9802c5e55b2c3d75427097bf8f60fa2ab4fcaffa9b60bf922c3e24fbd4b19","0x95d0506e898318f3dc8d28d16dfd9f0038b54798838b3c9be2a2ae3c2bf204eb496166353fc042220b0bd4f6673b9285","0x811de529416331fe9c416726d45df9434c29dcd7e949045eb15740f47e97dde8f31489242200e19922cac2a8b7c6fd1f","0xade632d04a4c8bbab6ca7df370b2213cb9225023e7973f0e29f4f5e52e8aeaabc65171306bbdd12a67b195dfbb96d48f","0x88b7f029e079b6ae956042c0ea75d53088c5d0efd750dd018adaeacf46be21bf990897c58578c491f41afd3978d08073","0x91f477802de507ffd2be3f4319903119225b277ad24f74eb50f28b66c14d32fae53c7edb8c7590704741af7f7f3e3654","0x809838b32bb4f4d0237e98108320d4b079ee16ed80c567e7548bd37e4d7915b1192880f4812ac0e00476d246aec1dbc8","0x84183b5fc4a7997a8ae5afedb4d21dce69c480d5966b5cbdafd6dd10d29a9a6377f3b90ce44da0eb8b176ac3af0253bb","0x8508abbf6d3739a16b9165caf0f95afb3b3ac1b8c38d6d374cf0c91296e2c1809a99772492b539cda184510bce8a0271","0x8722054e59bab2062e6419a6e45fc803af77fde912ef2cd23055ad0484963de65a816a2debe1693d93c18218d2b8e81a","0x8e895f80e485a7c4f56827bf53d34b956281cdc74856c21eb3b51f6288c01cc3d08565a11cc6f3e2604775885490e8c5","0xafc92714771b7aa6e60f3aee12efd9c2595e9659797452f0c1e99519f67c8bc3ac567119c1ddfe82a3e961ee9defea9a","0x818ff0fd9cefd32db87b259e5fa32967201016fc02ef44116cdca3c63ce5e637756f60477a408709928444a8ad69c471","0x8251e29af4c61ae806fc5d032347fb332a94d472038149225298389495139ce5678fae739d02dfe53a231598a992e728","0xa0ea39574b26643f6f1f48f99f276a8a64b5481989cfb2936f9432a3f8ef5075abfe5c067dc5512143ce8bf933984097","0xaf67a73911b372bf04e57e21f289fc6c3dfac366c6a01409b6e76fea4769bdb07a6940e52e8d7d3078f235c6d2f632c6","0xb5291484ef336024dd2b9b4cf4d3a6b751133a40656d0a0825bcc6d41c21b1c79cb50b0e8f4693f90c29c8f4358641f9","0x8bc0d9754d70f2cb9c63f991902165a87c6535a763d5eece43143b5064ae0bcdce7c7a8f398f2c1c29167b2d5a3e6867","0x8d7faff53579ec8f6c92f661c399614cc35276971752ce0623270f88be937c414eddcb0997e14724a783905a026c8883","0x9310b5f6e675fdf60796f814dbaa5a6e7e9029a61c395761e330d9348a7efab992e4e115c8be3a43d08e90d21290c892","0xb5eb4f3eb646038ad2a020f0a42202532d4932e766da82b2c1002bf9c9c2e5336b54c8c0ffcc0e02d19dde2e6a35b6cc","0x91dabfd30a66710f1f37a891136c9b
e1e23af4abf8cb751f512a40c022a35f8e0a4fb05b17ec36d4208de02d56f0d53a","0xb3ded14e82d62ac7a5a036122a62f00ff8308498f3feae57d861babaff5a6628d43f0a0c5fc903f10936bcf4e2758ceb","0xa88e8348fed2b26acca6784d19ef27c75963450d99651d11a950ea81d4b93acd2c43e0ecce100eaf7e78508263d5baf3","0xb1f5bbf7c4756877b87bb42163ac570e08c6667c4528bf68b5976680e19beeff7c5effd17009b0718797077e2955457a","0xad2e7b516243f915d4d1415326e98b1a7390ae88897d0b03b66c2d9bd8c3fba283d7e8fe44ed3333296a736454cef6d8","0x8f82eae096d5b11f995de6724a9af895f5e1c58d593845ad16ce8fcae8507e0d8e2b2348a0f50a1f66a17fd6fac51a5c","0x890e4404d0657c6c1ee14e1aac132ecf7a568bb3e04137b85ac0f84f1d333bd94993e8750f88eee033a33fb00f85dcc7","0x82ac7d3385e035115f1d39a99fc73e5919de44f5e6424579776d118d711c8120b8e5916372c6f27bed4cc64cac170b6c","0x85ee16d8901c272cfbbe966e724b7a891c1bd5e68efd5d863043ad8520fc409080af61fd726adc680b3f1186fe0ac8b8","0x86dc564c9b545567483b43a38f24c41c6551a49cabeebb58ce86404662a12dbfafd0778d30d26e1c93ce222e547e3898","0xa29f5b4522db26d88f5f95f18d459f8feefab02e380c2edb65aa0617a82a3c1a89474727a951cef5f15050bcf7b380fb","0xa1ce039c8f6cac53352899edb0e3a72c76da143564ad1a44858bd7ee88552e2fe6858d1593bbd74aeee5a6f8034b9b9d","0x97f10d77983f088286bd7ef3e7fdd8fa275a56bec19919adf33cf939a90c8f2967d2b1b6fc51195cb45ad561202a3ed7","0xa25e2772e8c911aaf8712bdac1dd40ee061c84d3d224c466cfaae8e5c99604053f940cde259bd1c3b8b69595781dbfec","0xb31bb95a0388595149409c48781174c340960d59032ab2b47689911d03c68f77a2273576fbe0c2bf4553e330656058c7","0xb8b2e9287ad803fb185a13f0d7456b397d4e3c8ad5078f57f49e8beb2e85f661356a3392dbd7bcf6a900baa5582b86a1","0xa3d0893923455eb6e96cc414341cac33d2dbc88fba821ac672708cce131761d85a0e08286663a32828244febfcae6451","0x82310cb42f647d99a136014a9f881eb0b9791efd2e01fc1841907ad3fc8a9654d3d1dab6689c3607214b4dc2aca01cee","0x874022d99c16f60c22de1b094532a0bc6d4de700ad01a31798fac1d5088b9a42ad02bef8a7339af7ed9c0d4f16b186ee","0x94981369e120265aed40910eebc37eded481e90f4596b8d57c3bec790ab7f929784bd33ddd05b7870aad6c02e869603b","0xa4f1f50e1e2a73f07095e0dd31cb45154f24968dae967e38962341c1241bcd473102fff1ff668b20c6547e9732d11701","0xae2328f3b0ad79fcda807e69a1b5278145225083f150f67511dafc97e079f860c3392675f1752ae7e864c056e592205b","0x875d8c971e593ca79552c43d55c8c73b17cd20c81ff2c2fed1eb19b1b91e4a3a83d32df150dbfd5db1092d0aebde1e1f","0xadd2e80aa46aae95da73a11f130f4bda339db028e24c9b11e5316e75ba5e63bc991d2a1da172c7c8e8fee038baae3433","0xb46dbe1cb3424002aa7de51e82f600852248e251465c440695d52538d3f36828ff46c90ed77fc1d11534fe3c487df8ef","0xa5e5045d28b4e83d0055863c30c056628c58d4657e6176fd0536f5933f723d60e851bb726d5bf3c546b8ce4ac4a57ef8","0x91fec01e86dd1537e498fff7536ea3ca012058b145f29d9ada49370cd7b7193ac380e116989515df1b94b74a55c45df3","0xa7428176d6918cd916a310bdc75483c72de660df48cac4e6e7478eef03205f1827ea55afc0df5d5fa7567d14bbea7fc9","0x851d89bef45d9761fe5fdb62972209335193610015e16a675149519f9911373bac0919add226ef118d9f3669cfdf4734","0xb74acf5c149d0042021cb2422ea022be4c4f72a77855f42393e71ffd12ebb3eec16bdf16f812159b67b79a9706e7156d","0x99f35dce64ec99aa595e7894b55ce7b5a435851b396e79036ffb249c28206087db4c85379df666c4d95857db02e21ff9","0xb6b9a384f70db9e298415b8ab394ee625dafff04be2886476e59df8d052ca832d11ac68a9b93fba7ab055b7bc36948a4","0x898ee4aefa923ffec9e79f2219c7389663eb11eb5b49014e04ed4a336399f6ea1691051d86991f4c46ca65bcd4fdf359","0xb0f948217b0d65df7599a0ba4654a5e43c84db477936276e6f11c8981efc6eaf14c90d3650107ed4c09af4cc8ec11137","0xaa6286e27ac54f73e63dbf6f41865dd94d24bc0cf732262fcaff67319d162bb43af909f6f8ee27b1971939cfbba08141","0x8bca7cdf730cf56c7b2c8a2c4879d61361a6e1dba5a3681a1a
16c17a56e168ace0e99cf0d15826a1f5e67e6b8a8a049a","0xa746d876e8b1ce225fcafca603b099b36504846961526589af977a88c60d31ba2cc56e66a3dec8a77b3f3531bf7524c9","0xa11e2e1927e6704cdb8874c75e4f1842cef84d7d43d7a38e339e61dc8ba90e61bbb20dd3c12e0b11d2471d58eed245be","0xa36395e22bc1d1ba8b0459a235203177737397da5643ce54ded3459d0869ff6d8d89f50c73cb62394bf66a959cde9b90","0x8b49f12ba2fdf9aca7e5f81d45c07d47f9302a2655610e7634d1e4bd16048381a45ef2c95a8dd5b0715e4b7cf42273af","0x91cffa2a17e64eb7f76bccbe4e87280ee1dd244e04a3c9eac12e15d2d04845d876eb24fe2ec6d6d266cce9efb281077f","0xa6b8afabf65f2dee01788114e33a2f3ce25376fb47a50b74da7c3c25ff1fdc8aa9f41307534abbf48acb6f7466068f69","0x8d13db896ccfea403bd6441191995c1a65365cab7d0b97fbe9526da3f45a877bd1f4ef2edef160e8a56838cd1586330e","0x98c717de9e01bef8842c162a5e757fe8552d53269c84862f4d451e7c656ae6f2ae473767b04290b134773f63be6fdb9d","0x8c2036ace1920bd13cf018e82848c49eb511fad65fd0ff51f4e4b50cf3bfc294afb63cba682c16f52fb595a98fa84970","0xa3520fdff05dbad9e12551b0896922e375f9e5589368bcb2cc303bde252743b74460cb5caf99629325d3620f13adc796","0x8d4f83a5bfec05caf5910e0ce538ee9816ee18d0bd44c1d0da2a87715a23cd2733ad4d47552c6dc0eb397687d611dd19","0xa7b39a0a6a02823452d376533f39d35029867b3c9a6ad6bca181f18c54132d675613a700f9db2440fb1b4fa13c8bf18a","0x80bcb114b2544b80f404a200fc36860ed5e1ad31fe551acd4661d09730c452831751baa9b19d7d311600d267086a70bc","0x90dcce03c6f88fc2b08f2b42771eedde90cc5330fe0336e46c1a7d1b5a6c1641e5fcc4e7b3d5db00bd8afca9ec66ed81","0xaec15f40805065c98e2965b1ae12a6c9020cfdb094c2d0549acfc7ea2401a5fb48d3ea7d41133cf37c4e096e7ff53eb9","0x80e129b735dba49fa627a615d6c273119acec8e219b2f2c4373a332b5f98d66cbbdd688dfbe72a8f8bfefaccc02c50c1","0xa9b596da3bdfe23e6799ece5f7975bf7a1979a75f4f546deeaf8b34dfe3e0d623217cb4cf4ccd504cfa3625b88cd53f1","0xabcbbb70b16f6e517c0ab4363ab76b46e4ff58576b5f8340e5c0e8cc0e02621b6e23d742d73b015822a238b17cfd7665","0xa046937cc6ea6a2e1adae543353a9fe929c1ae4ad655be1cc051378482cf88b041e28b1e9a577e6ccff2d3570f55e200","0x831279437282f315e65a60184ef158f0a3dddc15a648dc552bdc88b3e6fe8288d3cfe9f0031846d81350f5e7874b4b33","0x993d7916fa213c6d66e7c4cafafc1eaec9a2a86981f91c31eb8a69c5df076c789cbf498a24c84e0ee77af95b42145026","0x823907a3b6719f8d49b3a4b7c181bd9bb29fcf842d7c70660c4f351852a1e197ca46cf5e879b47fa55f616fa2b87ce5e","0x8d228244e26132b234930ee14c75d88df0943cdb9c276a8faf167d259b7efc1beec2a87c112a6c608ad1600a239e9aae","0xab6e55766e5bfb0cf0764ed909a8473ab5047d3388b4f46faeba2d1425c4754c55c6daf6ad4751e634c618b53e549529","0xab0cab6860e55a84c5ad2948a7e0989e2b4b1fd637605634b118361497332df32d9549cb854b2327ca54f2bcb85eed8f","0xb086b349ae03ef34f4b25a57bcaa5d1b29bd94f9ebf87e22be475adfe475c51a1230c1ebe13506cb72c4186192451658","0x8a0b49d8a254ca6d91500f449cbbfbb69bb516c6948ac06808c65595e46773e346f97a5ce0ef7e5a5e0de278af22709c","0xac49de11edaaf04302c73c578cc0824bdd165c0d6321be1c421c1950e68e4f3589aa3995448c9699e93c6ebae8803e27","0x884f02d841cb5d8f4c60d1402469216b114ab4e93550b5bc1431756e365c4f870a9853449285384a6fa49e12ce6dc654","0xb75f3a28fa2cc8d36b49130cb7448a23d73a7311d0185ba803ad55c8219741d451c110f48b786e96c728bc525903a54f","0x80ae04dbd41f4a35e33f9de413b6ad518af0919e5a30cb0fa1b061b260420780bb674f828d37fd3b52b5a31673cbd803","0xb9a8011eb5fcea766907029bf743b45262db3e49d24f84503687e838651ed11cb64c66281e20a0ae9f6aa51acc552263","0x90bfdd75e2dc9cf013e22a5d55d2d2b8a754c96103a17524488e01206e67f8b6d52b1be8c4e3d5307d4fe06d0e51f54c","0xb4af353a19b06203a815ec43e79a88578cc678c46f5a954b85bc5c53b84059dddba731f3d463c23bfd5273885c7c56a4","0xaa125e96d4553b64f7140e5453ff5d2330318b69d74d37d283e84c26ad672fa00e3f71
e530eb7e28be1e94afb9c4612e","0xa18e060aee3d49cde2389b10888696436bb7949a79ca7d728be6456a356ea5541b55492b2138da90108bd1ce0e6f5524","0x93e55f92bdbccc2de655d14b1526836ea2e52dba65eb3f87823dd458a4cb5079bf22ce6ef625cb6d6bfdd0995ab9a874","0x89f5a683526b90c1c3ceebbb8dc824b21cff851ce3531b164f6626e326d98b27d3e1d50982e507d84a99b1e04e86a915","0x83d1c38800361633a3f742b1cb2bfc528129496e80232611682ddbe403e92c2ac5373aea0bca93ecb5128b0b2b7a719e","0x8ecba560ac94905e19ce8d9c7af217bf0a145d8c8bd38e2db82f5e94cc3f2f26f55819176376b51f154b4aab22056059","0xa7e2a4a002b60291924850642e703232994acb4cfb90f07c94d1e0ecd2257bb583443283c20fc6017c37e6bfe85b7366","0x93ed7316fa50b528f1636fc6507683a672f4f4403e55e94663f91221cc198199595bd02eef43d609f451acc9d9b36a24","0xa1220a8ebc5c50ceed76a74bc3b7e0aa77f6884c71b64b67c4310ac29ce5526cb8992d6abc13ef6c8413ce62486a6795","0xb2f6eac5c869ad7f4a25161d3347093e2f70e66cd925032747e901189355022fab3038bca4d610d2f68feb7e719c110b","0xb703fa11a4d511ca01c7462979a94acb40b5d933759199af42670eb48f83df202fa0c943f6ab3b4e1cc54673ea3aab1e","0xb5422912afbfcb901f84791b04f1ddb3c3fbdc76d961ee2a00c5c320e06d3cc5b5909c3bb805df66c5f10c47a292b13d","0xad0934368da823302e1ac08e3ede74b05dfdbfffca203e97ffb0282c226814b65c142e6e15ec1e754518f221f01b30f7","0xa1dd302a02e37df15bf2f1147efe0e3c06933a5a767d2d030e1132f5c3ce6b98e216b6145eb39e1e2f74e76a83165b8d","0xa346aab07564432f802ae44738049a36f7ca4056df2d8f110dbe7fef4a3e047684dea609b2d03dc6bf917c9c2a47608f","0xb96c5f682a5f5d02123568e50f5d0d186e4b2c4c9b956ec7aabac1b3e4a766d78d19bd111adb5176b898e916e49be2aa","0x8a96676d56876fc85538db2e806e1cba20fd01aeb9fa3cb43ca6ca94a2c102639f65660db330e5d74a029bb72d6a0b39","0xab0048336bd5c3def1a4064eadd49e66480c1f2abb4df46e03afbd8a3342c2c9d74ee35d79f08f4768c1646681440984","0x888427bdf76caec90814c57ee1c3210a97d107dd88f7256f14f883ad0f392334b82be11e36dd8bfec2b37935177c7831","0xb622b282becf0094a1916fa658429a5292ba30fb48a4c8066ce1ddcefb71037948262a01c95bab6929ed3a76ba5db9fe","0xb5b9e005c1f456b6a368a3097634fb455723abe95433a186e8278dceb79d4ca2fbe21f8002e80027b3c531e5bf494629","0xa3c6707117a1e48697ed41062897f55d8119403eea6c2ee88f60180f6526f45172664bfee96bf61d6ec0b7fbae6aa058","0xb02a9567386a4fbbdb772d8a27057b0be210447348efe6feb935ceec81f361ed2c0c211e54787dc617cdffed6b4a6652","0xa9b8364e40ef15c3b5902e5534998997b8493064fa2bea99600def58279bb0f64574c09ba11e9f6f669a8354dd79dc85","0x9998a2e553a9aa9a206518fae2bc8b90329ee59ab23005b10972712389f2ec0ee746033c733092ffe43d73d33abbb8ef","0x843a4b34d9039bf79df96d79f2d15e8d755affb4d83d61872daf540b68c0a3888cf8fc00d5b8b247b38524bcb3b5a856","0x84f7128920c1b0bb40eee95701d30e6fc3a83b7bb3709f16d97e72acbb6057004ee7ac8e8f575936ca9dcb7866ab45f7","0x918d3e2222e10e05edb34728162a899ad5ada0aaa491aeb7c81572a9c0d506e31d5390e1803a91ff3bd8e2bb15d47f31","0x9442d18e2489613a7d47bb1cb803c8d6f3259d088cd079460976d87f7905ee07dea8f371b2537f6e1d792d36d7e42723","0xb491976970fe091995b2ed86d629126523ccf3e9daf8145302faca71b5a71a5da92e0e05b62d7139d3efac5c4e367584","0xaa628006235dc77c14cef4c04a308d66b07ac92d377df3de1a2e6ecfe3144f2219ad6d7795e671e1cb37a3641910b940","0x99d386adaea5d4981d7306feecac9a555b74ffdc218c907c5aa7ac04abaead0ec2a8237300d42a3fbc464673e417ceed","0x8f78e8b1556f9d739648ea3cab9606f8328b52877fe72f9305545a73b74d49884044ba9c1f1c6db7d9b7c7b7c661caba","0x8fb357ae49932d0babdf74fc7aa7464a65d3b6a2b3acf4f550b99601d3c0215900cfd67f2b6651ef94cfc323bac79fae","0x9906f2fa25c0290775aa001fb6198113d53804262454ae8b83ef371b5271bde189c0460a645829cb6c59f9ee3a55ce4d","0x8f4379b3ebb50e052325b27655ca6a82e6f00b87bf0d2b680d205dd2c7afdc9ff32a9047ae71a1cdf0d0ce6b94
74d878","0xa85534e88c2bd43c043792eaa75e50914b21741a566635e0e107ae857aed0412035f7576cf04488ade16fd3f35fdbb87","0xb4ce93199966d3c23251ca7f28ec5af7efea1763d376b0385352ffb2e0a462ef95c69940950278cf0e3dafd638b7bd36","0xb10cb3d0317dd570aa73129f4acf63c256816f007607c19b423fb42f65133ce21f2f517e0afb41a5378cccf893ae14d0","0xa9b231c9f739f7f914e5d943ed9bff7eba9e2c333fbd7c34eb1648a362ee01a01af6e2f7c35c9fe962b11152cddf35de","0x99ff6a899e156732937fb81c0cced80ae13d2d44c40ba99ac183aa246103b31ec084594b1b7feb96da58f4be2dd5c0ed","0x8748d15d18b75ff2596f50d6a9c4ce82f61ecbcee123a6ceae0e43cab3012a29b6f83cf67b48c22f6f9d757c6caf76b2","0xb88ab05e4248b7fb634cf640a4e6a945d13e331237410f7217d3d17e3e384ddd48897e7a91e4516f1b9cbd30f35f238b","0x8d826deaeeb84a3b2d2c04c2300ca592501f992810582d6ae993e0d52f6283a839dba66c6c72278cff5871802b71173b","0xb36fed027c2f05a5ef625ca00b0364b930901e9e4420975b111858d0941f60e205546474bb25d6bfa6928d37305ae95f","0xaf2fcfc6b87967567e8b8a13a4ed914478185705724e56ce68fb2df6d1576a0cf34a61e880997a0d35dc2c3276ff7501","0xac351b919cd1fbf106feb8af2c67692bfcddc84762d18cea681cfa7470a5644839caace27efee5f38c87d3df306f4211","0x8d6665fb1d4d8d1fa23bd9b8a86e043b8555663519caac214d1e3e3effbc6bee7f2bcf21e645f77de0ced279d69a8a8b","0xa9fc1c2061756b2a1a169c1b149f212ff7f0d2488acd1c5a0197eba793cffa593fc6d1d1b40718aa75ca3ec77eff10e1","0xaff64f0fa009c7a6cf0b8d7a22ddb2c8170c3cb3eec082e60d5aadb00b0040443be8936d728d99581e33c22178c41c87","0x82e0b181adc5e3b1c87ff8598447260e839d53debfae941ebea38265575546c3a74a14b4325a030833a62ff6c52d9365","0xb7ad43cbb22f6f892c2a1548a41dc120ab1f4e1b8dea0cb6272dd9cb02054c542ecabc582f7e16de709d48f5166cae86","0x985e0c61094281532c4afb788ecb2dfcba998e974b5d4257a22040a161883908cdd068fe80f8eb49b8953cfd11acf43a","0xae46895c6d67ea6d469b6c9c07b9e5d295d9ae73b22e30da4ba2c973ba83a130d7eef39717ec9d0f36e81d56bf742671","0x8600177ea1f7e7ef90514b38b219a37dedfc39cb83297e4c7a5b479817ef56479d48cf6314820960c751183f6edf8b0e","0xb9208ec1c1d7a1e99b59c62d3e4e61dfb706b0e940d09d3abfc3454c19749083260614d89cfd7e822596c3cdbcc6bb95","0xa1e94042c796c2b48bc724352d2e9f3a22291d9a34705993357ddb6adabd76da6fc25dac200a8cb0b5bbd99ecddb7af6","0xb29c3adedd0bcad8a930625bc4dfdc3552a9afd5ca6dd9c0d758f978068c7982b50b711aa0eb5b97f2b84ee784637835","0xaf0632a238bb1f413c7ea8e9b4c3d68f2827bd2e38cd56024391fba6446ac5d19a780d0cfd4a78fe497d537b766a591a","0xaaf6e7f7d54f8ef5e2e45dd59774ecbeecf8683aa70483b2a75be6a6071b5981bbaf1627512a65d212817acdfab2e428","0x8c751496065da2e927cf492aa5ca9013b24f861d5e6c24b30bbf52ec5aaf1905f40f9a28175faef283dd4ed4f2182a09","0x8952377d8e80a85cf67d6b45499f3bad5fd452ea7bcd99efc1b066c4720d8e5bff1214cea90fd1f972a7f0baac3d29be","0xa1946ee543d1a6e21f380453be4d446e4130950c5fc3d075794eb8260f6f52d0a795c1ff91d028a648dc1ce7d9ab6b47","0x89f3fefe37af31e0c17533d2ca1ce0884cc1dc97c15cbfab9c331b8debd94781c9396abef4bb2f163d09277a08d6adf0","0xa2753f1e6e1a154fb117100a5bd9052137add85961f8158830ac20541ab12227d83887d10acf7fd36dcaf7c2596d8d23","0x814955b4198933ee11c3883863b06ff98c7eceb21fc3e09df5f916107827ccf3323141983e74b025f46ae00284c9513b","0x8cc5c6bb429073bfef47cae7b3bfccb0ffa076514d91a1862c6bda4d581e0df87db53cc6c130bf8a7826304960f5a34e","0x909f22c1f1cdc87f7be7439c831a73484a49acbf8f23d47087d7cf867c64ef61da3bde85dc57d705682b4c3fc710d36e","0x8048fee7f276fcd504aed91284f28e73693615e0eb3858fa44bcf79d7285a9001c373b3ef71d9a3054817ba293ebe28c","0x94400e5cf5d2700ca608c5fe35ce14623f71cc24959f2bc27ca3684092850f76b67fb1f07ca9e5b2ca3062cf8ad17bd4","0x81c2ae7d4d1b17f8b6de6a0430acc0d58260993980fe48dc2129c4948269cdc74f9dbfbf9c26b19360823fd913083d48","0x8c41fe765
128e63f6889d6a979f6a4342300327c8b245a8cfe3ecfbcac1e09c3da30e2a1045b24b78efc6d6d50c8c6ac","0xa5dd4ae51ae48c8be4b218c312ade226cffce671cf121cb77810f6c0990768d6dd767badecb5c69921d5574d5e8433d3","0xb7642e325f4ba97ae2a39c1c9d97b35aafd49d53dba36aed3f3cb0ca816480b3394079f46a48252d46596559c90f4d58","0xae87375b40f35519e7bd4b1b2f73cd0b329b0c2cb9d616629342a71c6c304338445eda069b78ea0fbe44087f3de91e09","0xb08918cb6f736855e11d3daca1ddfbdd61c9589b203b5493143227bf48e2c77c2e8c94b0d1aa2fab2226e0eae83f2681","0xac36b84a4ac2ebd4d6591923a449c564e3be8a664c46092c09e875c2998eba16b5d32bfd0882fd3851762868e669f0b1","0xa44800a3bb192066fa17a3f29029a23697240467053b5aa49b9839fb9b9b8b12bcdcbfc557f024b61f4f51a9aacdefcb","0x9064c688fec23441a274cdf2075e5a449caf5c7363cc5e8a5dc9747183d2e00a0c69f2e6b3f6a7057079c46014c93b3b","0xaa367b021469af9f5b764a79bb3afbe2d87fe1e51862221672d1a66f954b165778b7c27a705e0f93841fab4c8468344d","0xa1a8bfc593d4ab71f91640bc824de5c1380ab2591cfdafcbc78a14b32de3c0e15f9d1b461d85c504baa3d4232c16bb53","0x97df48da1799430f528184d30b6baa90c2a2f88f34cdfb342d715339c5ebd6d019aa693cea7c4993daafc9849063a3aa","0xabd923831fbb427e06e0dd335253178a9e5791395c84d0ab1433c07c53c1209161097e9582fb8736f8a60bde62d8693e","0x84cd1a43f1a438b43dc60ffc775f646937c4f6871438163905a3cebf1115f814ccd38a6ccb134130bff226306e412f32","0x91426065996b0743c5f689eb3ca68a9f7b9e4d01f6c5a2652b57fa9a03d8dc7cd4bdbdab0ca5a891fee1e97a7f00cf02","0xa4bee50249db3df7fd75162b28f04e57c678ba142ce4d3def2bc17bcb29e4670284a45f218dad3969af466c62a903757","0x83141ebcc94d4681404e8b67a12a46374fded6df92b506aff3490d875919631408b369823a08b271d006d5b93136f317","0xa0ea1c8883d58d5a784da3d8c8a880061adea796d7505c1f903d07c287c5467f71e4563fc0faafbc15b5a5538b0a7559","0x89d9d480574f201a87269d26fb114278ed2c446328df431dc3556e3500e80e4cd01fcac196a2459d8646361ebda840df","0x8bf302978973632dd464bec819bdb91304712a3ec859be071e662040620422c6e75eba6f864f764cffa2799272efec39","0x922f666bc0fd58b6d7d815c0ae4f66d193d32fc8382c631037f59eeaeae9a8ca6c72d08e72944cf9e800b8d639094e77","0x81ad8714f491cdff7fe4399f2eb20e32650cff2999dd45b9b3d996d54a4aba24cc6c451212e78c9e5550368a1a38fb3f","0xb58fcf4659d73edb73175bd9139d18254e94c3e32031b5d4b026f2ed37aa19dca17ec2eb54c14340231615277a9d347e","0xb365ac9c2bfe409b710928c646ea2fb15b28557e0f089d39878e365589b9d1c34baf5566d20bb28b33bb60fa133f6eff","0x8fcae1d75b53ab470be805f39630d204853ca1629a14158bac2f52632277d77458dec204ff84b7b2d77e641c2045be65","0xa03efa6bebe84f4f958a56e2d76b5ba4f95dd9ed7eb479edc7cc5e646c8d4792e5b0dfc66cc86aa4b4afe2f7a4850760","0xaf1c823930a3638975fb0cc5c59651771b2719119c3cd08404fbd4ce77a74d708cefbe3c56ea08c48f5f10e6907f338f","0x8260c8299b17898032c761c325ac9cabb4c5b7e735de81eacf244f647a45fb385012f4f8df743128888c29aefcaaad16","0xab2f37a573c82e96a8d46198691cd694dfa860615625f477e41f91b879bc58a745784fccd8ffa13065834ffd150d881d","0x986c746c9b4249352d8e5c629e8d7d05e716b3c7aab5e529ca969dd1e984a14b5be41528baef4c85d2369a42d7209216","0xb25e32da1a8adddf2a6080725818b75bc67240728ad1853d90738485d8924ea1e202df0a3034a60ffae6f965ec55cf63","0xa266e627afcebcefea6b6b44cbc50f5c508f7187e87d047b0450871c2a030042c9e376f3ede0afcf9d1952f089582f71","0x86c3bbca4c0300606071c0a80dbdec21ce1dd4d8d4309648151c420854032dff1241a1677d1cd5de4e4de4385efda986","0xb9a21a1fe2d1f3273a8e4a9185abf2ff86448cc98bfa435e3d68306a2b8b4a6a3ea33a155be3cb62a2170a86f77679a5","0xb117b1ea381adce87d8b342cba3a15d492ff2d644afa28f22424cb9cbc820d4f7693dfc1a4d1b3697046c300e1c9b4c8","0x9004c425a2e68870d6c69b658c344e3aa3a86a8914ee08d72b2f95c2e2d8a4c7bb0c6e7e271460c0e637cec11117bf8e","0x86a18aa4783b9ebd9131580c8b179
94825f27f4ac427b0929a1e0236907732a1c8139e98112c605488ee95f48bbefbfc","0x84042243b955286482ab6f0b5df4c2d73571ada00716d2f737ca05a0d2e88c6349e8ee9e67934cfee4a1775dbf7f4800","0x92c2153a4733a62e4e1d5b60369f3c26777c7d01cd3c8679212660d572bd3bac9b8a8a64e1f10f7dbf5eaa7579c4e423","0x918454b6bb8e44a2afa144695ba8d48ae08d0cdfef4ad078f67709eddf3bb31191e8b006f04e82ea45a54715ef4d5817","0xacf0b54f6bf34cf6ed6c2b39cf43194a40d68de6bcf1e4b82c34c15a1343e9ac3737885e1a30b78d01fa3a5125463db8","0xa7d60dbe4b6a7b054f7afe9ee5cbbfeca0d05dc619e6041fa2296b549322529faddb8a11e949562309aecefb842ac380","0x91ffb53e6d7e5f11159eaf13e783d6dbdfdb1698ed1e6dbf3413c6ea23492bbb9e0932230a9e2caac8fe899a17682795","0xb6e8d7be5076ee3565d5765a710c5ecf17921dd3cf555c375d01e958a365ae087d4a88da492a5fb81838b7b92bf01143","0xa8c6b763de2d4b2ed42102ef64eccfef31e2fb2a8a2776241c82912fa50fc9f77f175b6d109a97ede331307c016a4b1a","0x99839f86cb700c297c58bc33e28d46b92931961548deac29ba8df91d3e11721b10ea956c8e16984f9e4acf1298a79b37","0x8c2e2c338f25ea5c25756b7131cde0d9a2b35abf5d90781180a00fe4b8e64e62590dc63fe10a57fba3a31c76d784eb01","0x9687d7df2f41319ca5469d91978fed0565a5f11f829ebadaa83db92b221755f76c6eacd7700735e75c91e257087512e3","0x8795fdfb7ff8439c58b9bf58ed53873d2780d3939b902b9ddaaa4c99447224ced9206c3039a23c2c44bcc461e2bb637f","0xa803697b744d2d087f4e2307218d48fa88620cf25529db9ce71e2e3bbcc65bac5e8bb9be04777ef7bfb5ed1a5b8e6170","0x80f3d3efbbb9346ddd413f0a8e36b269eb5d7ff6809d5525ff9a47c4bcab2c01b70018b117f6fe05253775612ff70c6b","0x9050e0e45bcc83930d4c505af35e5e4d7ca01cd8681cba92eb55821aececcebe32bb692ebe1a4daac4e7472975671067","0x8d206812aac42742dbaf233e0c080b3d1b30943b54b60283515da005de05ea5caa90f91fedcfcba72e922f64d7040189","0xa2d44faaeb2eff7915c83f32b13ca6f31a6847b1c1ce114ea240bac3595eded89f09b2313b7915ad882292e2b586d5b4","0x961776c8576030c39f214ea6e0a3e8b3d32f023d2600958c098c95c8a4e374deeb2b9dc522adfbd6bda5949bdc09e2a2","0x993fa7d8447407af0fbcd9e6d77f815fa5233ab00674efbcf74a1f51c37481445ae291cc7b76db7c178f9cb0e570e0fc","0xabd5b1c78e05f9d7c8cc99bdaef8b0b6a57f2daf0f02bf492bec48ea4a27a8f1e38b5854da96efff11973326ff980f92","0x8f15af4764bc275e6ccb892b3a4362cacb4e175b1526a9a99944e692fe6ccb1b4fc19abf312bb2a089cb1f344d91a779","0xa09b27ccd71855512aba1d0c30a79ffbe7f6707a55978f3ced50e674b511a79a446dbc6d7946add421ce111135a460af","0x94b2f98ce86a9271fbd4153e1fc37de48421fe3490fb3840c00f2d5a4d0ba8810c6a32880b002f6374b59e0a7952518b","0x8650ac644f93bbcb88a6a0f49fee2663297fd4bc6fd47b6a89b9d8038d32370438ab3a4775ec9b58cb10aea8a95ef7b6","0x95e5c2f2e84eed88c6980bbba5a1c0bb375d5a628bff006f7516d45bb7d723da676add4fdd45956f312e7bab0f052644","0xb3278a3fa377ac93af7cfc9453f8cb594aae04269bbc99d2e0e45472ff4b6a2f97a26c4c57bf675b9d86f5e77a5d55d1","0xb4bcbe6eb666a206e2ea2f877912c1d3b5bdbd08a989fc4490eb06013e1a69ad1ba08bcdac048bf29192312be399077b","0xa76d70b78c99fffcbf9bb9886eab40f1ea4f99a309710b660b64cbf86057cbcb644d243f6e341711bb7ef0fedf0435a7","0xb2093c1ee945dca7ac76ad5aed08eae23af31dd5a77c903fd7b6f051f4ab84425d33a03c3d45bf2907bc93c02d1f3ad8","0x904b1f7534e053a265b22d20be859912b9c9ccb303af9a8d6f1d8f6ccdc5c53eb4a45a1762b880d8444d9be0cd55e7f9","0x8f664a965d65bc730c9ef1ec7467be984d4b8eb46bd9b0d64e38e48f94e6e55dda19aeac82cbcf4e1473440e64c4ca18","0x8bcee65c4cc7a7799353d07b114c718a2aae0cd10a3f22b7eead5185d159dafd64852cb63924bf87627d176228878bce","0x8c78f2e3675096fef7ebaa898d2615cd50d39ca3d8f02b9bdfb07e67da648ae4be3da64838dffc5935fd72962c4b96c7","0x8c40afd3701629421fec1df1aac4e849384ef2e80472c0e28d36cb1327acdf2826f99b357f3d7afdbc58a6347fc40b3c","0xa197813b1c65a8ea5754ef782522a57d63433ef752215ecda
1e7da76b0412ee619f58d904abd2e07e0c097048b6ae1dd","0xa670542629e4333884ad7410f9ea3bd6f988df4a8f8a424ca74b9add2312586900cf9ae8bd50411f9146e82626b4af56","0xa19875cc07ab84e569d98b8b67fb1dbbdfb59093c7b748fae008c8904a6fd931a63ca8d03ab5fea9bc8d263568125a9b","0xb57e7f68e4eb1bd04aafa917b1db1bdab759a02aa8a9cdb1cba34ba8852b5890f655645c9b4e15d5f19bf37e9f2ffe9f","0x8abe4e2a4f6462b6c64b3f10e45db2a53c2b0d3c5d5443d3f00a453e193df771eda635b098b6c8604ace3557514027af","0x8459e4fb378189b22b870a6ef20183deb816cefbf66eca1dc7e86d36a2e011537db893729f500dc154f14ce24633ba47","0x930851df4bc7913c0d8c0f7bd3b071a83668987ed7c397d3d042fdc0d9765945a39a3bae83da9c88cb6b686ed8aeeb26","0x8078c9e5cd05e1a8c932f8a1d835f61a248b6e7133fcbb3de406bf4ffc0e584f6f9f95062740ba6008d98348886cf76b","0xaddff62bb29430983fe578e3709b0949cdc0d47a13a29bc3f50371a2cb5c822ce53e2448cfaa01bcb6e0aa850d5a380e","0x9433add687b5a1e12066721789b1db2edf9b6558c3bdc0f452ba33b1da67426abe326e9a34d207bfb1c491c18811bde1","0x822beda3389963428cccc4a2918fa9a8a51cf0919640350293af70821967108cded5997adae86b33cb917780b097f1ca","0xa7a9f52bda45e4148ed56dd176df7bd672e9b5ed18888ccdb405f47920fdb0844355f8565cefb17010b38324edd8315f","0xb35c3a872e18e607b2555c51f9696a17fa18da1f924d503b163b4ec9fe22ed0c110925275cb6c93ce2d013e88f173d6a","0xadf34b002b2b26ab84fc1bf94e05bd8616a1d06664799ab149363c56a6e0c807fdc473327d25632416e952ea327fcd95","0xae4a6b9d22a4a3183fac29e2551e1124a8ce4a561a9a2afa9b23032b58d444e6155bb2b48f85c7b6d70393274e230db7","0xa2ea3be4fc17e9b7ce3110284038d46a09e88a247b6971167a7878d9dcf36925d613c382b400cfa4f37a3ebea3699897","0x8e5863786b641ce3140fbfe37124d7ad3925472e924f814ebfc45959aaf3f61dc554a597610b5defaecc85b59a99b50f","0xaefde3193d0f700d0f515ab2aaa43e2ef1d7831c4f7859f48e52693d57f97fa9e520090f3ed700e1c966f4b76048e57f","0x841a50f772956622798e5cd208dc7534d4e39eddee30d8ce133383d66e5f267e389254a0cdae01b770ecd0a9ca421929","0x8fbc2bfd28238c7d47d4c03b1b910946c0d94274a199575e5b23242619b1de3497784e646a92aa03e3e24123ae4fcaba","0x926999579c8eec1cc47d7330112586bdca20b4149c8b2d066f527c8b9f609e61ce27feb69db67eea382649c6905efcf9","0xb09f31f305efcc65589adf5d3690a76cf339efd67cd43a4e3ced7b839507466e4be72dd91f04e89e4bbef629d46e68c0","0xb917361f6b95f759642638e0b1d2b3a29c3bdef0b94faa30de562e6078c7e2d25976159df3edbacbf43614635c2640b4","0x8e7e8a1253bbda0e134d62bfe003a2669d471b47bd2b5cde0ff60d385d8e62279d54022f5ac12053b1e2d3aaa6910b4c","0xb69671a3c64e0a99d90b0ed108ce1912ff8ed983e4bddd75a370e9babde25ee1f5efb59ec707edddd46793207a8b1fe7","0x910b2f4ebd37b7ae94108922b233d0920b4aba0bd94202c70f1314418b548d11d8e9caa91f2cd95aff51b9432d122b7f","0x82f645c90dfb52d195c1020346287c43a80233d3538954548604d09fbab7421241cde8593dbc4acc4986e0ea39a27dd9","0x8fee895f0a140d88104ce442fed3966f58ff9d275e7373483f6b4249d64a25fb5374bbdc6bce6b5ab0270c2847066f83","0x84f5bd7aab27b2509397aeb86510dd5ac0a53f2c8f73799bf720f2f87a52277f8d6b0f77f17bc80739c6a7119b7eb062","0x9903ceced81099d7e146e661bcf01cbaccab5ba54366b85e2177f07e2d8621e19d9c9c3eee14b9266de6b3f9b6ea75ae","0xb9c16ea2a07afa32dd6c7c06df0dec39bca2067a9339e45475c98917f47e2320f6f235da353fd5e15b477de97ddc68dd","0x9820a9bbf8b826bec61ebf886de2c4f404c1ebdc8bab82ee1fea816d9de29127ce1852448ff717a3fe8bbfe9e92012e5","0x817224d9359f5da6f2158c2c7bf9165501424f063e67ba9859a07ab72ee2ee62eb00ca6da821cfa19065c3282ca72c74","0x94b95c465e6cb00da400558a3c60cfec4b79b27e602ca67cbc91aead08de4b6872d8ea096b0dc06dca4525c8992b8547","0xa2b539a5bccd43fa347ba9c15f249b417997c6a38c63517ca38394976baa08e20be384a360969ff54e7e721db536b3e5","0x96caf707e34f62811ee8d32ccf28d8d6ec579bc33e424d0473529af5315c456fd026a
a910c1fed70c91982d51df7d3ca","0x8a77b73e890b644c6a142bdbac59b22d6a676f3b63ddafb52d914bb9d395b8bf5aedcbcc90429337df431ebd758a07a6","0x8857830a7351025617a08bc44caec28d2fae07ebf5ffc9f01d979ce2a53839a670e61ae2783e138313929129790a51a1","0xaa3e420321ed6f0aa326d28d1a10f13facec6f605b6218a6eb9cbc074801f3467bf013a456d1415a5536f12599efa3d3","0x824aed0951957b00ea2f3d423e30328a3527bf6714cf9abbae84cf27e58e5c35452ba89ccc011de7c68c75d6e021d8f1","0xa2e87cc06bf202e953fb1081933d8b4445527dde20e38ed1a4f440144fd8fa464a2b73e068b140562e9045e0f4bd3144","0xae3b8f06ad97d7ae3a5e5ca839efff3e4824dc238c0c03fc1a8d2fc8aa546cdfd165b784a31bb4dec7c77e9305b99a4b","0xb30c3e12395b1fb8b776f3ec9f87c70e35763a7b2ddc68f0f60a4982a84017f27c891a98561c830038deb033698ed7fc","0x874e507757cd1177d0dff0b0c62ce90130324442a33da3b2c8ee09dbca5d543e3ecfe707e9f1361e7c7db641c72794bb","0xb53012dd10b5e7460b57c092eaa06d6502720df9edbbe3e3f61a9998a272bf5baaac4a5a732ad4efe35d6fac6feca744","0x85e6509d711515534d394e6cacbed6c81da710074d16ef3f4950bf2f578d662a494d835674f79c4d6315bced4defc5f0","0xb6132b2a34b0905dcadc6119fd215419a7971fe545e52f48b768006944b4a9d7db1a74b149e2951ea48c083b752d0804","0x989867da6415036d19b4bacc926ce6f4df7a556f50a1ba5f3c48eea9cefbb1c09da81481c8009331ee83f0859185e164","0x960a6c36542876174d3fbc1505413e29f053ed87b8d38fef3af180491c7eff25200b45dd5fe5d4d8e63c7e8c9c00f4c8","0x9040b59bd739d9cc2e8f6e894683429e4e876a8106238689ff4c22770ae5fdae1f32d962b30301fa0634ee163b524f35","0xaf3fcd0a45fe9e8fe256dc7eab242ef7f582dd832d147444483c62787ac820fafc6ca55d639a73f76bfa5e7f5462ab8f","0xb934c799d0736953a73d91e761767fdb78454355c4b15c680ce08accb57ccf941b13a1236980001f9e6195801cffd692","0x8871e8e741157c2c326b22cf09551e78da3c1ec0fc0543136f581f1550f8bab03b0a7b80525c1e99812cdbf3a9698f96","0xa8a977f51473a91d178ee8cfa45ffef8d6fd93ab1d6e428f96a3c79816d9c6a93cd70f94d4deda0125fd6816e30f3bea","0xa7688b3b0a4fc1dd16e8ba6dc758d3cfe1b7cf401c31739484c7fa253cce0967df1b290769bcefc9d23d3e0cb19e6218","0x8ae84322662a57c6d729e6ff9d2737698cc2da2daeb1f39e506618750ed23442a6740955f299e4a15dda6db3e534d2c6","0xa04a961cdccfa4b7ef83ced17ab221d6a043b2c718a0d6cc8e6f798507a31f10bf70361f70a049bc8058303fa7f96864","0xb463e39732a7d9daec8a456fb58e54b30a6e160aa522a18b9a9e836488cce3342bcbb2e1deab0f5e6ec0a8796d77197d","0xb1434a11c6750f14018a2d3bcf94390e2948f4f187e93bb22070ca3e5393d339dc328cbfc3e48815f51929465ffe7d81","0x84ff81d73f3828340623d7e3345553610aa22a5432217ef0ebd193cbf4a24234b190c65ca0873c22d10ea7b63bd1fbed","0xb6fe2723f0c47757932c2ddde7a4f8434f665612f7b87b4009c2635d56b6e16b200859a8ade49276de0ef27a2b6c970a","0x9742884ed7cd52b4a4a068a43d3faa02551a424136c85a9313f7cb58ea54c04aa83b0728fd741d1fe39621e931e88f8f","0xb7d2d65ea4d1ad07a5dee39e40d6c03a61264a56b1585b4d76fc5b2a68d80a93a42a0181d432528582bf08d144c2d6a9","0x88c0f66bada89f8a43e5a6ead2915088173d106c76f724f4a97b0f6758aed6ae5c37c373c6b92cdd4aea8f6261f3a374","0x81f9c43582cb42db3900747eb49ec94edb2284999a499d1527f03315fd330e5a509afa3bff659853570e9886aab5b28b","0x821f9d27d6beb416abf9aa5c79afb65a50ed276dbda6060103bc808bcd34426b82da5f23e38e88a55e172f5c294b4d40","0x8ba307b9e7cb63a6c4f3851b321aebfdb6af34a5a4c3bd949ff7d96603e59b27ff4dc4970715d35f7758260ff942c9e9","0xb142eb6c5f846de33227d0bda61d445a7c33c98f0a8365fe6ab4c1fabdc130849be597ef734305894a424ea715372d08","0xa732730ae4512e86a741c8e4c87fee8a05ee840fec0e23b2e037d58dba8dde8d10a9bc5191d34d00598941becbbe467f","0xadce6f7c30fd221f6b10a0413cc76435c4bb36c2d60bca821e5c67409fe9dbb2f4c36ef85eb3d734695e4be4827e9fd3","0xa74f00e0f9b23aff7b2527ce69852f8906dab9d6abe62ecd497498ab21e57542e12af9918d4fd610bb09e10b0
929c510","0xa593b6b0ef26448ce4eb3ab07e84238fc020b3cb10d542ff4b16d4e2be1bcde3797e45c9cf753b8dc3b0ffdb63984232","0xaed3913afccf1aa1ac0eb4980eb8426d0baccebd836d44651fd72af00d09fac488a870223c42aca3ceb39752070405ae","0xb2c44c66a5ea7fde626548ba4cef8c8710191343d3dadfd3bb653ce715c0e03056a5303a581d47dde66e70ea5a2d2779","0x8e5029b2ccf5128a12327b5103f7532db599846e422531869560ceaff392236434d87159f597937dbf4054f810c114f4","0x82beed1a2c4477e5eb39fc5b0e773b30cfec77ef2b1bf17eadaf60eb35b6d0dd9d8cf06315c48d3546badb3f21cd0cca","0x90077bd6cc0e4be5fff08e5d07a5a158d36cebd1d1363125bc4fae0866ffe825b26f933d4ee5427ba5cd0c33c19a7b06","0xa7ec0d8f079970e8e34f0ef3a53d3e0e45428ddcef9cc776ead5e542ef06f3c86981644f61c5a637e4faf001fb8c6b3e","0xae6d4add6d1a6f90b22792bc9d40723ee6850c27d0b97eefafd5b7fd98e424aa97868b5287cc41b4fbd7023bca6a322c","0x831aa917533d077da07c01417feaa1408846363ba2b8d22c6116bb858a95801547dd88b7d7fa1d2e3f0a02bdeb2e103d","0x96511b860b07c8a5ed773f36d4aa9d02fb5e7882753bf56303595bcb57e37ccc60288887eb83bef08c657ec261a021a2","0x921d2a3e7e9790f74068623de327443666b634c8443aba80120a45bba450df920b2374d96df1ce3fb1b06dd06f8cf6e3","0xaa74451d51fe82b4581ead8e506ec6cd881010f7e7dd51fc388eb9a557db5d3c6721f81c151d08ebd9c2591689fbc13e","0xa972bfbcf4033d5742d08716c927c442119bdae336bf5dff914523b285ccf31953da2733759aacaa246a9af9f698342c","0xad1fcd0cae0e76840194ce4150cb8a56ebed728ec9272035f52a799d480dfc85840a4d52d994a18b6edb31e79be6e8ad","0xa2c69fe1d36f235215432dad48d75887a44c99dfa0d78149acc74087da215a44bdb5f04e6eef88ff7eff80a5a7decc77","0xa94ab2af2b6ee1bc6e0d4e689ca45380d9fbd3c5a65b9bd249d266a4d4c07bf5d5f7ef2ae6000623aee64027892bf8fe","0x881ec1fc514e926cdc66480ac59e139148ff8a2a7895a49f0dff45910c90cdda97b66441a25f357d6dd2471cddd99bb3","0x884e6d3b894a914c8cef946a76d5a0c8351843b2bffa2d1e56c6b5b99c84104381dd1320c451d551c0b966f4086e60f9","0x817c6c10ce2677b9fc5223500322e2b880583254d0bb0d247d728f8716f5e05c9ff39f135854342a1afecd9fbdcf7c46","0xaaf4a9cb686a14619aa1fc1ac285dd3843ac3dd99f2b2331c711ec87b03491c02f49101046f3c5c538dc9f8dba2a0ac2","0x97ecea5ce53ca720b5d845227ae61d70269a2f53540089305c86af35f0898bfd57356e74a8a5e083fa6e1ea70080bd31","0xa22d811e1a20a75feac0157c418a4bfe745ccb5d29466ffa854dca03e395b6c3504a734341746b2846d76583a780b32e","0x940cbaa0d2b2db94ae96b6b9cf2deefbfd059e3e5745de9aec4a25f0991b9721e5cd37ef71c631575d1a0c280b01cd5b","0xae33cb4951191258a11044682de861bf8d92d90ce751b354932dd9f3913f542b6a0f8a4dc228b3cd9244ac32c4582832","0xa580df5e58c4274fe0f52ac2da1837e32f5c9db92be16c170187db4c358f43e5cfdda7c5911dcc79d77a5764e32325f5","0x81798178cb9d8affa424f8d3be67576ba94d108a28ccc01d330c51d5a63ca45bb8ca63a2f569b5c5fe1303cecd2d777f","0x89975b91b94c25c9c3660e4af4047a8bacf964783010820dbc91ff8281509379cb3b24c25080d5a01174dd9a049118d5","0xa7327fcb3710ed3273b048650bde40a32732ef40a7e58cf7f2f400979c177944c8bc54117ba6c80d5d4260801dddab79","0x92b475dc8cb5be4b90c482f122a51bcb3b6c70593817e7e2459c28ea54a7845c50272af38119406eaadb9bcb993368d0","0x9645173e9ecefc4f2eae8363504f7c0b81d85f8949a9f8a6c01f2d49e0a0764f4eacecf3e94016dd407fc14494fce9f9","0x9215fd8983d7de6ae94d35e6698226fc1454977ae58d42d294be9aad13ac821562ad37d5e7ee5cdfe6e87031d45cd197","0x810360a1c9b88a9e36f520ab5a1eb8bed93f52deefbe1312a69225c0a08edb10f87cc43b794aced9c74220cefcc57e7d","0xad7e810efd61ed4684aeda9ed8bb02fb9ae4b4b63fda8217d37012b94ff1b91c0087043bfa4e376f961fff030c729f3b","0x8b07c95c6a06db8738d10bb03ec11b89375c08e77f0cab7e672ce70b2685667ca19c7e1c8b092821d31108ea18dfd4c7","0x968825d025ded899ff7c57245250535c732836f7565eab1ae23ee7e513201d413c16e1ba3f5166e7ac6cf74de8ceef4f","0x90824337
0c5788200703ade8164943ad5f8c458219186432e74dbc9904a701ea307fd9b94976c866e6c58595fd891c4b","0x959969d16680bc535cdc6339e6186355d0d6c0d53d7bbfb411641b9bf4b770fd5f575beef5deec5c4fa4d192d455c350","0xad177f4f826a961adeac76da40e2d930748effff731756c797eddc4e5aa23c91f070fb69b19221748130b0961e68a6bb","0x82f8462bcc25448ef7e0739425378e9bb8a05e283ce54aae9dbebaf7a3469f57833c9171672ad43a79778366c72a5e37","0xa28fb275b1845706c2814d9638573e9bc32ff552ebaed761fe96fdbce70395891ca41c400ae438369264e31a2713b15f","0x8a9c613996b5e51dadb587a787253d6081ea446bf5c71096980bf6bd3c4b69905062a8e8a3792de2d2ece3b177a71089","0x8d5aefef9f60cb27c1db2c649221204dda48bb9bf8bf48f965741da051340e8e4cab88b9d15c69f3f84f4c854709f48a","0x93ebf2ca6ad85ab6deace6de1a458706285b31877b1b4d7dcb9d126b63047efaf8c06d580115ec9acee30c8a7212fa55","0xb3ee46ce189956ca298057fa8223b7fd1128cf52f39159a58bca03c71dd25161ac13f1472301f72aef3e1993fe1ab269","0xa24d7a8d066504fc3f5027ccb13120e2f22896860e02c45b5eba1dbd512d6a17c28f39155ea581619f9d33db43a96f92","0xae9ceacbfe12137db2c1a271e1b34b8f92e4816bad1b3b9b6feecc34df0f8b3b0f7ed0133acdf59c537d43d33fc8d429","0x83967e69bf2b361f86361bd705dce0e1ad26df06da6c52b48176fe8dfcbeb03c462c1a4c9e649eff8c654b18c876fdef","0x9148e6b814a7d779c19c31e33a068e97b597de1f8100513db3c581190513edc4d544801ce3dd2cf6b19e0cd6daedd28a","0x94ccdafc84920d320ed22de1e754adea072935d3c5f8c2d1378ebe53d140ea29853f056fb3fb1e375846061a038cc9bc","0xafb43348498c38b0fa5f971b8cdd3a62c844f0eb52bc33daf2f67850af0880fce84ecfb96201b308d9e6168a0d443ae3","0x86d5736520a83538d4cd058cc4b4e84213ed00ebd6e7af79ae787adc17a92ba5359e28ba6c91936d967b4b28d24c3070","0xb5210c1ff212c5b1e9ef9126e08fe120a41e386bb12c22266f7538c6d69c7fd8774f11c02b81fd4e88f9137b020801fe","0xb78cfd19f94d24e529d0f52e18ce6185cb238edc6bd43086270fd51dd99f664f43dd4c7d2fe506762fbd859028e13fcf","0xa6e7220598c554abdcc3fdc587b988617b32c7bb0f82c06205467dbedb58276cc07cae317a190f19d19078773f4c2bbb","0xb88862809487ee430368dccd85a5d72fa4d163ca4aad15c78800e19c1a95be2192719801e315d86cff7795e0544a77e4","0x87ecb13a03921296f8c42ceb252d04716f10e09c93962239fcaa0a7fef93f19ab3f2680bc406170108bc583e9ff2e721","0xa810cd473832b6581c36ec4cb403f2849357ba2d0b54df98ef3004b8a530c078032922a81d40158f5fb0043d56477f6e","0xa247b45dd85ca7fbb718b328f30a03f03c84aef2c583fbdc9fcc9eb8b52b34529e8c8f535505c10598b1b4dac3d7c647","0x96ee0b91313c68bac4aa9e065ce9e1d77e51ca4cff31d6a438718c58264dee87674bd97fc5c6b8008be709521e4fd008","0x837567ad073e42266951a9a54750919280a2ac835a73c158407c3a2b1904cf0d17b7195a393c71a18ad029cbd9cf79ee","0xa6a469c44b67ebf02196213e7a63ad0423aab9a6e54acc6fcbdbb915bc043586993454dc3cd9e4be8f27d67c1050879b","0x8712d380a843b08b7b294f1f06e2f11f4ad6bcc655fdde86a4d8bc739c23916f6fad2b902fe47d6212f03607907e9f0e","0x920adfb644b534789943cdae1bdd6e42828dda1696a440af2f54e6b97f4f97470a1c6ea9fa6a2705d8f04911d055acd1","0xa161c73adf584a0061e963b062f59d90faac65c9b3a936b837a10d817f02fcabfa748824607be45a183dd40f991fe83f","0x874f4ecd408c76e625ea50bc59c53c2d930ee25baf4b4eca2440bfbffb3b8bc294db579caa7c68629f4d9ec24187c1ba","0x8bff18087f112be7f4aa654e85c71fef70eee8ae480f61d0383ff6f5ab1a0508f966183bb3fc4d6f29cb7ca234aa50d3","0xb03b46a3ca3bc743a173cbc008f92ab1aedd7466b35a6d1ca11e894b9482ea9dc75f8d6db2ddd1add99bfbe7657518b7","0x8b4f3691403c3a8ad9e097f02d130769628feddfa8c2b3dfe8cff64e2bed7d6e5d192c1e2ba0ac348b8585e94acd5fa1","0xa0d9ca4a212301f97591bf65d5ef2b2664766b427c9dd342e23cb468426e6a56be66b1cb41fea1889ac5d11a8e3c50a5","0x8c93ed74188ca23b3df29e5396974b9cc135c91fdefdea6c0df694c8116410e93509559af55533a3776ac11b228d69b1","0x82dd331fb3f9e344ebdeeb557769
b86a2cc8cc38f6c298d7572a33aea87c261afa9dbd898989139b9fc16bc1e880a099","0xa65faedf326bcfd8ef98a51410c78b021d39206704e8291cd1f09e096a66b9b0486be65ff185ca224c45918ac337ddeb","0xa188b37d363ac072a766fd5d6fa27df07363feff1342217b19e3c37385e42ffde55e4be8355aceaa2f267b6d66b4ac41","0x810fa3ba3e96d843e3bafd3f2995727f223d3567c8ba77d684c993ba1773c66551eb5009897c51b3fe9b37196984f5ec","0x87631537541852da323b4353af45a164f68b304d24c01183bf271782e11687f3fcf528394e1566c2a26cb527b3148e64","0xb721cb2b37b3c477a48e3cc0044167d51ff568a5fd2fb606e5aec7a267000f1ddc07d3db919926ae12761a8e017c767c","0x904dfad4ba2cc1f6e60d1b708438a70b1743b400164cd981f13c064b8328d5973987d4fb9cf894068f29d3deaf624dfb","0xa70491538893552c20939fae6be2f07bfa84d97e2534a6bbcc0f1729246b831103505e9f60e97a8fa7d2e6c1c2384579","0x8726cf1b26b41f443ff7485adcfddc39ace2e62f4d65dd0bb927d933e262b66f1a9b367ded5fbdd6f3b0932553ac1735","0xae8a11cfdf7aa54c08f80cb645e3339187ab3886babe9fae5239ba507bb3dd1c0d161ca474a2df081dcd3d63e8fe445e","0x92328719e97ce60e56110f30a00ac5d9c7a2baaf5f8d22355d53c1c77941e3a1fec7d1405e6fbf8959665fe2ba7a8cad","0x8d9d6255b65798d0018a8cccb0b6343efd41dc14ff2058d3eed9451ceaad681e4a0fa6af67b0a04318aa628024e5553d","0xb70209090055459296006742d946a513f0cba6d83a05249ee8e7a51052b29c0ca9722dc4af5f9816a1b7938a5dac7f79","0xaab7b766b9bf91786dfa801fcef6d575dc6f12b77ecc662eb4498f0312e54d0de9ea820e61508fc8aeee5ab5db529349","0xa8104b462337748b7f086a135d0c3f87f8e51b7165ca6611264b8fb639d9a2f519926cb311fa2055b5fadf03da70c678","0xb0d2460747d5d8b30fc6c6bd0a87cb343ddb05d90a51b465e8f67d499cfc5e3a9e365da05ae233bbee792cdf90ec67d5","0xaa55f5bf3815266b4a149f85ed18e451c93de9163575e3ec75dd610381cc0805bb0a4d7c4af5b1f94d10231255436d2c","0x8d4c6a1944ff94426151909eb5b99cfd92167b967dabe2bf3aa66bb3c26c449c13097de881b2cfc1bf052862c1ef7b03","0x8862296162451b9b6b77f03bf32e6df71325e8d7485cf3335d66fd48b74c2a8334c241db8263033724f26269ad95b395","0x901aa96deb26cda5d9321190ae6624d357a41729d72ef1abfd71bebf6139af6d690798daba53b7bc5923462115ff748a","0x96c195ec4992728a1eb38cdde42d89a7bce150db43adbc9e61e279ea839e538deec71326b618dd39c50d589f78fc0614","0xb6ff8b8aa0837b99a1a8b46fb37f20ad4aecc6a98381b1308697829a59b8442ffc748637a88cb30c9b1f0f28a926c4f6","0x8d807e3dca9e7bef277db1d2cfb372408dd587364e8048b304eff00eacde2c723bfc84be9b98553f83cba5c7b3cba248","0x8800c96adb0195c4fc5b24511450dee503c32bf47044f5e2e25bd6651f514d79a2dd9b01cd8c09f3c9d3859338490f57","0x89fe366096097e38ec28dd1148887112efa5306cc0c3da09562aafa56f4eb000bf46ff79bf0bdd270cbde6bf0e1c8957","0xaf409a90c2776e1e7e3760b2042507b8709e943424606e31e791d42f17873a2710797f5baaab4cc4a19998ef648556b0","0x8d761863c9b6edbd232d35ab853d944f5c950c2b643f84a1a1327ebb947290800710ff01dcfa26dc8e9828481240e8b1","0x90b95e9be1e55c463ed857c4e0617d6dc3674e99b6aa62ed33c8e79d6dfcf7d122f4f4cc2ee3e7c5a49170cb617d2e2e","0xb3ff381efefabc4db38cc4727432e0301949ae4f16f8d1dea9b4f4de611cf5a36d84290a0bef160dac4e1955e516b3b0","0xa8a84564b56a9003adcadb3565dc512239fc79572762cda7b5901a255bc82656bb9c01212ad33d6bef4fbbce18dacc87","0x90a081890364b222eef54bf0075417f85e340d2fec8b7375995f598aeb33f26b44143ebf56fca7d8b4ebb36b5747b0eb","0xade6ee49e1293224ddf2d8ab7f14bb5be6bc6284f60fd5b3a1e0cf147b73cff57cf19763b8a36c5083badc79c606b103","0xb2fa99806dd2fa3de09320b615a2570c416c9bcdb052e592b0aead748bbe407ec9475a3d932ae48b71c2627eb81986a6","0x91f3b7b73c8ccc9392542711c45fe6f236057e6efad587d661ad5cb4d6e88265f86b807bb1151736b1009ab74fd7acb4","0x8800e2a46af96696dfbdcbf2ca2918b3dcf28ad970170d2d1783b52b8d945a9167d052beeb55f56c126da7ffa7059baa","0x9862267a1311c385956b977c9aa08548c28d758d7ba82d43
dbc3d0a0fd1b7a221d39e8399997fea9014ac509ff510ac4","0xb7d24f78886fd3e2d283e18d9ad5a25c1a904e7d9b9104bf47da469d74f34162e27e531380dbbe0a9d051e6ffd51d6e7","0xb0f445f9d143e28b9df36b0f2c052da87ee2ca374d9d0fbe2eff66ca6fe5fe0d2c1951b428d58f7314b7e74e45d445ea","0xb63fc4083eabb8437dafeb6a904120691dcb53ce2938b820bb553da0e1eecd476f72495aacb72600cf9cad18698fd3db","0xb9ffd8108eaebd582d665f8690fe8bb207fd85185e6dd9f0b355a09bac1bbff26e0fdb172bc0498df025414e88fe2eda","0x967ed453e1f1a4c5b7b6834cc9f75c13f6889edc0cc91dc445727e9f408487bbf05c337103f61397a10011dfbe25d61d","0x98ceb673aff36e1987d5521a3984a07079c3c6155974bb8b413e8ae1ce84095fe4f7862fba7aefa14753eb26f2a5805f","0x85f01d28603a8fdf6ce6a50cb5c44f8a36b95b91302e3f4cd95c108ce8f4d212e73aec1b8d936520d9226802a2bd9136","0x88118e9703200ca07910345fbb789e7a8f92bd80bbc79f0a9e040e8767d33df39f6eded403a9b636eabf9101e588482a","0x90833a51eef1b10ed74e8f9bbd6197e29c5292e469c854eed10b0da663e2bceb92539710b1858bbb21887bd538d28d89","0xb513b905ec19191167c6193067b5cfdf5a3d3828375360df1c7e2ced5815437dfd37f0c4c8f009d7fb29ff3c8793f560","0xb1b6d405d2d18f9554b8a358cc7e2d78a3b34269737d561992c8de83392ac9a2857be4bf15de5a6c74e0c9d0f31f393c","0xb828bd3e452b797323b798186607849f85d1fb20c616833c0619360dfd6b3e3aa000fd09dafe4b62d74abc41072ff1a9","0x8efde67d0cca56bb2c464731879c9ac46a52e75bac702a63200a5e192b4f81c641f855ca6747752b84fe469cb7113b6c","0xb2762ba1c89ac3c9a983c242e4d1c2610ff0528585ed5c0dfc8a2c0253551142af9b59f43158e8915a1da7cc26b9df67","0x8a3f1157fb820d1497ef6b25cd70b7e16bb8b961b0063ad340d82a79ee76eb2359ca9e15e6d42987ed7f154f5eeaa2da","0xa75e29f29d38f09c879f971c11beb5368affa084313474a5ecafa2896180b9e47ea1995c2733ec46f421e395a1d9cffe","0x8e8c3dd3e7196ef0b4996b531ec79e4a1f211db5d5635e48ceb80ff7568b2ff587e845f97ee703bb23a60945ad64314a","0x8e7f32f4a3e3c584af5e3d406924a0aa34024c42eca74ef6cc2a358fd3c9efaf25f1c03aa1e66bb94b023a2ee2a1cace","0xab7dce05d59c10a84feb524fcb62478906b3fa045135b23afbede3bb32e0c678d8ebe59feabccb5c8f3550ea76cae44b","0xb38bb4b44d827f6fd3bd34e31f9186c59e312dbfadd4a7a88e588da10146a78b1f8716c91ad8b806beb8da65cab80c4c","0x9490ce9442bbbd05438c7f5c4dea789f74a7e92b1886a730544b55ba377840740a3ae4f2f146ee73f47c9278b0e233bc","0x83c003fab22a7178eed1a668e0f65d4fe38ef3900044e9ec63070c23f2827d36a1e73e5c2b883ec6a2afe2450171b3b3","0x9982f02405978ddc4fca9063ebbdb152f524c84e79398955e66fe51bc7c1660ec1afc3a86ec49f58d7b7dde03505731c","0xab337bd83ccdd2322088ffa8d005f450ced6b35790f37ab4534313315ee84312adc25e99cce052863a8bedee991729ed","0x8312ce4bec94366d88f16127a17419ef64285cd5bf9e5eda010319b48085966ed1252ed2f5a9fd3e0259b91bb65f1827","0xa60d5a6327c4041b0c00a1aa2f0af056520f83c9ce9d9ccd03a0bd4d9e6a1511f26a422ea86bd858a1f77438adf07e6c","0xb84a0a0b030bdad83cf5202aa9afe58c9820e52483ab41f835f8c582c129ee3f34aa096d11c1cd922eda02ea1196a882","0x8077d105317f4a8a8f1aadeb05e0722bb55f11abcb490c36c0904401107eb3372875b0ac233144829e734f0c538d8c1d","0x9202503bd29a6ec198823a1e4e098f9cfe359ed51eb5174d1ca41368821bfeebcbd49debfd02952c41359d1c7c06d2b1","0xabc28c155e09365cb77ffead8dc8f602335ef93b2f44e4ef767ce8fc8ef9dd707400f3a722e92776c2e0b40192c06354","0xb0f6d1442533ca45c9399e0a63a11f85ff288d242cea6cb3b68c02e77bd7d158047cae2d25b3bcd9606f8f66d9b32855","0xb01c3d56a0db84dc94575f4b6ee2de4beca3230e86bed63e2066beb22768b0a8efb08ebaf8ac3dedb5fe46708b084807","0x8c8634b0432159f66feaabb165842d1c8ac378f79565b1b90c381aa8450eb4231c3dad11ec9317b9fc2b155c3a771e32","0x8e67f623d69ecd430c9ee0888520b6038f13a2b6140525b056dc0951f0cfed2822e62cf11d952a483107c5c5acac4826","0x9590bb1cba816dd6acd5ac5fba5142c0a19d53573e422c74005e0bcf34993a8138c8
3124cad35a3df65879dba6134edd","0x801cd96cde0749021a253027118d3ea135f3fcdbe895db08a6c145641f95ebd368dd6a1568d995e1d0084146aebe224a","0x848b5d196427f6fc1f762ee3d36e832b64a76ec1033cfedc8b985dea93932a7892b8ef1035c653fb9dcd9ab2d9a44ac8","0xa1017eb83d5c4e2477e7bd2241b2b98c4951a3b391081cae7d75965cadc1acaec755cf350f1f3d29741b0828e36fedea","0x8d6d2785e30f3c29aad17bd677914a752f831e96d46caf54446d967cb2432be2c849e26f0d193a60bee161ea5c6fe90a","0x935c0ba4290d4595428e034b5c8001cbd400040d89ab00861108e8f8f4af4258e41f34a7e6b93b04bc253d3b9ffc13bf","0xaac02257146246998477921cef2e9892228590d323b839f3e64ea893b991b463bc2f47e1e5092ddb47e70b2f5bce7622","0xb921fde9412970a5d4c9a908ae8ce65861d06c7679af577cf0ad0d5344c421166986bee471fd6a6cecb7d591f06ec985","0x8ef4c37487b139d6756003060600bb6ebac7ea810b9c4364fc978e842f13ac196d1264fbe5af60d76ff6d9203d8e7d3f","0x94b65e14022b5cf6a9b95f94be5ace2711957c96f4211c3f7bb36206bd39cfbd0ea82186cab5ad0577a23214a5c86e9e","0xa31c166d2a2ca1d5a75a5920fef7532681f62191a50d8555fdaa63ba4581c3391cc94a536fc09aac89f64eafceec3f90","0x919a8cc128de01e9e10f5d83b08b52293fdd41bde2b5ae070f3d95842d4a16e5331cf2f3d61c765570c8022403610fa4","0xb23d6f8331eef100152d60483cfa14232a85ee712c8538c9b6417a5a7c5b353c2ac401390c6c215cb101f5cee6b5f43e","0xab357160c08a18319510a571eafff154298ce1020de8e1dc6138a09fcb0fcbcdd8359f7e9386bda00b7b9cdea745ffdc","0xab55079aea34afa5c0bd1124b9cdfe01f325b402fdfa017301bf87812eaa811ea5798c3aaf818074d420d1c782b10ada","0xade616010dc5009e7fc4f8d8b00dc716686a5fa0a7816ad9e503e15839d3b909b69d9dd929b7575376434ffec0d2bea8","0x863997b97ed46898a8a014599508fa3079f414b1f4a0c4fdc6d74ae8b444afa350f327f8bfc2a85d27f9e2d049c50135","0x8d602ff596334efd4925549ed95f2aa762b0629189f0df6dbb162581657cf3ea6863cd2287b4d9c8ad52813d87fcd235","0xb70f68c596dcdeed92ad5c6c348578b26862a51eb5364237b1221e840c47a8702f0fbc56eb520a22c0eed99795d3903e","0x9628088f8e0853cefadee305a8bf47fa990c50fa96a82511bbe6e5dc81ef4b794e7918a109070f92fc8384d77ace226f","0x97e26a46e068b605ce96007197ecd943c9a23881862f4797a12a3e96ba2b8d07806ad9e2a0646796b1889c6b7d75188c","0xb1edf467c068cc163e2d6413cc22b16751e78b3312fe47b7ea82b08a1206d64415b2c8f2a677fa89171e82cc49797150","0xa44d15ef18745b251429703e3cab188420e2d974de07251501799b016617f9630643fcd06f895634d8ecdd579e1bf000","0xabd126df3917ba48c618ee4dbdf87df506193462f792874439043fa1b844466f6f4e0ff2e42516e63b5b23c0892b2695","0xa2a67f57c4aa3c2aa1eeddbfd5009a89c26c2ce8fa3c96a64626aba19514beb125f27df8559506f737de3eae0f1fc18f","0xa633e0132197e6038197304b296ab171f1d8e0d0f34dcf66fe9146ac385b0239232a8470b9205a4802ab432389f4836d","0xa914b3a28509a906c3821463b936455d58ff45dcbe158922f9efb2037f2eb0ce8e92532d29b5d5a3fcd0d23fa773f272","0xa0e1412ce4505daf1a2e59ce4f0fc0e0023e335b50d2b204422f57cd65744cc7a8ed35d5ef131a42c70b27111d3115b7","0xa2339e2f2b6072e88816224fdd612c04d64e7967a492b9f8829db15367f565745325d361fd0607b0def1be384d010d9e","0xa7309fc41203cb99382e8193a1dcf03ac190a7ce04835304eb7e341d78634e83ea47cb15b885601956736d04cdfcaa01","0x81f3ccd6c7f5b39e4e873365f8c37b214e8ab122d04a606fbb7339dc3298c427e922ec7418002561d4106505b5c399ee","0x92c121cf914ca549130e352eb297872a63200e99b148d88fbc9506ad882bec9d0203d65f280fb5b0ba92e336b7f932e8","0xa4b330cf3f064f5b131578626ad7043ce2a433b6f175feb0b52d36134a454ca219373fd30d5e5796410e005b69082e47","0x86fe5774112403ad83f9c55d58317eeb17ad8e1176d9f2f69c2afb7ed83bc718ed4e0245ceab4b377f5f062dcd4c00e7","0x809d152a7e2654c7fd175b57f7928365a521be92e1ed06c05188a95864ddb25f7cab4c71db7d61bbf4cae46f3a1d96ce","0xb82d663e55c2a5ada7e169e9b1a87bc1c0177baf1ec1c96559b4cb1c5214ce1ddf2ab8d345014cab6402f377
4235cf5a","0x86580af86df1bd2c385adb8f9a079e925981b7184db66fc5fe5b14cddb82e7d836b06eaeef14924ac529487b23dae111","0xb5f5f4c5c94944ecc804df6ab8687d64e27d988cbfeae1ba7394e0f6adbf778c5881ead7cd8082dd7d68542b9bb4ecd5","0xa6016916146c2685c46e8fdd24186394e2d5496e77e08c0c6a709d4cd7dfa97f1efcef94922b89196819076a91ad37b5","0xb778e7367ded3b6eab53d5fc257f7a87e8faf74a593900f2f517220add2125be3f6142022660d8181df8d164ad9441ce","0x8581b2d36abe6f553add4d24be761bec1b8efaa2929519114346615380b3c55b59e6ad86990e312f7e234d0203bdf59b","0x9917e74fd45c3f71a829ff5498a7f6b5599b48c098dda2339bf04352bfc7f368ccf1a407f5835901240e76452ae807d7","0xafd196ce6f9335069138fd2e3d133134da253978b4ce373152c0f26affe77a336505787594022e610f8feb722f7cc1fb","0xa477491a1562e329764645e8f24d8e228e5ef28c9f74c6b5b3abc4b6a562c15ffb0f680d372aed04d9e1bf944dece7be","0x9767440d58c57d3077319d3a330e5322b9ba16981ec74a5a14d53462eab59ae7fd2b14025bfc63b268862094acb444e6","0x80986d921be3513ef69264423f351a61cb48390c1be8673aee0f089076086aaebea7ebe268fd0aa7182695606116f679","0xa9554c5c921c07b450ee04e34ec58e054ac1541b26ce2ce5a393367a97348ba0089f53db6660ad76b60278b66fd12e3e","0x95097e7d2999b3e84bf052c775581cf361325325f4a50192521d8f4693c830bed667d88f482dc1e3f833aa2bd22d2cbf","0x9014c91d0f85aefd28436b5228c12f6353c055a9326c7efbf5e071e089e2ee7c070fcbc84c5fafc336cbb8fa6fec1ca1","0x90f57ba36ee1066b55d37384942d8b57ae00f3cf9a3c1d6a3dfee1d1af42d4b5fa9baeb0cd7e46687d1d6d090ddb931d","0x8e4b1db12fd760a17214c9e47f1fce6e43c0dbb4589a827a13ac61aaae93759345697bb438a00edab92e0b7b62414683","0x8022a959a513cdc0e9c705e0fc04eafd05ff37c867ae0f31f6d01cddd5df86138a426cab2ff0ac8ff03a62e20f7e8f51","0x914e9a38829834c7360443b8ed86137e6f936389488eccf05b4b4db7c9425611705076ecb3f27105d24b85c852be7511","0x957fb10783e2bd0db1ba66b18e794df710bc3b2b05776be146fa5863c15b1ebdd39747b1a95d9564e1772cdfc4f37b8a","0xb6307028444daed8ed785ac9d0de76bc3fe23ff2cc7e48102553613bbfb5afe0ebe45e4212a27021c8eb870721e62a1f","0x8f76143597777d940b15a01b39c5e1b045464d146d9a30a6abe8b5d3907250e6c7f858ff2308f8591e8b0a7b3f3c568a","0x96163138ac0ce5fd00ae9a289648fd9300a0ca0f63a88481d703ecd281c06a52a3b5178e849e331f9c85ca4ba398f4cc","0xa63ef47c3e18245b0482596a09f488a716df3cbd0f9e5cfabed0d742843e65db8961c556f45f49762f3a6ac8b627b3ef","0x8cb595466552e7c4d42909f232d4063e0a663a8ef6f6c9b7ce3a0542b2459cde04e0e54c7623d404acb5b82775ac04f6","0xb47fe69960eb45f399368807cff16d941a5a4ebad1f5ec46e3dc8a2e4d598a7e6114d8f0ca791e9720fd786070524e2b","0x89eb5ff83eea9df490e5beca1a1fbbbbcf7184a37e2c8c91ede7a1e654c81e8cd41eceece4042ea7918a4f4646b67fd6","0xa84f5d155ed08b9054eecb15f689ba81e44589e6e7207a99790c598962837ca99ec12344105b16641ca91165672f7153","0xa6cc8f25c2d5b2d2f220ec359e6a37a52b95fa6af6e173c65e7cd55299eff4aa9e6d9e6f2769e6459313f1f2aecb0fab","0xafcde944411f017a9f7979755294981e941cc41f03df5e10522ef7c7505e5f1babdd67b3bf5258e8623150062eb41d9b","0x8fab39f39c0f40182fcd996ade2012643fe7731808afbc53f9b26900b4d4d1f0f5312d9d40b3df8baa4739970a49c732","0xae193af9726da0ebe7df1f9ee1c4846a5b2a7621403baf8e66c66b60f523e719c30c6b4f897bb14b27d3ff3da8392eeb","0x8ac5adb82d852eba255764029f42e6da92dcdd0e224d387d1ef94174038db9709ac558d90d7e7c57ad4ce7f89bbfc38c","0xa2066b3458fdf678ee487a55dd5bfb74fde03b54620cb0e25412a89ee28ad0d685e309a51e3e4694be2fa6f1593a344c","0x88d031745dd0ae07d61a15b594be5d4b2e2a29e715d081649ad63605e3404b0c3a5353f0fd9fad9c05c18e93ce674fa1","0x8283cfb0ef743a043f2b77ecaeba3005e2ca50435585b5dd24777ee6bce12332f85e21b446b536da38508807f0f07563","0xb376de22d5f6b0af0b59f7d9764561f4244cf8ffe22890ecd3dcf2ff1832130c9b821e068c9d8773136f4796721e5963","0xae3afc5
0c764f406353965363840bf28ee85e7064eb9d5f0bb3c31c64ab10f48c853e942ee2c9b51bae59651eaa08c2f","0x948b204d103917461a01a6c57a88f2d66b476eae5b00be20ec8c747650e864bc8a83aee0aff59cb7584b7a3387e0ee48","0x81ab098a082b07f896c5ffd1e4446cb7fb44804cbbf38d125208b233fc82f8ec9a6a8d8dd1c9a1162dc28ffeec0dde50","0xa149c6f1312821ced2969268789a3151bdda213451760b397139a028da609c4134ac083169feb0ee423a0acafd10eceb","0xb0ac9e27a5dadaf523010f730b28f0ebac01f460d3bbbe277dc9d44218abb5686f4fac89ae462682fef9edbba663520a","0x8d0e0073cca273daaaa61b6fc54bfe5a009bc3e20ae820f6c93ba77b19eca517d457e948a2de5e77678e4241807157cb","0xad61d3a2edf7c7533a04964b97499503fd8374ca64286dba80465e68fe932e96749b476f458c6fc57cb1a7ca85764d11","0x90eb5e121ae46bc01a30881eaa556f46bd8457a4e80787cf634aab355082de34ac57d7f497446468225f7721e68e2a47","0x8cdac557de7c42d1f3780e33dec1b81889f6352279be81c65566cdd4952d4c15d79e656cbd46035ab090b385e90245ef","0x82b67e61b88b84f4f4d4f65df37b3e3dcf8ec91ea1b5c008fdccd52da643adbe6468a1cfdb999e87d195afe2883a3b46","0x8503b467e8f5d6048a4a9b78496c58493a462852cab54a70594ae3fd064cfd0deb4b8f336a262155d9fedcaa67d2f6fd","0x8db56c5ac763a57b6ce6832930c57117058e3e5a81532b7d19346346205e2ec614eb1a2ee836ef621de50a7bc9b7f040","0xad344699198f3c6e8c0a3470f92aaffc805b76266734414c298e10b5b3797ca53578de7ccb2f458f5e0448203f55282b","0x80602032c43c9e2a09154cc88b83238343b7a139f566d64cb482d87436b288a98f1ea244fd3bff8da3c398686a900c14","0xa6385bd50ecd548cfb37174cdbb89e10025b5cadaf3cff164c95d7aef5a33e3d6a9bf0c681b9e11db9ef54ebeee2a0c1","0xabf2d95f4aa34b0581eb9257a0cc8462b2213941a5deb8ba014283293e8b36613951b61261cc67bbd09526a54cbbff76","0xa3d5de52f48df72c289ff713e445991f142390798cd42bd9d9dbefaee4af4f5faf09042d126b975cf6b98711c3072553","0x8e627302ff3d686cff8872a1b7c2a57b35f45bf2fc9aa42b049d8b4d6996a662b8e7cbac6597f0cb79b0cc4e29fbf133","0x8510702e101b39a1efbf4e504e6123540c34b5689645e70d0bac1ecc1baf47d86c05cef6c4317a4e99b4edaeb53f2d00","0xaa173f0ecbcc6088f878f8726d317748c81ebf501bba461f163b55d66099b191ec7c55f7702f351a9c8eb42cfa3280e2","0xb560a697eafab695bcef1416648a0a664a71e311ecbe5823ae903bd0ed2057b9d7574b9a86d3fe22aa3e6ddce38ea513","0x8df6304a3d9cf40100f3f687575419c998cd77e5cc27d579cf4f8e98642de3609af384a0337d145dd7c5635172d26a71","0x8105c7f3e4d30a29151849673853b457c1885c186c132d0a98e63096c3774bc9deb956cf957367e633d0913680bda307","0x95373fc22c0917c3c2044ac688c4f29a63ed858a45c0d6d2d0fe97afd6f532dcb648670594290c1c89010ecc69259bef","0x8c2fae9bcadab341f49b55230310df93cac46be42d4caa0d42e45104148a91e527af1b4209c0d972448162aed28fab64","0xb05a77baab70683f76209626eaefdda2d36a0b66c780a20142d23c55bd479ddd4ad95b24579384b6cf62c8eb4c92d021","0x8e6bc6a7ea2755b4aaa19c1c1dee93811fcde514f03485fdc3252f0ab7f032c315614f6336e57cea25dcfb8fb6084eeb","0xb656a27d06aade55eadae2ad2a1059198918ea6cc3fd22c0ed881294d34d5ac7b5e4700cc24350e27d76646263b223aa","0xa296469f24f6f56da92d713afcd4dd606e7da1f79dc4e434593c53695847eefc81c7c446486c4b3b8c8d00c90c166f14","0x87a326f57713ac2c9dffeb3af44b9f3c613a8f952676fc46343299122b47ee0f8d792abaa4b5db6451ced5dd153aabd0","0xb689e554ba9293b9c1f6344a3c8fcb6951d9f9eac4a2e2df13de021aade7c186be27500e81388e5b8bcab4c80f220a31","0x87ae0aa0aa48eac53d1ca5a7b93917de12db9e40ceabf8fdb40884ae771cfdf095411deef7c9f821af0b7070454a2608","0xa71ffa7eae8ace94e6c3581d4cb2ad25d48cbd27edc9ec45baa2c8eb932a4773c3272b2ffaf077b40f76942a1f3af7f2","0x94c218c91a9b73da6b7a495b3728f3028df8ad9133312fc0c03e8c5253b7ccb83ed14688fd4602e2fd41f29a0bc698bd","0xae1e77b90ca33728af07a4c03fb2ef71cd92e2618e7bf8ed4d785ce90097fc4866c29999eb84a6cf1819d75285a03af2","0xb7a5945b277dab9993cf761e838
b0ac6eaa903d7111fca79f9fde3d4285af7a89bf6634a71909d095d7619d913972c9c","0x8c43b37be02f39b22029b20aca31bff661abce4471dca88aa3bddefd9c92304a088b2dfc8c4795acc301ca3160656af2","0xb32e5d0fba024554bd5fe8a793ebe8003335ddd7f585876df2048dcf759a01285fecb53daae4950ba57f3a282a4d8495","0x85ea7fd5e10c7b659df5289b2978b2c89e244f269e061b9a15fcab7983fc1962b63546e82d5731c97ec74b6804be63ef","0x96b89f39181141a7e32986ac02d7586088c5a9662cec39843f397f3178714d02f929af70630c12cbaba0268f8ba2d4fa","0x929ab1a2a009b1eb37a2817c89696a06426529ebe3f306c586ab717bd34c35a53eca2d7ddcdef36117872db660024af9","0xa696dccf439e9ca41511e16bf3042d7ec0e2f86c099e4fc8879d778a5ea79e33aa7ce96b23dc4332b7ba26859d8e674d","0xa8fe69a678f9a194b8670a41e941f0460f6e2dbc60470ab4d6ae2679cc9c6ce2c3a39df2303bee486dbfde6844e6b31a","0x95f58f5c82de2f2a927ca99bf63c9fc02e9030c7e46d0bf6b67fe83a448d0ae1c99541b59caf0e1ccab8326231af09a5","0xa57badb2c56ca2c45953bd569caf22968f76ed46b9bac389163d6fe22a715c83d5e94ae8759b0e6e8c2f27bff7748f3f","0x868726fd49963b24acb5333364dffea147e98f33aa19c7919dc9aca0fd26661cfaded74ede7418a5fadbe7f5ae67b67b","0xa8d8550dcc64d9f1dd7bcdab236c4122f2b65ea404bb483256d712c7518f08bb028ff8801f1da6aed6cbfc5c7062e33b","0x97e25a87dae23155809476232178538d4bc05d4ff0882916eb29ae515f2a62bfce73083466cc0010ca956aca200aeacc","0xb4ea26be3f4bd04aa82d7c4b0913b97bcdf5e88b76c57eb1a336cbd0a3eb29de751e1bc47c0e8258adec3f17426d0c71","0x99ee555a4d9b3cf2eb420b2af8e3bc99046880536116d0ce7193464ac40685ef14e0e3c442f604e32f8338cb0ef92558","0x8c64efa1da63cd08f319103c5c7a761221080e74227bbc58b8fb35d08aa42078810d7af3e60446cbaff160c319535648","0x8d9fd88040076c28420e3395cbdfea402e4077a3808a97b7939d49ecbcf1418fe50a0460e1c1b22ac3f6e7771d65169a","0xae3c19882d7a9875d439265a0c7003c8d410367627d21575a864b9cb4918de7dbdb58a364af40c5e045f3df40f95d337","0xb4f7bfacab7b2cafe393f1322d6dcc6f21ffe69cd31edc8db18c06f1a2b512c27bd0618091fd207ba8df1808e9d45914","0x94f134acd0007c623fb7934bcb65ef853313eb283a889a3ffa79a37a5c8f3665f3d5b4876bc66223610c21dc9b919d37","0xaa15f74051171daacdc1f1093d3f8e2d13da2833624b80a934afec86fc02208b8f55d24b7d66076444e7633f46375c6a","0xa32d6bb47ef9c836d9d2371807bafbbbbb1ae719530c19d6013f1d1f813c49a60e4fa51d83693586cba3a840b23c0404","0xb61b3599145ea8680011aa2366dc511a358b7d67672d5b0c5be6db03b0efb8ca5a8294cf220ea7409621f1664e00e631","0x859cafc3ee90b7ececa1ed8ef2b2fc17567126ff10ca712d5ffdd16aa411a5a7d8d32c9cab1fbf63e87dce1c6e2f5f53","0xa2fef1b0b2874387010e9ae425f3a9676d01a095d017493648bcdf3b31304b087ccddb5cf76abc4e1548b88919663b6b","0x939e18c73befc1ba2932a65ede34c70e4b91e74cc2129d57ace43ed2b3af2a9cc22a40fbf50d79a63681b6d98852866d","0xb3b4259d37b1b14aee5b676c9a0dd2d7f679ab95c120cb5f09f9fbf10b0a920cb613655ddb7b9e2ba5af4a221f31303c","0x997255fe51aaca6e5a9cb3359bcbf25b2bb9e30649bbd53a8a7c556df07e441c4e27328b38934f09c09d9500b5fabf66","0xabb91be2a2d860fd662ed4f1c6edeefd4da8dc10e79251cf87f06029906e7f0be9b486462718f0525d5e049472692cb7","0xb2398e593bf340a15f7801e1d1fbda69d93f2a32a889ec7c6ae5e8a37567ac3e5227213c1392ee86cfb3b56ec2787839","0x8ddf10ccdd72922bed36829a36073a460c2118fc7a56ff9c1ac72581c799b15c762cb56cb78e3d118bb9f6a7e56cb25e","0x93e6bc0a4708d16387cacd44cf59363b994dc67d7ada7b6d6dbd831c606d975247541b42b2a309f814c1bfe205681fc6","0xb93fc35c05998cffda2978e12e75812122831523041f10d52f810d34ff71944979054b04de0117e81ddf5b0b4b3e13c0","0x92221631c44d60d68c6bc7b287509f37ee44cbe5fdb6935cee36b58b17c7325098f98f7910d2c3ca5dc885ad1d6dabc7","0xa230124424a57fad3b1671f404a94d7c05f4c67b7a8fbacfccea28887b78d7c1ed40b92a58348e4d61328891cd2f6cee","0xa6a230edb8518a0f49d7231bc3e0bceb5c2ac427f045819
f8584ba6f3ae3d63ed107a9a62aad543d7e1fcf1f20605706","0x845be1fe94223c7f1f97d74c49d682472585d8f772762baad8a9d341d9c3015534cc83d102113c51a9dea2ab10d8d27b","0xb44262515e34f2db597c8128c7614d33858740310a49cdbdf9c8677c5343884b42c1292759f55b8b4abc4c86e4728033","0x805592e4a3cd07c1844bc23783408310accfdb769cca882ad4d07d608e590a288b7370c2cb327f5336e72b7083a0e30f","0x95153e8b1140df34ee864f4ca601cb873cdd3efa634af0c4093fbaede36f51b55571ab271e6a133020cd34db8411241f","0x82878c1285cfa5ea1d32175c9401f3cc99f6bb224d622d3fd98cc7b0a27372f13f7ab463ce3a33ec96f9be38dbe2dfe3","0xb7588748f55783077c27fc47d33e20c5c0f5a53fc0ac10194c003aa09b9f055d08ec971effa4b7f760553997a56967b3","0xb36b4de6d1883b6951f59cfae381581f9c6352fcfcf1524fccdab1571a20f80441d9152dc6b48bcbbf00371337ca0bd5","0x89c5523f2574e1c340a955cbed9c2f7b5fbceb260cb1133160dabb7d41c2f613ec3f6e74bbfab3c4a0a6f0626dbe068f","0xa52f58cc39f968a9813b1a8ddc4e83f4219e4dd82c7aa1dd083bea7edf967151d635aa9597457f879771759b876774e4","0x8300a67c2e2e123f89704abfde095463045dbd97e20d4c1157bab35e9e1d3d18f1f4aaba9cbe6aa2d544e92578eaa1b6","0xac6a7f2918768eb6a43df9d3a8a04f8f72ee52f2e91c064c1c7d75cad1a3e83e5aba9fe55bb94f818099ac91ccf2e961","0x8d64a2b0991cf164e29835c8ddef6069993a71ec2a7de8157bbfa2e00f6367be646ed74cbaf524f0e9fe13fb09fa15fd","0x8b2ffe5a545f9f680b49d0a9797a4a11700a2e2e348c34a7a985fc278f0f12def6e06710f40f9d48e4b7fbb71e072229","0x8ab8f71cd337fa19178924e961958653abf7a598e3f022138b55c228440a2bac4176cea3aea393549c03cd38a13eb3fc","0x8419d28318c19ea4a179b7abb43669fe96347426ef3ac06b158d79c0acf777a09e8e770c2fb10e14b3a0421705990b23","0x8bacdac310e1e49660359d0a7a17fe3d334eb820e61ae25e84cb52f863a2f74cbe89c2e9fc3283745d93a99b79132354","0xb57ace3fa2b9f6b2db60c0d861ace7d7e657c5d35d992588aeed588c6ce3a80b6f0d49f8a26607f0b17167ab21b675e4","0x83e265cde477f2ecc164f49ddc7fb255bb05ff6adc347408353b7336dc3a14fdedc86d5a7fb23f36b8423248a7a67ed1","0xa60ada971f9f2d79d436de5d3d045f5ab05308cae3098acaf5521115134b2a40d664828bb89895840db7f7fb499edbc5","0xa63eea12efd89b62d3952bf0542a73890b104dd1d7ff360d4755ebfa148fd62de668edac9eeb20507967ea37fb220202","0xa0275767a270289adc991cc4571eff205b58ad6d3e93778ddbf95b75146d82517e8921bd0d0564e5b75fa0ccdab8e624","0xb9b03fd3bf07201ba3a039176a965d736b4ef7912dd9e9bf69fe1b57c330a6aa170e5521fe8be62505f3af81b41d7806","0xa95f640e26fb1106ced1729d6053e41a16e4896acac54992279ff873e5a969aad1dcfa10311e28b8f409ac1dab7f03bb","0xb144778921742418053cb3c70516c63162c187f00db2062193bb2c14031075dbe055d020cde761b26e8c58d0ea6df2c1","0x8432fbb799e0435ef428d4fefc309a05dd589bce74d7a87faf659823e8c9ed51d3e42603d878e80f439a38be4321c2fa","0xb08ddef14e42d4fd5d8bf39feb7485848f0060d43b51ed5bdda39c05fe154fb111d29719ee61a23c392141358c0cfcff","0x8ae3c5329a5e025b86b5370e06f5e61177df4bda075856fade20a17bfef79c92f54ed495f310130021ba94fb7c33632b","0x92b6d3c9444100b4d7391febfc1dddaa224651677c3695c47a289a40d7a96d200b83b64e6d9df51f534564f272a2c6c6","0xb432bc2a3f93d28b5e506d68527f1efeb2e2570f6be0794576e2a6ef9138926fdad8dd2eabfa979b79ab7266370e86bc","0x8bc315eacedbcfc462ece66a29662ca3dcd451f83de5c7626ef8712c196208fb3d8a0faf80b2e80384f0dd9772f61a23","0xa72375b797283f0f4266dec188678e2b2c060dfed5880fc6bb0c996b06e91a5343ea2b695adaab0a6fd183b040b46b56","0xa43445036fbaa414621918d6a897d3692fdae7b2961d87e2a03741360e45ebb19fcb1703d23f1e15bb1e2babcafc56ac","0xb9636b2ffe305e63a1a84bd44fb402442b1799bd5272638287aa87ca548649b23ce8ce7f67be077caed6aa2dbc454b78","0x99a30bf0921d854c282b83d438a79f615424f28c2f99d26a05201c93d10378ab2cd94a792b571ddae5d4e0c0013f4006","0x8648e3c2f93d70b392443be116b48a863e4b75991bab5db656a4ef3c1e7f645e8d5
36771dfe4e8d1ceda3be8d32978b0","0xab50dc9e6924c1d2e9d2e335b2d679fc7d1a7632e84964d3bac0c9fe57e85aa5906ec2e7b0399d98ddd022e9b19b5904","0xab729328d98d295f8f3272afaf5d8345ff54d58ff9884da14f17ecbdb7371857fdf2f3ef58080054e9874cc919b46224","0x83fa5da7592bd451cad3ad7702b4006332b3aae23beab4c4cb887fa6348317d234bf62a359e665b28818e5410c278a09","0x8bdbff566ae9d368f114858ef1f009439b3e9f4649f73efa946e678d6c781d52c69af195df0a68170f5f191b2eac286b","0x91245e59b4425fd4edb2a61d0d47c1ccc83d3ced8180de34887b9655b5dcda033d48cde0bdc3b7de846d246c053a02e8","0xa2cb00721e68f1cad8933947456f07144dc69653f96ceed845bd577d599521ba99cdc02421118971d56d7603ed118cbf","0xaf8cd66d303e808b22ec57860dd909ca64c27ec2c60e26ffecfdc1179d8762ffd2739d87b43959496e9fee4108df71df","0x9954136812dffcd5d3f167a500e7ab339c15cfc9b3398d83f64b0daa3dd5b9a851204f424a3493b4e326d3de81e50a62","0x93252254d12511955f1aa464883ad0da793f84d900fea83e1df8bca0f2f4cf5b5f9acbaec06a24160d33f908ab5fea38","0x997cb55c26996586ba436a95566bd535e9c22452ca5d2a0ded2bd175376557fa895f9f4def4519241ff386a063f2e526","0xa12c78ad451e0ac911260ade2927a768b50cb4125343025d43474e7f465cdc446e9f52a84609c5e7e87ae6c9b3f56cda","0xa789d4ca55cbba327086563831b34487d63d0980ba8cf55197c016702ed6da9b102b1f0709ce3da3c53ff925793a3d73","0xa5d76acbb76741ce85be0e655b99baa04f7f587347947c0a30d27f8a49ae78cce06e1cde770a8b618d3db402be1c0c4b","0x873c0366668c8faddb0eb7c86f485718d65f8c4734020f1a18efd5fa123d3ea8a990977fe13592cd01d17e60809cb5ff","0xb659b71fe70f37573ff7c5970cc095a1dc0da3973979778f80a71a347ef25ad5746b2b9608bad4ab9a4a53a4d7df42d7","0xa34cbe05888e5e5f024a2db14cb6dcdc401a9cbd13d73d3c37b348f68688f87c24ca790030b8f84fef9e74b4eab5e412","0x94ce8010f85875c045b0f014db93ef5ab9f1f6842e9a5743dce9e4cb872c94affd9e77c1f1d1ab8b8660b52345d9acb9","0xadefa9b27a62edc0c5b019ddd3ebf45e4de846165256cf6329331def2e088c5232456d3de470fdce3fa758bfdd387512","0xa6b83821ba7c1f83cc9e4529cf4903adb93b26108e3d1f20a753070db072ad5a3689643144bdd9c5ea06bb9a7a515cd0","0xa3a9ddedc2a1b183eb1d52de26718151744db6050f86f3580790c51d09226bf05f15111691926151ecdbef683baa992c","0xa64bac89e7686932cdc5670d07f0b50830e69bfb8c93791c87c7ffa4913f8da881a9d8a8ce8c1a9ce5b6079358c54136","0xa77b5a63452cb1320b61ab6c7c2ef9cfbcade5fd4727583751fb2bf3ea330b5ca67757ec1f517bf4d503ec924fe32fbd","0x8746fd8d8eb99639d8cd0ca34c0d9c3230ed5a312aab1d3d925953a17973ee5aeb66e68667e93caf9cb817c868ea8f3d","0x88a2462a26558fc1fbd6e31aa8abdc706190a17c27fdc4217ffd2297d1b1f3321016e5c4b2384c5454d5717dc732ed03","0xb78893a97e93d730c8201af2e0d3b31cb923d38dc594ffa98a714e627c473d42ea82e0c4d2eeb06862ee22a9b2c54588","0x920cc8b5f1297cf215a43f6fc843e379146b4229411c44c0231f6749793d40f07b9af7699fd5d21fd69400b97febe027","0xa0f0eafce1e098a6b58c7ad8945e297cd93aaf10bc55e32e2e32503f02e59fc1d5776936577d77c0b1162cb93b88518b","0x98480ba0064e97a2e7a6c4769b4d8c2a322cfc9a3b2ca2e67e9317e2ce04c6e1108169a20bd97692e1cb1f1423b14908","0x83dbbb2fda7e287288011764a00b8357753a6a44794cc8245a2275237f11affdc38977214e463ad67aec032f3dfa37e9","0x86442fff37598ce2b12015ff19b01bb8a780b40ad353d143a0f30a06f6d23afd5c2b0a1253716c855dbf445cc5dd6865","0xb8a4c60c5171189414887847b9ed9501bff4e4c107240f063e2d254820d2906b69ef70406c585918c4d24f1dd052142b","0x919f33a98e84015b2034b57b5ffe9340220926b2c6e45f86fd79ec879dbe06a148ae68b77b73bf7d01bd638a81165617","0x95c13e78d89474a47fbc0664f6f806744b75dede95a479bbf844db4a7f4c3ae410ec721cb6ffcd9fa9c323da5740d5ae","0xab7151acc41fffd8ec6e90387700bcd7e1cde291ea669567295bea1b9dd3f1df2e0f31f3588cd1a1c08af8120aca4921","0x80e74c5c47414bd6eeef24b6793fb1fa2d8fb397467045fcff887c52476741d5bc4ff8b6d3387cb53ad2854
85630537f","0xa296ad23995268276aa351a7764d36df3a5a3cffd7dbeddbcea6b1f77adc112629fdeffa0918b3242b3ccd5e7587e946","0x813d2506a28a2b01cb60f49d6bd5e63c9b056aa56946faf2f33bd4f28a8d947569cfead3ae53166fc65285740b210f86","0x924b265385e1646287d8c09f6c855b094daaee74b9e64a0dddcf9ad88c6979f8280ba30c8597b911ef58ddb6c67e9fe3","0x8d531513c70c2d3566039f7ca47cd2352fd2d55b25675a65250bdb8b06c3843db7b2d29c626eed6391c238fc651cf350","0x82b338181b62fdc81ceb558a6843df767b6a6e3ceedc5485664b4ea2f555904b1a45fbb35f6cf5d96f27da10df82a325","0x92e62faaedea83a37f314e1d3cb4faaa200178371d917938e59ac35090be1db4b4f4e0edb78b9c991de202efe4f313d8","0x99d645e1b642c2dc065bac9aaa0621bc648c9a8351efb6891559c3a41ba737bd155fb32d7731950514e3ecf4d75980e4","0xb34a13968b9e414172fb5d5ece9a39cf2eb656128c3f2f6cc7a9f0c69c6bae34f555ecc8f8837dc34b5e470e29055c78","0xa2a0bb7f3a0b23a2cbc6585d59f87cd7e56b2bbcb0ae48f828685edd9f7af0f5edb4c8e9718a0aaf6ef04553ba71f3b7","0x8e1a94bec053ed378e524b6685152d2b52d428266f2b6eadd4bcb7c4e162ed21ab3e1364879673442ee2162635b7a4d8","0x9944adaff14a85eab81c73f38f386701713b52513c4d4b838d58d4ffa1d17260a6d056b02334850ea9a31677c4b078bd","0xa450067c7eceb0854b3eca3db6cf38669d72cb7143c3a68787833cbca44f02c0be9bfbe082896f8a57debb13deb2afb1","0x8be4ad3ac9ef02f7df09254d569939757101ee2eda8586fefcd8c847adc1efe5bdcb963a0cafa17651befaafb376a531","0x90f6de91ea50255f148ac435e08cf2ac00c772a466e38155bd7e8acf9197af55662c7b5227f88589b71abe9dcf7ba343","0x86e5a24f0748b106dee2d4d54e14a3b0af45a96cbee69cac811a4196403ebbee17fd24946d7e7e1b962ac7f66dbaf610","0xafdd96fbcda7aa73bf9eeb2292e036c25753d249caee3b9c013009cc22e10d3ec29e2aa6ddbb21c4e949b0c0bccaa7f4","0xb5a4e7436d5473647c002120a2cb436b9b28e27ad4ebdd7c5f122b91597c507d256d0cbd889d65b3a908531936e53053","0xb632414c3da704d80ac2f3e5e0e9f18a3637cdc2ebeb613c29300745582427138819c4e7b0bec3099c1b8739dac1807b","0xa28df1464d3372ce9f37ef1db33cc010f752156afae6f76949d98cd799c0cf225c20228ae86a4da592d65f0cffe3951b","0x898b93d0a31f7d3f11f253cb7a102db54b669fd150da302d8354d8e02b1739a47cb9bd88015f3baf12b00b879442464e","0x96fb88d89a12049091070cb0048a381902965e67a8493e3991eaabe5d3b7ff7eecd5c94493a93b174df3d9b2c9511755","0xb899cb2176f59a5cfba3e3d346813da7a82b03417cad6342f19cc8f12f28985b03bf031e856a4743fd7ebe16324805b0","0xa60e2d31bc48e0c0579db15516718a03b73f5138f15037491f4dae336c904e312eda82d50862f4debd1622bb0e56d866","0x979fc8b987b5cef7d4f4b58b53a2c278bd25a5c0ea6f41c715142ea5ff224c707de38451b0ad3aa5e749aa219256650a","0xb2a75bff18e1a6b9cf2a4079572e41205741979f57e7631654a3c0fcec57c876c6df44733c9da3d863db8dff392b44a3","0xb7a0f0e811222c91e3df98ff7f286b750bc3b20d2083966d713a84a2281744199e664879401e77470d44e5a90f3e5181","0x82b74ba21c9d147fbc338730e8f1f8a6e7fc847c3110944eb17a48bea5e06eecded84595d485506d15a3e675fd0e5e62","0xa7f44eef817d5556f0d1abcf420301217d23c69dd2988f44d91ea1f1a16c322263cbacd0f190b9ba22b0f141b9267b4f","0xaadb68164ede84fc1cb3334b3194d84ba868d5a88e4c9a27519eef4923bc4abf81aab8114449496c073c2a6a0eb24114","0xb5378605fabe9a8c12a5dc55ef2b1de7f51aedb61960735c08767a565793cea1922a603a6983dc25f7cea738d0f7c40d","0xa97a4a5cd8d51302e5e670aee78fe6b5723f6cc892902bbb4f131e82ca1dfd5de820731e7e3367fb0c4c1922a02196e3","0x8bdfeb15c29244d4a28896f2b2cb211243cd6a1984a3f5e3b0ebe5341c419beeab3304b390a009ffb47588018034b0ea","0xa9af3022727f2aa2fca3b096968e97edad3f08edcbd0dbca107b892ae8f746a9c0485e0d6eb5f267999b23a845923ed0","0x8e7594034feef412f055590fbb15b6322dc4c6ab7a4baef4685bd13d71a83f7d682b5781bdfa0d1c659489ce9c2b8000","0x84977ca6c865ebee021c58106c1a4ad0c745949ecc5332948002fd09bd9b890524878d0c29da96fd11207621136421fe","0x868755
1a79158e56b2375a271136756313122132a6670fa51f99a1b5c229ed8eea1655a734abae13228b3ebfd2a825dd","0xa0227d6708979d99edfc10f7d9d3719fd3fc68b0d815a7185b60307e4c9146ad2f9be2b8b4f242e320d4288ceeb9504c","0x89f75583a16735f9dd8b7782a130437805b34280ccea8dac6ecaee4b83fe96947e7b53598b06fecfffdf57ffc12cc445","0xa0056c3353227f6dd9cfc8e3399aa5a8f1d71edf25d3d64c982910f50786b1e395c508d3e3727ac360e3e040c64b5298","0xb070e61a6d813626144b312ded1788a6d0c7cec650a762b2f8df6e4743941dd82a2511cd956a3f141fc81e15f4e092da","0xb4e6db232e028a1f989bb5fc13416711f42d389f63564d60851f009dcffac01acfd54efa307aa6d4c0f932892d4e62b0","0x89b5991a67db90024ddd844e5e1a03ef9b943ad54194ae0a97df775dde1addf31561874f4e40fbc37a896630f3bbda58","0xad0e8442cb8c77d891df49cdb9efcf2b0d15ac93ec9be1ad5c3b3cca1f4647b675e79c075335c1f681d56f14dc250d76","0xb5d55a6ae65bb34dd8306806cb49b5ccb1c83a282ee47085cf26c4e648e19a52d9c422f65c1cd7e03ca63e926c5e92ea","0xb749501347e5ec07e13a79f0cb112f1b6534393458b3678a77f02ca89dca973fa7b30e55f0b25d8b92b97f6cb0120056","0x94144b4a3ffc5eec6ba35ce9c245c148b39372d19a928e236a60e27d7bc227d18a8cac9983851071935d8ffb64b3a34f","0x92bb4f9f85bc8c028a3391306603151c6896673135f8a7aefedd27acb322c04ef5dac982fc47b455d6740023e0dd3ea3","0xb9633a4a101461a782fc2aa092e9dbe4e2ad00987578f18cd7cf0021a909951d60fe79654eb7897806795f93c8ff4d1c","0x809f0196753024821b48a016eca5dbb449a7c55750f25981bb7a4b4c0e0846c09b8f6128137905055fc43a3f0deb4a74","0xa27dc9cdd1e78737a443570194a03d89285576d3d7f3a3cf15cc55b3013e42635d4723e2e8fe1d0b274428604b630db9","0x861f60f0462e04cd84924c36a28163def63e777318d00884ab8cb64c8df1df0bce5900342163edb60449296484a6c5bf","0xb7bc23fb4e14af4c4704a944253e760adefeca8caee0882b6bbd572c84434042236f39ae07a8f21a560f486b15d82819","0xb9a6eb492d6dd448654214bd01d6dc5ff12067a11537ab82023fc16167507ee25eed2c91693912f4155d1c07ed9650b3","0x97678af29c68f9a5e213bf0fb85c265303714482cfc4c2c00b4a1e8a76ed08834ee6af52357b143a1ca590fb0265ea5a","0x8a15b499e9eca5b6cac3070b5409e8296778222018ad8b53a5d1f6b70ad9bb10c68a015d105c941ed657bf3499299e33","0xb487fefede2e8091f2c7bfe85770db2edff1db83d4effe7f7d87bff5ab1ace35e9b823a71adfec6737fede8d67b3c467","0x8b51b916402aa2c437fce3bcad6dad3be8301a1a7eab9d163085b322ffb6c62abf28637636fe6114573950117fc92898","0xb06a2106d031a45a494adec0881cb2f82275dff9dcdd2bc16807e76f3bec28a6734edd3d54f0be8199799a78cd6228ad","0xaf0a185391bbe2315eb97feac98ad6dd2e5d931d012c621abd6e404a31cc188b286fef14871762190acf086482b2b5e2","0x8e78ee8206506dd06eb7729e32fceda3bebd8924a64e4d8621c72e36758fda3d0001af42443851d6c0aea58562870b43","0xa1ba52a569f0461aaf90b49b92be976c0e73ec4a2c884752ee52ffb62dd137770c985123d405dfb5de70692db454b54a","0x8d51b692fa1543c51f6b62b9acb8625ed94b746ef96c944ca02859a4133a5629da2e2ce84e111a7af8d9a5b836401c64","0xa7a20d45044cf6492e0531d0b8b26ffbae6232fa05a96ed7f06bdb64c2b0f5ca7ec59d5477038096a02579e633c7a3ff","0x84df867b98c53c1fcd4620fef133ee18849c78d3809d6aca0fb6f50ff993a053a455993f216c42ab6090fa5356b8d564","0xa7227c439f14c48e2577d5713c97a5205feb69acb0b449152842e278fa71e8046adfab468089c8b2288af1fc51fa945b","0x855189b3a105670779997690876dfaa512b4a25a24931a912c2f0f1936971d2882fb4d9f0b3d9daba77eaf660e9d05d5","0xb5696bd6706de51c502f40385f87f43040a5abf99df705d6aac74d88c913b8ecf7a99a63d7a37d9bdf3a941b9e432ff5","0xab997beb0d6df9c98d5b49864ef0b41a2a2f407e1687dfd6089959757ba30ed02228940b0e841afe6911990c74d536c4","0xb36b65f85546ebfdbe98823d5555144f96b4ab39279facd19c0de3b8919f105ba0315a0784dce4344b1bc62d8bb4a5a3","0xb8371f0e4450788720ac5e0f6cd3ecc5413d33895083b2c168d961ec2b5c3de411a4cc0712481cbe8df8c2fa1a7af006","0x98325d8026b810a8b7a114171a
e59a57e8bbc9848e7c3df992efc523621729fd8c9f52114ce01d7730541a1ada6f1df1","0x8d0e76dbd37806259486cd9a31bc8b2306c2b95452dc395546a1042d1d17863ef7a74c636b782e214d3aa0e8d717f94a","0xa4e15ead76da0214d702c859fb4a8accdcdad75ed08b865842bd203391ec4cba2dcc916455e685f662923b96ee0c023f","0x8618190972086ebb0c4c1b4a6c94421a13f378bc961cc8267a301de7390c5e73c3333864b3b7696d81148f9d4843fd02","0x85369d6cc7342e1aa15b59141517d8db8baaaeb7ab9670f3ba3905353948d575923d283b7e5a05b13a30e7baf1208a86","0x87c51ef42233c24a6da901f28c9a075d9ba3c625687c387ad6757b72ca6b5a8885e6902a3082da7281611728b1e45f26","0xaa6348a4f71927a3106ad0ea8b02fc8d8c65531e4ab0bd0a17243e66f35afe252e40ab8eef9f13ae55a72566ffdaff5c","0x96a3bc976e9d03765cc3fee275fa05b4a84c94fed6b767e23ca689394501e96f56f7a97cffddc579a6abff632bf153be","0x97dbf96c6176379fdb2b888be4e757b2bca54e74124bd068d3fa1dbd82a011bbeb75079da38e0cd22a761fe208ecad9b","0xb70cf0a1d14089a4129ec4e295313863a59da8c7e26bf74cc0e704ed7f0ee4d7760090d0ddf7728180f1bf2c5ac64955","0x882d664714cc0ffe53cbc9bef21f23f3649824f423c4dbad1f893d22c4687ab29583688699efc4d5101aa08b0c3e267a","0x80ecb7cc963e677ccaddbe3320831dd6ee41209acf4ed41b16dc4817121a3d86a1aac9c4db3d8c08a55d28257088af32","0xa25ba667d832b145f9ce18c3f9b1bd00737aa36db020e1b99752c8ef7d27c6c448982bd8d352e1b6df266b8d8358a8d5","0x83734841c13dee12759d40bdd209b277e743b0d08cc0dd1e0b7afd2d65bfa640400eefcf6be4a52e463e5b3d885eeac6","0x848d16505b04804afc773aebabb51b36fd8aacfbb0e09b36c0d5d57df3c0a3b92f33e7d5ad0a7006ec46ebb91df42b8c","0x909a8d793f599e33bb9f1dc4792a507a97169c87cd5c087310bc05f30afcd247470b4b56dec59894c0fb1d48d39bb54e","0x8e558a8559df84a1ba8b244ece667f858095c50bb33a5381e60fcc6ba586b69693566d8819b4246a27287f16846c1dfa","0x84d6b69729f5aaa000cd710c2352087592cfbdf20d5e1166977e195818e593fa1a50d1e04566be23163a2523dc1612f1","0x9536d262b7a42125d89f4f32b407d737ba8d9242acfc99d965913ab3e043dcac9f7072a43708553562cac4cba841df30","0x9598548923ca119d6a15fd10861596601dd1dedbcccca97bb208cdc1153cf82991ea8cc17686fbaa867921065265970c","0xb87f2d4af6d026e4d2836bc3d390a4a18e98a6e386282ce96744603bab74974272e97ac2da281afa21885e2cbb3a8001","0x991ece62bf07d1a348dd22191868372904b9f8cf065ae7aa4e44fd24a53faf6d851842e35fb472895963aa1992894918","0xa8c53dea4c665b30e51d22ca6bc1bc78aaf172b0a48e64a1d4b93439b053877ec26cb5221c55efd64fa841bbf7d5aff4","0x93487ec939ed8e740f15335b58617c3f917f72d07b7a369befd479ae2554d04deb240d4a14394b26192efae4d2f4f35d","0xa44793ab4035443f8f2968a40e043b4555960193ffa3358d22112093aadfe2c136587e4139ffd46d91ed4107f61ea5e0","0xb13fe033da5f0d227c75927d3dacb06dbaf3e1322f9d5c7c009de75cdcba5e308232838785ab69a70f0bedea755e003f","0x970a29b075faccd0700fe60d1f726bdebf82d2cc8252f4a84543ebd3b16f91be42a75c9719a39c4096139f0f31393d58","0xa4c3eb1f7160f8216fc176fb244df53008ff32f2892363d85254002e66e2de21ccfe1f3b1047589abee50f29b9d507e3","0x8c552885eab04ba40922a8f0c3c38c96089c95ff1405258d3f1efe8d179e39e1295cbf67677894c607ae986e4e6b1fb0","0xb3671746fa7f848c4e2ae6946894defadd815230b906b419143523cc0597bc1d6c0a4c1e09d49b66b4a2c11cde3a4de3","0x937a249a95813a5e2ef428e355efd202e15a37d73e56cfb7e57ea9f943f2ce5ca8026f2f1fd25bf164ba89d07077d858","0x83646bdf6053a04aa9e2f112499769e5bd5d0d10f2e13db3ca89bd45c0b3b7a2d752b7d137fb3909f9c62b78166c9339","0xb4eac4b91e763666696811b7ed45e97fd78310377ebea1674b58a2250973f80492ac35110ed1240cd9bb2d17493d708c","0x82db43a99bc6573e9d92a3fd6635dbbb249ac66ba53099c3c0c8c8080b121dd8243cd5c6e36ba0a4d2525bae57f5c89c","0xa64d6a264a681b49d134c655d5fc7756127f1ee7c93d328820f32bca68869f53115c0d27fef35fe71f7bc4fdaed97348","0x8739b7a9e2b4bc1831e7f04517771bc7cde683a5e74e05
2542517f8375a2f64e53e0d5ac925ef722327e7bb195b4d1d9","0x8f337cdd29918a2493515ebb5cf702bbe8ecb23b53c6d18920cc22f519e276ca9b991d3313e2d38ae17ae8bdfa4f8b7e","0xb0edeab9850e193a61f138ef2739fc42ceec98f25e7e8403bfd5fa34a7bc956b9d0898250d18a69fa4625a9b3d6129da","0xa9920f26fe0a6d51044e623665d998745c9eca5bce12051198b88a77d728c8238f97d4196f26e43b24f8841500b998d0","0x86e655d61502b979eeeeb6f9a7e1d0074f936451d0a1b0d2fa4fb3225b439a3770767b649256fe481361f481a8dbc276","0x84d3b32fa62096831cc3bf013488a9f3f481dfe293ae209ed19585a03f7db8d961a7a9dd0db82bd7f62d612707575d9c","0x81c827826ec9346995ffccf62a241e3b2d32f7357acd1b1f8f7a7dbc97022d3eb51b8a1230e23ce0b401d2e535e8cd78","0x94a1e40c151191c5b055b21e86f32e69cbc751dcbdf759a48580951834b96a1eed75914c0d19a38aefd21fb6c8d43d0c","0xab890222b44bc21b71f7c75e15b6c6e16bb03371acce4f8d4353ff3b8fcd42a14026589c5ed19555a3e15e4d18bfc3a3","0xaccb0be851e93c6c8cc64724cdb86887eea284194b10e7a43c90528ed97e9ec71ca69c6fac13899530593756dd49eab2","0xb630220aa9e1829c233331413ee28c5efe94ea8ea08d0c6bfd781955078b43a4f92915257187d8526873e6c919c6a1de","0xadd389a4d358c585f1274b73f6c3c45b58ef8df11f9d11221f620e241bf3579fba07427b288c0c682885a700cc1fa28d","0xa9fe6ca8bf2961a3386e8b8dcecc29c0567b5c0b3bcf3b0f9169f88e372b80151af883871fc5229815f94f43a6f5b2b0","0xad839ae003b92b37ea431fa35998b46a0afc3f9c0dd54c3b3bf7a262467b13ff3c323ada1c1ae02ac7716528bdf39e3e","0x9356d3fd0edcbbb65713c0f2a214394f831b26f792124b08c5f26e7f734b8711a87b7c4623408da6a091c9aef1f6af3c","0x896b25b083c35ac67f0af3784a6a82435b0e27433d4d74cd6d1eafe11e6827827799490fb1c77c11de25f0d75f14e047","0x8bfa019391c9627e8e5f05c213db625f0f1e51ec68816455f876c7e55b8f17a4f13e5aae9e3fb9e1cf920b1402ee2b40","0x8ba3a6faa6a860a8f3ce1e884aa8769ceded86380a86520ab177ab83043d380a4f535fe13884346c5e51bee68da6ab41","0xa8292d0844084e4e3bb7af92b1989f841a46640288c5b220fecfad063ee94e86e13d3d08038ec2ac82f41c96a3bfe14d","0x8229bb030b2fc566e11fd33c7eab7a1bb7b49fed872ea1f815004f7398cb03b85ea14e310ec19e1f23e0bdaf60f8f76c","0x8cfbf869ade3ec551562ff7f63c2745cc3a1f4d4dc853a0cd42dd5f6fe54228f86195ea8fe217643b32e9f513f34a545","0xac52a3c8d3270ddfe1b5630159da9290a5ccf9ccbdef43b58fc0a191a6c03b8a5974cf6e2bbc7bd98d4a40a3581482d7","0xab13decb9e2669e33a7049b8eca3ca327c40dea15ad6e0e7fa63ed506db1d258bc36ac88b35f65cae0984e937eb6575d","0xb5e748eb1a7a1e274ff0cc56311c198f2c076fe4b7e73e5f80396fe85358549df906584e6bb2c8195b3e2be7736850a5","0xb5cb911325d8f963c41f691a60c37831c7d3bbd92736efa33d1f77a22b3fde7f283127256c2f47e197571e6fe0b46149","0x8a01dc6ed1b55f26427a014faa347130738b191a06b800e32042a46c13f60b49534520214359d68eb2e170c31e2b8672","0xa72fa874866e19b2efb8e069328362bf7921ec375e3bcd6b1619384c3f7ee980f6cf686f3544e9374ff54b4d17a1629c","0x8db21092f7c5f110fba63650b119e82f4b42a997095d65f08f8237b02dd66fdf959f788df2c35124db1dbd330a235671","0x8c65d50433d9954fe28a09fa7ba91a70a590fe7ba6b3060f5e4be0f6cef860b9897fa935fb4ebc42133524eb071dd169","0xb4614058e8fa21138fc5e4592623e78b8982ed72aa35ee4391b164f00c68d277fa9f9eba2eeefc890b4e86eba5124591","0xab2ad3a1bce2fbd55ca6b7c23786171fe1440a97d99d6df4d80d07dd56ac2d7203c294b32fc9e10a6c259381a73f24a1","0x812ae3315fdc18774a8da3713a4679e8ed10b9405edc548c00cacbe25a587d32040566676f135e4723c5dc25df5a22e9","0xa464b75f95d01e5655b54730334f443c8ff27c3cb79ec7af4b2f9da3c2039c609908cd128572e1fd0552eb597e8cef8d","0xa0db3172e93ca5138fe419e1c49a1925140999f6eff7c593e5681951ee0ec1c7e454c851782cbd2b8c9bc90d466e90e0","0x806db23ba7d00b87d544eed926b3443f5f9c60da6b41b1c489fba8f73593b6e3b46ebfcab671ee009396cd77d5e68aa1","0x8bfdf2c0044cc80260994e1c0374588b6653947b178e8b312be5c2a05e05767e98
ea15077278506aee7df4fee1aaf89e","0x827f6558c16841b5592ff089c9c31e31eb03097623524394813a2e4093ad2d3f8f845504e2af92195aaa8a1679d8d692","0x925c4f8eab2531135cd71a4ec88e7035b5eea34ba9d799c5898856080256b4a15ed1a746e002552e2a86c9c157e22e83","0xa9f9a368f0e0b24d00a35b325964c85b69533013f9c2cfad9708be5fb87ff455210f8cb8d2ce3ba58ca3f27495552899","0x8ac0d3bebc1cae534024187e7c71f8927ba8fcc6a1926cb61c2b6c8f26bb7831019e635a376146c29872a506784a4aaa","0x97c577be2cbbfdb37ad754fae9df2ada5fc5889869efc7e18a13f8e502fbf3f4067a509efbd46fd990ab47ce9a70f5a8","0x935e7d82bca19f16614aa43b4a3474e4d20d064e4bfdf1cea2909e5c9ab72cfe3e54dc50030e41ee84f3588cebc524e9","0x941aafc08f7c0d94cebfbb1f0aad5202c02e6e37f2c12614f57e727efa275f3926348f567107ee6d8914dd71e6060271","0xaf0fbc1ba05b4b5b63399686df3619968be5d40073de0313cbf5f913d3d4b518d4c249cdd2176468ccaa36040a484f58","0xa0c414f23f46ca6d69ce74c6f8a00c036cb0edd098af0c1a7d39c802b52cfb2d5dbdf93fb0295453d4646e2af7954d45","0x909cf39e11b3875bb63b39687ae1b5d1f5a15445e39bf164a0b14691b4ddb39a8e4363f584ef42213616abc4785b5d66","0xa92bac085d1194fbd1c88299f07a061d0bdd3f980b663e81e6254dbb288bf11478c0ee880e28e01560f12c5ccb3c0103","0x841705cd5cd76b943e2b7c5e845b9dd3c8defe8ef67e93078d6d5e67ade33ad4b0fd413bc196f93b0a4073c855cd97d4","0x8e7eb8364f384a9161e81d3f1d52ceca9b65536ae49cc35b48c3e2236322ba4ae9973e0840802d9fa4f4d82ea833544f","0xaed3ab927548bc8bec31467ba80689c71a168e34f50dcb6892f19a33a099f5aa6b3f9cb79f5c0699e837b9a8c7f27efe","0xb8fbf7696210a36e20edabd77839f4dfdf50d6d015cdf81d587f90284a9bcef7d2a1ff520728d7cc69a4843d6c20dedd","0xa9d533769ce6830211c884ae50a82a7bf259b44ac71f9fb11f0296fdb3981e6b4c1753fe744647b247ebc433a5a61436","0x8b4bdf90d33360b7f428c71cde0a49fb733badba8c726876945f58c620ce7768ae0e98fc8c31fa59d8955a4823336bb1","0x808d42238e440e6571c59e52a35ae32547d502dc24fd1759d8ea70a7231a95859baf30b490a4ba55fa2f3aaa11204597","0x85594701f1d2fee6dc1956bc44c7b31db93bdeec2f3a7d622c1a08b26994760773e3d57521a44cfd7e407ac3fd430429","0xa66de045ce7173043a6825e9dc440ac957e2efb6df0a337f4f8003eb0c719d873a52e6eba3cb0d69d977ca37d9187674","0x87a1c6a1fdff993fa51efa5c3ba034c079c0928a7d599b906336af7c2dcab9721ceaf3108c646490af9dff9a754f54b3","0x926424223e462ceb75aed7c22ade8a7911a903b7e5dd4bc49746ddce8657f4616325cd12667d4393ac52cdd866396d0e","0xb5dc96106593b42b30f06f0b0a1e0c1aafc70432e31807252d3674f0b1ea5e58eac8424879d655c9488d85a879a3e572","0x997ca0987735cc716507cb0124b1d266d218b40c9d8e0ecbf26a1d65719c82a637ce7e8be4b4815d307df717bde7c72a","0x92994d3f57a569b7760324bb5ae4e8e14e1633d175dab06aa57b8e391540e05f662fdc08b8830f489a063f59b689a688","0xa8087fcc6aa4642cb998bea11facfe87eb33b90a9aa428ab86a4124ad032fc7d2e57795311a54ec9f55cc120ebe42df1","0xa9bd7d1de6c0706052ca0b362e2e70e8c8f70f1f026ea189b4f87a08ce810297ebfe781cc8004430776c54c1a05ae90c","0x856d33282e8a8e33a3d237fb0a0cbabaf77ba9edf2fa35a831fdafcadf620561846aa6cbb6bdc5e681118e1245834165","0x9524a7aa8e97a31a6958439c5f3339b19370f03e86b89b1d02d87e4887309dbbe9a3a8d2befd3b7ed5143c8da7e0a8ad","0x824fdf433e090f8acbd258ac7429b21f36f9f3b337c6d0b71d1416a5c88a767883e255b2888b7c906dd2e9560c4af24c","0x88c7fee662ca7844f42ed5527996b35723abffd0d22d4ca203b9452c639a5066031207a5ae763dbc0865b3299d19b1ec","0x919dca5c5595082c221d5ab3a5bc230f45da7f6dec4eb389371e142c1b9c6a2c919074842479c2844b72c0d806170c0c","0xb939be8175715e55a684578d8be3ceff3087f60fa875fff48e52a6e6e9979c955efef8ff67cfa2b79499ea23778e33b0","0x873b6db725e7397d11bc9bed9ac4468e36619135be686790a79bc6ed4249058f1387c9a802ea86499f692cf635851066","0xaeae06db3ec47e9e5647323fa02fac44e06e59b885ad8506bf71b184ab3895510c82f78b6b22a5d978e821
8e7f761e9f","0xb99c0a8359c72ab88448bae45d4bf98797a26bca48b0d4460cd6cf65a4e8c3dd823970ac3eb774ae5d0cea4e7fadf33e","0x8f10c8ec41cdfb986a1647463076a533e6b0eec08520c1562401b36bb063ac972aa6b28a0b6ce717254e35940b900e3c","0xa106d9be199636d7add43b942290269351578500d8245d4aae4c083954e4f27f64740a3138a66230391f2d0e6043a8de","0xa469997908244578e8909ff57cffc070f1dbd86f0098df3cfeb46b7a085cfecc93dc69ee7cad90ff1dc5a34d50fe580c","0xa4ef087bea9c20eb0afc0ee4caba7a9d29dfa872137828c721391273e402fb6714afc80c40e98bbd8276d3836bffa080","0xb07a013f73cd5b98dae0d0f9c1c0f35bff8a9f019975c4e1499e9bee736ca6fcd504f9bc32df1655ff333062382cff04","0xb0a77188673e87cc83348c4cc5db1eecf6b5184e236220c8eeed7585e4b928db849944a76ec60ef7708ef6dac02d5592","0xb1284b37e59b529f0084c0dacf0af6c0b91fc0f387bf649a8c74819debf606f7b07fc3e572500016fb145ec2b24e9f17","0x97b20b5b4d6b9129da185adfbf0d3d0b0faeba5b9715f10299e48ea0521709a8296a9264ce77c275a59c012b50b6519a","0xb9d37e946fae5e4d65c1fbfacc8a62e445a1c9d0f882e60cca649125af303b3b23af53c81d7bac544fb7fcfc7a314665","0x8e5acaac379f4bb0127efbef26180f91ff60e4c525bc9b798fc50dfaf4fe8a5aa84f18f3d3cfb8baead7d1e0499af753","0xb0c0b8ab1235bf1cda43d4152e71efc1a06c548edb964eb4afceb201c8af24240bf8ab5cae30a08604e77432b0a5faf0","0x8cc28d75d5c8d062d649cbc218e31c4d327e067e6dbd737ec0a35c91db44fbbd0d40ec424f5ed79814add16947417572","0x95ae6219e9fd47efaa9cb088753df06bc101405ba50a179d7c9f7c85679e182d3033f35b00dbba71fdcd186cd775c52e","0xb5d28fa09f186ebc5aa37453c9b4d9474a7997b8ae92748ecb940c14868792292ac7d10ade01e2f8069242b308cf97e5","0x8c922a0faa14cc6b7221f302df3342f38fc8521ec6c653f2587890192732c6da289777a6cd310747ea7b7d104af95995","0xb9ad5f660b65230de54de535d4c0fcae5bc6b59db21dea5500fdc12eea4470fb8ea003690fdd16d052523418d5e01e8c","0xa39a9dd41a0ff78c82979483731f1cd68d3921c3e9965869662c22e02dde3877802e180ba93f06e7346f96d9fa9261d2","0x8b32875977ec372c583b24234c27ed73aef00cdff61eb3c3776e073afbdeade548de9497c32ec6d703ff8ad0a5cb7fe4","0x9644cbe755a5642fe9d26cfecf170d3164f1848c2c2e271d5b6574a01755f3980b3fc870b98cf8528fef6ecef4210c16","0x81ea9d1fdd9dd66d60f40ce0712764b99da9448ae0b300f8324e1c52f154e472a086dda840cb2e0b9813dc8ce8afd4b5","0x906aaa4a7a7cdf01909c5cfbc7ded2abc4b869213cbf7c922d4171a4f2e637e56f17020b852ad339d83b8ac92f111666","0x939b5f11acbdeff998f2a080393033c9b9d8d5c70912ea651c53815c572d36ee822a98d6dfffb2e339f29201264f2cf4","0xaba4898bf1ccea9b9e2df1ff19001e05891581659c1cbbde7ee76c349c7fc7857261d9785823c9463a8aea3f40e86b38","0x83ca1a56b8a0be4820bdb5a9346357c68f9772e43f0b887729a50d2eb2a326bbcede676c8bf2e51d7c89bbd8fdb778a6","0x94e86e9fe6addfe2c3ee3a547267ed921f4230d877a85bb4442c2d9350c2fa9a9c54e6fe662de82d1a2407e4ab1691c2","0xa0cc3bdef671a59d77c6984338b023fa2b431b32e9ed2abe80484d73edc6540979d6f10812ecc06d4d0c5d4eaca7183c","0xb5343413c1b5776b55ea3c7cdd1f3af1f6bd802ea95effe3f2b91a523817719d2ecc3f8d5f3cc2623ace7e35f99ca967","0x92085d1ed0ed28d8cabe3e7ff1905ed52c7ceb1eac5503760c52fb5ee3a726aba7c90b483c032acc3f166b083d7ec370","0x8ec679520455275cd957fca8122724d287db5df7d29f1702a322879b127bff215e5b71d9c191901465d19c86c8d8d404","0xb65eb2c63d8a30332eb24ee8a0c70156fc89325ebbb38bacac7cf3f8636ad8a472d81ccca80423772abc00192d886d8a","0xa9fe1c060b974bee4d590f2873b28635b61bfcf614e61ff88b1be3eee4320f4874e21e8d666d8ac8c9aba672efc6ecae","0xb3fe2a9a389c006a831dea7e777062df84b5c2803f9574d7fbe10b7e1c125817986af8b6454d6be9d931a5ac94cfe963","0x95418ad13b734b6f0d33822d9912c4c49b558f68d08c1b34a0127fcfa666bcae8e6fda8832d2c75bb9170794a20e4d7c","0xa9a7df761e7f18b79494bf429572140c8c6e9d456c4d4e336184f3f51525a65eb9582bea1e601bdb6ef8150b7ca736a5","0xa0de0
3b1e75edf7998c8c1ac69b4a1544a6fa675a1941950297917366682e5644a4bda9cdeedfaf9473d7fccd9080b0c","0xa61838af8d95c95edf32663a68f007d95167bf6e41b0c784a30b22d8300cfdd5703bd6d16e86396638f6db6ae7e42a85","0x8866d62084d905c145ff2d41025299d8b702ac1814a7dec4e277412c161bc9a62fed735536789cb43c88693c6b423882","0x91da22c378c81497fe363e7f695c0268443abee50f8a6625b8a41e865638a643f07b157ee566de09ba09846934b4e2d7","0x941d21dd57c9496aa68f0c0c05507405fdd413acb59bc668ce7e92e1936c68ec4b065c3c30123319884149e88228f0b2","0xa77af9b094bc26966ddf2bf9e1520c898194a5ccb694915950dadc204facbe3066d3d89f50972642d76b14884cfbaa21","0x8e76162932346869f4618bde744647f7ab52ab498ad654bdf2a4feeb986ac6e51370841e5acbb589e38b6e7142bb3049","0xb60979ace17d6937ece72e4f015da4657a443dd01cebc7143ef11c09e42d4aa8855999a65a79e2ea0067f31c9fc2ab0f","0xb3e2ffdd5ee6fd110b982fd4fad4b93d0fca65478f986d086eeccb0804960bfaa1919afa743c2239973ea65091fe57d2","0x8ce0ce05e7d7160d44574011da687454dbd3c8b8290aa671731b066e2c82f8cf2d63cb8e932d78c6122ec610e44660e6","0xab005dd8d297045c39e2f72fb1c48edb501ccf3575d3d04b9817b3afee3f0bb0f3f53f64bda37d1d9cde545aae999bae","0x95bd7edb4c4cd60e3cb8a72558845a3cce6bb7032ccdf33d5a49ebb6ddf203bc3c79e7b7e550735d2d75b04c8b2441e8","0x889953ee256206284094e4735dbbb17975bafc7c3cb94c9fbfee4c3e653857bfd49e818f64a47567f721b98411a3b454","0xb188423e707640ab0e75a061e0b62830cde8afab8e1ad3dae30db69ffae4e2fc005bababbdcbd7213b918ed4f70e0c14","0xa97e0fafe011abd70d4f99a0b36638b3d6e7354284588f17a88970ed48f348f88392779e9a038c6cbc9208d998485072","0x87db11014a91cb9b63e8dfaa82cdebca98272d89eb445ee1e3ff9dbaf2b3fad1a03b888cffc128e4fe208ed0dddece0f","0xaad2e40364edd905d66ea4ac9d51f9640d6fda9a54957d26ba233809851529b32c85660fa401dbee3679ec54fa6dd966","0x863e99336ca6edf03a5a259e59a2d0f308206e8a2fb320cfc0be06057366df8e0f94b33a28f574092736b3c5ada84270","0xb34bcc56a057589f34939a1adc51de4ff6a9f4fee9c7fa9aa131e28d0cf0759a0c871b640162acdfbf91f3f1b59a3703","0x935dd28f2896092995c5eff1618e5b6efe7a40178888d7826da9b0503c2d6e68a28e7fac1a334e166d0205f0695ef614","0xb842cd5f8f5de5ca6c68cb4a5c1d7b451984930eb4cc18fd0934d52fdc9c3d2d451b1c395594d73bc3451432bfba653f","0x9014537885ce2debad736bc1926b25fdab9f69b216bf024f589c49dc7e6478c71d595c3647c9f65ff980b14f4bb2283b","0x8e827ccca1dd4cd21707140d10703177d722be0bbe5cac578db26f1ef8ad2909103af3c601a53795435b27bf95d0c9ed","0x8a0b8ad4d466c09d4f1e9167410dbe2edc6e0e6229d4b3036d30f85eb6a333a18b1c968f6ca6d6889bb08fecde017ef4","0x9241ee66c0191b06266332dc9161dede384c4bb4e116dbd0890f3c3790ec5566da4568243665c4725b718ac0f6b5c179","0xaeb4d5fad81d2b505d47958a08262b6f1b1de9373c2c9ba6362594194dea3e002ab03b8cbb43f867be83065d3d370f19","0x8781bc83bb73f7760628629fe19e4714b494dbed444c4e4e4729b7f6a8d12ee347841a199888794c2234f51fa26fc2b9","0xb58864f0acd1c2afa29367e637cbde1968d18589245d9936c9a489c6c495f54f0113ecdcbe4680ac085dd3c397c4d0c3","0x94a24284afaeead61e70f3e30f87248d76e9726759445ca18cdb9360586c60cc9f0ec1c397f9675083e0b56459784e2e","0xaed358853f2b54dcbddf865e1816c2e89be12e940e1abfa661e2ee63ffc24a8c8096be2072fa83556482c0d89e975124","0xb95374e6b4fc0765708e370bc881e271abf2e35c08b056a03b847e089831ef4fe3124b9c5849d9c276eb2e35b3daf264","0xb834cdbcfb24c8f84bfa4c552e7fadc0028a140952fd69ed13a516e1314a4cd35d4b954a77d51a1b93e1f5d657d0315d","0x8fb6d09d23bfa90e7443753d45a918d91d75d8e12ec7d016c0dfe94e5c592ba6aaf483d2f16108d190822d955ad9cdc3","0xaa315cd3c60247a6ad4b04f26c5404c2713b95972843e4b87b5a36a89f201667d70f0adf20757ebe1de1b29ae27dda50","0xa116862dca409db8beff5b1ccd6301cdd0c92ca29a3d6d20eb8b87f25965f42699ca66974dd1a355200157476b998f3b","0xb4c2f5fe173c4dc8311b60d04
a65ce1be87f070ac42e13cd19c6559a2931c6ee104859cc2520edebbc66a13dc7d30693","0x8d4a02bf99b2260c334e7d81775c5cf582b00b0c982ce7745e5a90624919028278f5e9b098573bad5515ce7fa92a80c8","0x8543493bf564ce6d97bd23be9bff1aba08bd5821ca834f311a26c9139c92a48f0c2d9dfe645afa95fec07d675d1fd53b","0x9344239d13fde08f98cb48f1f87d34cf6abe8faecd0b682955382a975e6eed64e863fa19043290c0736261622e00045c","0xaa49d0518f343005ca72b9e6c7dcaa97225ce6bb8b908ebbe7b1a22884ff8bfb090890364e325a0d414ad180b8f161d1","0x907d7fd3e009355ab326847c4a2431f688627faa698c13c03ffdd476ecf988678407f029b8543a475dcb3dafdf2e7a9c","0x845f1f10c6c5dad2adc7935f5cd2e2b32f169a99091d4f1b05babe7317b9b1cdce29b5e62f947dc621b9acbfe517a258","0x8f3be8e3b380ea6cdf9e9c237f5e88fd5a357e5ded80ea1fc2019810814de82501273b4da38916881125b6fa0cfd4459","0xb9c7f487c089bf1d20c822e579628db91ed9c82d6ca652983aa16d98b4270c4da19757f216a71b9c13ddee3e6e43705f","0x8ba2d8c88ad2b872db104ea8ddbb006ec2f3749fd0e19298a804bb3a5d94de19285cc7fb19fee58a66f7851d1a66c39f","0x9375ecd3ed16786fe161af5d5c908f56eeb467a144d3bbddfc767e90065b7c94fc53431adebecba2b6c9b5821184d36e","0xa49e069bfadb1e2e8bff6a4286872e2a9765d62f0eaa4fcb0e5af4bbbed8be3510fb19849125a40a8a81d1e33e81c3eb","0x9522cc66757b386aa6b88619525c8ce47a5c346d590bb3647d12f991e6c65c3ab3c0cfc28f0726b6756c892eae1672be","0xa9a0f1f51ff877406fa83a807aeb17b92a283879f447b8a2159653db577848cc451cbadd01f70441e351e9ed433c18bc","0x8ff7533dcff6be8714df573e33f82cf8e9f2bcaaa43e939c4759d52b754e502717950de4b4252fb904560fc31dce94a4","0x959724671e265a28d67c29d95210e97b894b360da55e4cf16e6682e7912491ed8ca14bfaa4dce9c25a25b16af580494f","0x92566730c3002f4046c737032487d0833c971e775de59fe02d9835c9858e2e3bc37f157424a69764596c625c482a2219","0xa84b47ceff13ed9c3e5e9cdf6739a66d3e7c2bd8a6ba318fefb1a9aecf653bb2981da6733ddb33c4b0a4523acc429d23","0xb4ddf571317e44f859386d6140828a42cf94994e2f1dcbcc9777f4eebbfc64fc1e160b49379acc27c4672b8e41835c5d","0x8ab95c94072b853d1603fdd0a43b30db617d13c1d1255b99075198e1947bfa5f59aed2b1147548a1b5e986cd9173d15c","0x89511f2eab33894fd4b3753d24249f410ff7263052c1fef6166fc63a79816656b0d24c529e45ccce6be28de6e375d916","0xa0866160ca63d4f2be1b4ea050dac6b59db554e2ebb4e5b592859d8df339b46fd7cb89aaed0951c3ee540aee982c238a","0x8fcc5cbba1b94970f5ff2eb1922322f5b0aa7d918d4b380c9e7abfd57afd8b247c346bff7b87af82efbce3052511cd1b","0x99aeb2a5e846b0a2874cca02c66ed40d5569eb65ab2495bc3f964a092e91e1517941f2688e79f8cca49cd3674c4e06dc","0xb7a096dc3bad5ca49bee94efd884aa3ff5615cf3825cf95fbe0ce132e35f46581d6482fa82666c7ef5f1643eaee8f1ca","0x94393b1da6eaac2ffd186b7725eca582f1ddc8cdd916004657f8a564a7c588175cb443fc6943b39029f5bbe0add3fad8","0x884b85fe012ccbcd849cb68c3ad832d83b3ef1c40c3954ffdc97f103b1ed582c801e1a41d9950f6bddc1d11f19d5ec76","0xb00061c00131eded8305a7ce76362163deb33596569afb46fe499a7c9d7a0734c084d336b38d168024c2bb42b58e7660","0xa439153ac8e6ca037381e3240e7ba08d056c83d7090f16ed538df25901835e09e27de2073646e7d7f3c65056af6e4ce7","0x830fc9ca099097d1f38b90e6843dc86f702be9d20bdacc3e52cae659dc41df5b8d2c970effa6f83a5229b0244a86fe22","0xb81ea2ffaaff2bb00dd59a9ab825ba5eed4db0d8ac9c8ed1a632ce8f086328a1cddd045fbe1ace289083c1325881b7e7","0xb51ea03c58daf2db32c99b9c4789b183365168cb5019c72c4cc91ac30b5fb7311d3db76e6fa41b7cd4a8c81e2f6cdc94","0xa4170b2c6d09ca5beb08318730419b6f19215ce6c631c854116f904be3bc30dd85a80c946a8ab054d3e307afaa3f8fbc","0x897cc42ff28971ff54d2a55dd6b35cfb8610ac902f3c06e3a5cea0e0a257e870c471236a8e84709211c742a09c5601a6","0xa18f2e98d389dace36641621488664ecbb422088ab03b74e67009b8b8acacaaa24fdcf42093935f355207d934adc52a8","0x92adcfb678cc2ba19c866f3f2b988fdcb4610567f3ab4
36cc0cb9acaf5a88414848d71133ebdbec1983e38e6190f1b5f","0xa86d43c2ce01b366330d3b36b3ca85f000c3548b8297e48478da1ee7d70d8576d4650cba7852ed125c0d7cb6109aa7f3","0x8ed31ceed9445437d7732dce78a762d72ff32a7636bfb3fd7974b7ae15db414d8184a1766915244355deb354fbc5803b","0x9268f70032584f416e92225d65af9ea18c466ebc7ae30952d56a4e36fd9ea811dde0a126da9220ba3c596ec54d8a335e","0x9433b99ee94f2d3fbdd63b163a2bdf440379334c52308bd24537f7defd807145a062ff255a50d119a7f29f4b85d250e3","0x90ce664f5e4628a02278f5cf5060d1a34f123854634b1870906e5723ac9afd044d48289be283b267d45fcbf3f4656aaf","0xaaf21c4d59378bb835d42ae5c5e5ab7a3c8c36a59e75997989313197752b79a472d866a23683b329ea69b048b87fa13e","0xb83c0589b304cec9ede549fde54f8a7c2a468c6657da8c02169a6351605261202610b2055c639b9ed2d5b8c401fb8f56","0x9370f326ea0f170c2c05fe2c5a49189f20aec93b6b18a5572a818cd4c2a6adb359e68975557b349fb54f065d572f4c92","0xac3232fa5ce6f03fca238bef1ce902432a90b8afce1c85457a6bee5571c033d4bceefafc863af04d4e85ac72a4d94d51","0x80d9ea168ff821b22c30e93e4c7960ce3ad3c1e6deeebedd342a36d01bd942419b187e2f382dbfd8caa34cca08d06a48","0xa387a3c61676fb3381eefa2a45d82625635a666e999aba30e3b037ec9e040f414f9e1ad9652abd3bcad63f95d85038db","0xa1b229fe32121e0b391b0f6e0180670b9dc89d79f7337de4c77ea7ad0073e9593846f06797c20e923092a08263204416","0x92164a9d841a2b828cedf2511213268b698520f8d1285852186644e9a0c97512cafa4bfbe29af892c929ebccd102e998","0x82ee2fa56308a67c7db4fd7ef539b5a9f26a1c2cc36da8c3206ba4b08258fbb3cec6fe5cdbd111433fb1ba2a1e275927","0x8c77bfe9e191f190a49d46f05600603fa42345592539b82923388d72392404e0b29a493a15e75e8b068dddcd444c2928","0x80b927f93ccf79dcf5c5b20bcf5a7d91d7a17bc0401bb7cc9b53a6797feac31026eb114257621f5a64a52876e4474cc1","0xb6b68b6501c37804d4833d5a063dd108a46310b1400549074e3cac84acc6d88f73948b7ad48d686de89c1ec043ae8c1a","0xab3da00f9bdc13e3f77624f58a3a18fc3728956f84b5b549d62f1033ae4b300538e53896e2d943f160618e05af265117","0xb6830e87233b8eace65327fdc764159645b75d2fd4024bf8f313b2dd5f45617d7ecfb4a0b53ccafb5429815a9a1adde6","0xb9251cfe32a6dc0440615aadcd98b6b1b46e3f4e44324e8f5142912b597ee3526bea2431e2b0282bb58f71be5b63f65e","0xaf8d70711e81cdddfb39e67a1b76643292652584c1ce7ce4feb1641431ad596e75c9120e85f1a341e7a4da920a9cdd94","0x98cd4e996594e89495c078bfd52a4586b932c50a449a7c8dfdd16043ca4cda94dafbaa8ad1b44249c99bbcc52152506e","0xb9fc6d1c24f48404a4a64fbe3e43342738797905db46e4132aee5f086aaa4c704918ad508aaefa455cfe1b36572e6242","0xa365e871d30ba9291cedaba1be7b04e968905d003e9e1af7e3b55c5eb048818ae5b913514fb08b24fb4fbdccbb35d0b8","0x93bf99510971ea9af9f1e364f1234c898380677c8e8de9b0dd24432760164e46c787bc9ec42a7ad450500706cf247b2d","0xb872f825a5b6e7b9c7a9ddfeded3516f0b1449acc9b4fd29fc6eba162051c17416a31e5be6d3563f424d28e65bab8b8f","0xb06b780e5a5e8eb4f4c9dc040f749cf9709c8a4c9ef15e925f442b696e41e5095db0778a6c73bcd329b265f2c6955c8b","0x848f1a981f5fc6cd9180cdddb8d032ad32cdfa614fc750d690dbae36cc0cd355cbf1574af9b3ffc8b878f1b2fafb9544","0xa03f48cbff3e9e8a3a655578051a5ae37567433093ac500ed0021c6250a51b767afac9bdb194ee1e3eac38a08c0eaf45","0xb5be78ce638ff8c4aa84352b536628231d3f7558c5be3bf010b28feac3022e64691fa672f358c8b663904aebe24a54ed","0xa9d4da70ff676fa55d1728ba6ab03b471fa38b08854d99e985d88c2d050102d8ccffbe1c90249a5607fa7520b15fe791","0x8fe9f7092ffb0b69862c8e972fb1ecf54308c96d41354ed0569638bb0364f1749838d6d32051fff1599112978c6e229c","0xae6083e95f37770ecae0df1e010456f165d96cfe9a7278c85c15cffd61034081ce5723e25e2bede719dc9341ec8ed481","0xa260891891103089a7afbd9081ea116cfd596fd1015f5b65e10b0961eb37fab7d09c69b7ce4be8bf35e4131848fb3fe4","0x8d729fa32f6eb9fd2f6a140bef34e8299a2f3111bffd0fe463aa8622c9d98bfd3
1a1df3f3e87cd5abc52a595f96b970e","0xa30ec6047ae4bc7da4daa7f4c28c93aedb1112cfe240e681d07e1a183782c9ff6783ac077c155af23c69643b712a533f","0xac830726544bfe7b5467339e5114c1a75f2a2a8d89453ce86115e6a789387e23551cd64620ead6283dfa4538eb313d86","0x8445c135b7a48068d8ed3e011c6d818cfe462b445095e2fbf940301e50ded23f272d799eea47683fc027430ce14613ef","0x95785411715c9ae9d8293ce16a693a2aa83e3cb1b4aa9f76333d0da2bf00c55f65e21e42e50e6c5772ce213dd7b4f7a0","0xb273b024fa18b7568c0d1c4d2f0c4e79ec509dafac8c5951f14192d63ddbcf2d8a7512c1c1b615cc38fa3e336618e0c5","0xa78b9d3ea4b6a90572eb27956f411f1d105fdb577ee2ffeec9f221da9b45db84bfe866af1f29597220c75e0c37a628d8","0xa4be2bf058c36699c41513c4d667681ce161a437c09d81383244fc55e1c44e8b1363439d0cce90a3e44581fb31d49493","0xb6eef13040f17dd4eba22aaf284d2f988a4a0c4605db44b8d2f4bf9567ac794550b543cc513c5f3e2820242dd704152e","0x87eb00489071fa95d008c5244b88e317a3454652dcb1c441213aa16b28cd3ecaa9b22fec0bdd483c1df71c37119100b1","0x92d388acdcb49793afca329cd06e645544d2269234e8b0b27d2818c809c21726bc9cf725651b951e358a63c83dedee24","0xae27e219277a73030da27ab5603c72c8bd81b6224b7e488d7193806a41343dff2456132274991a4722fdb0ef265d04cd","0x97583e08ecb82bbc27c0c8476d710389fa9ffbead5c43001bd36c1b018f29faa98de778644883e51870b69c5ffb558b5","0x90a799a8ce73387599babf6b7da12767c0591cadd36c20a7990e7c05ea1aa2b9645654ec65308ee008816623a2757a6a","0xa1b47841a0a2b06efd9ab8c111309cc5fc9e1d5896b3e42ed531f6057e5ade8977c29831ce08dbda40348386b1dcc06d","0xb92b8ef59bbddb50c9457691bc023d63dfcc54e0fd88bd5d27a09e0d98ac290fc90e6a8f6b88492043bf7c87fac8f3e4","0xa9d6240b07d62e22ec8ab9b1f6007c975a77b7320f02504fc7c468b4ee9cfcfd945456ff0128bc0ef2174d9e09333f8d","0x8e96534c94693226dc32bca79a595ca6de503af635f802e86442c67e77564829756961d9b701187fe91318da515bf0e6","0xb6ba290623cd8dd5c2f50931c0045d1cfb0c30877bc8fe58cbc3ff61ee8da100045a39153916efa1936f4aee0892b473","0xb43baa7717fac02d4294f5b3bb5e58a65b3557747e3188b482410388daac7a9c177f762d943fd5dcf871273921213da8","0xb9cf00f8fb5e2ef2b836659fece15e735060b2ea39b8e901d3dcbdcf612be8bf82d013833718c04cd46ffaa70b85f42e","0x8017d0c57419e414cbba504368723e751ef990cc6f05dad7b3c2de6360adc774ad95512875ab8337d110bf39a42026fa","0xae7401048b838c0dcd4b26bb6c56d79d51964a0daba780970b6c97daee4ea45854ea0ac0e4139b3fe60dac189f84df65","0x887b237b0cd0f816b749b21db0b40072f9145f7896c36916296973f9e6990ede110f14e5976c906d08987c9836cca57f","0xa88c3d5770148aee59930561ca1223aceb2c832fb5417e188dca935905301fc4c6c2c9270bc1dff7add490a125eb81c6","0xb6cf9b02c0cd91895ad209e38c54039523f137b5848b9d3ad33ae43af6c20c98434952db375fe378de7866f2d0e8b18a","0x84ef3d322ff580c8ad584b1fe4fe346c60866eb6a56e982ba2cf3b021ecb1fdb75ecc6c29747adda86d9264430b3f816","0xa0561c27224baf0927ad144cb71e31e54a064c598373fcf0d66aebf98ab7af1d8e2f343f77baefff69a6da750a219e11","0xaa5cc43f5b8162b016f5e1b61214c0c9d15b1078911c650b75e6cdfb49b85ee04c6739f5b1687d15908444f691f732de","0xad4ac099b935589c7b8fdfdf3db332b7b82bb948e13a5beb121ebd7db81a87d278024a1434bcf0115c54ca5109585c3d","0x8a00466abf3f109a1dcd19e643b603d3af23d42794ef8ca2514dd507ecea44a031ac6dbc18bd02f99701168b25c1791e","0xb00b5900dfad79645f8bee4e5adc7b84eb22e5b1e67df77ccb505b7fc044a6c08a8ea5faca662414eb945f874f884cea","0x950e204e5f17112250b22ea6bb8423baf522fc0af494366f18fe0f949f51d6e6812074a80875cf1ed9c8e7420058d541","0x91e5cbf8bb1a1d50c81608c9727b414d0dd2fb467ebc92f100882a3772e54f94979cfdf8e373fdef7c7fcdd60fec9e00","0xa093f6a857b8caaff80599c2e89c962b415ecbaa70d8fd973155fa976a284c6b29a855f5f7a3521134d00d2972755188","0xb4d55a3551b00da54cc010f80d99ddd2544bde9219a3173dfaadf3848edc7e4056ab532fb75ac26f5f714
1e724267663","0xa03ea050fc9b011d1b04041b5765d6f6453a93a1819cd9bd6328637d0b428f08526466912895dcc2e3008ee58822e9a7","0x99b12b3665e473d01bc6985844f8994fb65cb15745024fb7af518398c4a37ff215da8f054e8fdf3286984ae36a73ca5e","0x9972c7e7a7fb12e15f78d55abcaf322c11249cd44a08f62c95288f34f66b51f146302bce750ff4d591707075d9123bd2","0xa64b4a6d72354e596d87cda213c4fc2814009461570ccb27d455bbe131f8d948421a71925425b546d8cf63d5458cd64b","0x91c215c73b195795ede2228b7ed1f6e37892e0c6b0f4a0b5a16c57aa1100c84df9239054a173b6110d6c2b7f4bf1ce52","0x88807198910ec1303480f76a3683870246a995e36adaeadc29c22f0bdba8152fe705bd070b75de657b04934f7d0ccf80","0xb37c0026c7b32eb02cacac5b55cb5fe784b8e48b2945c64d3037af83ece556a117f0ff053a5968c2f5fa230e291c1238","0x94c768384ce212bc2387e91ce8b45e4ff120987e42472888a317abc9dcdf3563b62e7a61c8e98d7cdcbe272167d91fc6","0xa10c2564936e967a390cb14ef6e8f8b04ea9ece5214a38837eda09e79e0c7970b1f83adf017c10efd6faa8b7ffa2c567","0xa5085eed3a95f9d4b1269182ea1e0d719b7809bf5009096557a0674bde4201b0ddc1f0f16a908fc468846b3721748ce3","0x87468eb620b79a0a455a259a6b4dfbc297d0d53336537b771254dd956b145dc816b195b7002647ea218552e345818a3f","0xace2b77ffb87366af0a9cb5d27d6fc4a14323dbbf1643f5f3c4559306330d86461bb008894054394cbfaefeaa0bc2745","0xb27f56e840a54fbd793f0b7a7631aa4cee64b5947e4382b2dfb5eb1790270288884c2a19afebe5dc0c6ef335d4531c1c","0x876e438633931f7f895062ee16c4b9d10428875f7bc79a8e156a64d379a77a2c45bf5430c5ab94330f03da352f1e9006","0xa2512a252587d200d2092b44c914df54e04ff8bcef36bf631f84bde0cf5a732e3dc7f00f662842cfd74b0b0f7f24180e","0x827f1bc8f54a35b7a4bd8154f79bcc055e45faed2e74adf7cf21cca95df44d96899e847bd70ead6bb27b9c0ed97bbd8b","0xa0c92cf5a9ed843714f3aea9fe7b880f622d0b4a3bf66de291d1b745279accf6ba35097849691370f41732ba64b5966b","0xa63f5c1e222775658421c487b1256b52626c6f79cb55a9b7deb2352622cedffb08502042d622eb3b02c97f9c09f9c957","0x8cc093d52651e65fb390e186db6cc4de559176af4624d1c44cb9b0e836832419dacac7b8db0627b96288977b738d785d","0xaa7b6a17dfcec146134562d32a12f7bd7fe9522e300859202a02939e69dbd345ed7ff164a184296268f9984f9312e8fc","0x8ac76721f0d2b679f023d06cbd28c85ae5f4b43c614867ccee88651d4101d4fd352dbdb65bf36bfc3ebc0109e4b0c6f9","0x8d350f7c05fc0dcd9a1170748846fb1f5d39453e4cb31e6d1457bed287d96fc393b2ecc53793ca729906a33e59c6834a","0xb9913510dfc5056d7ec5309f0b631d1ec53e3a776412ada9aefdaf033c90da9a49fdde6719e7c76340e86599b1f0eec2","0x94955626bf4ce87612c5cfffcf73bf1c46a4c11a736602b9ba066328dc52ad6d51e6d4f53453d4ed55a51e0aad810271","0xb0fcab384fd4016b2f1e53f1aafd160ae3b1a8865cd6c155d7073ecc1664e05b1d8bca1def39c158c7086c4e1103345e","0x827de3f03edfbde08570b72de6662c8bfa499b066a0a27ebad9b481c273097d17a5a0a67f01553da5392ec3f149b2a78","0xab7940384c25e9027c55c40df20bd2a0d479a165ced9b1046958353cd69015eeb1e44ed2fd64e407805ba42df10fc7bf","0x8ad456f6ff8cd58bd57567d931f923d0c99141978511b17e03cab7390a72b9f62498b2893e1b05c7c22dd274e9a31919","0xac75399e999effe564672db426faa17a839e57c5ef735985c70cd559a377adec23928382767b55ed5a52f7b11b54b756","0xb17f975a00b817299ac7af5f2024ea820351805df58b43724393bfb3920a8cd747a3bbd4b8286e795521489db3657168","0xa2bed800a6d95501674d9ee866e7314063407231491d794f8cf57d5be020452729c1c7cefd8c50dc1540181f5caab248","0x9743f5473171271ffdd3cc59a3ae50545901a7b45cd4bc3570db487865f3b73c0595bebabbfe79268809ee1862e86e4a","0xb7eab77c2d4687b60d9d7b04e842b3880c7940140012583898d39fcc22d9b9b0a9be2c2e3788b3e6f30319b39c338f09","0x8e2b8f797a436a1b661140e9569dcf3e1eea0a77c7ff2bc4ff0f3e49af04ed2de95e255df8765f1d0927fb456a9926b1","0x8aefea201d4a1f4ff98ffce94e540bb313f2d4dfe7e9db484a41f13fc316ed02b282e1acc9bc6f56cad2dc2e393a44c9","0xb950
c17c0e5ca6607d182144aa7556bb0efe24c68f06d79d6413a973b493bfdf04fd147a4f1ab03033a32004cc3ea66f","0xb7b8dcbb179a07165f2dc6aa829fad09f582a71b05c3e3ea0396bf9e6fe73076f47035c031c2101e8e38e0d597eadd30","0xa9d77ed89c77ec1bf8335d08d41c3c94dcca9fd1c54f22837b4e54506b212aa38d7440126c80648ab7723ff18e65ed72","0xa819d6dfd4aef70e52b8402fe5d135f8082d40eb7d3bb5c4d7997395b621e2bb10682a1bad2c9caa33dd818550fc3ec6","0x8f6ee34128fac8bbf13ce2d68b2bb363eb4fd65b297075f88e1446ddeac242500eeb4ef0735e105882ff5ba8c44c139b","0xb4440e48255c1644bcecf3a1e9958f1ec4901cb5b1122ee5b56ffd02cad1c29c4266999dbb85aa2605c1b125490074d4","0xa43304a067bede5f347775d5811cf65a6380a8d552a652a0063580b5c5ef12a0867a39c7912fa219e184f4538eba1251","0xa891ad67a790089ffc9f6d53e6a3d63d3556f5f693e0cd8a7d0131db06fd4520e719cfcc3934f0a8f62a95f90840f1d4","0xaea6df8e9bb871081aa0fc5a9bafb00be7d54012c5baf653791907d5042a326aeee966fd9012a582cc16695f5baf7042","0x8ffa2660dc52ed1cd4eff67d6a84a8404f358a5f713d04328922269bee1e75e9d49afeec0c8ad751620f22352a438e25","0x87ec6108e2d63b06abed350f8b363b7489d642486f879a6c3aa90e5b0f335efc2ff2834eef9353951a42136f8e6a1b32","0x865619436076c2760d9e87ddc905023c6de0a8d56eef12c98a98c87837f2ca3f27fd26a2ad752252dbcbe2b9f1d5a032","0x980437dce55964293cb315c650c5586ffd97e7a944a83f6618af31c9d92c37b53ca7a21bb5bc557c151b9a9e217e7098","0x95d128fc369df4ad8316b72aea0ca363cbc7b0620d6d7bb18f7076a8717a6a46956ff140948b0cc4f6d2ce33b5c10054","0x8c7212d4a67b9ec70ebbca04358ad2d36494618d2859609163526d7b3acc2fc935ca98519380f55e6550f70a9bc76862","0x893a2968819401bf355e85eee0f0ed0406a6d4a7d7f172d0017420f71e00bb0ba984f6020999a3cdf874d3cd8ebcd371","0x9103c1af82dece25d87274e89ea0acd7e68c2921c4af3d8d7c82ab0ed9990a5811231b5b06113e7fa43a6bd492b4564f","0x99cfd87a94eab7d35466caa4ed7d7bb45e5c932b2ec094258fb14bf205659f83c209b83b2f2c9ccb175974b2a33e7746","0x874b6b93e4ee61be3f00c32dd84c897ccd6855c4b6251eb0953b4023634490ed17753cd3223472873cbc6095b2945075","0x84a32c0dc4ea60d33aac3e03e70d6d639cc9c4cc435c539eff915017be3b7bdaba33349562a87746291ebe9bc5671f24","0xa7057b24208928ad67914e653f5ac1792c417f413d9176ba635502c3f9c688f7e2ee81800d7e3dc0a340c464da2fd9c5","0xa03fb9ed8286aacfa69fbd5d953bec591c2ae4153400983d5dbb6cd9ea37fff46ca9e5cceb9d117f73e9992a6c055ad2","0x863b2de04e89936c9a4a2b40380f42f20aefbae18d03750fd816c658aee9c4a03df7b12121f795c85d01f415baaeaa59","0x8526eb9bd31790fe8292360d7a4c3eed23be23dd6b8b8f01d2309dbfdc0cfd33ad1568ddd7f8a610f3f85a9dfafc6a92","0xb46ab8c5091a493d6d4d60490c40aa27950574a338ea5bbc045be3a114af87bdcb160a8c80435a9b7ad815f3cb56a3f3","0xaeadc47b41a8d8b4176629557646202f868b1d728b2dda58a347d937e7ffc8303f20d26d6c00b34c851b8aeec547885d","0xaebb19fc424d72c1f1822aa7adc744cd0ef7e55727186f8df8771c784925058c248406ebeeaf3c1a9ee005a26e9a10c6","0x8ff96e81c1a4a2ab1b4476c21018fae0a67e92129ee36120cae8699f2d7e57e891f5c624902cb1b845b944926a605cc3","0x8251b8d2c43fadcaa049a9e7aff838dae4fb32884018d58d46403ac5f3beb5c518bfd45f03b8abb710369186075eb71c","0xa8b2a64f865f51a5e5e86a66455c093407933d9d255d6b61e1fd81ffafc9538d73caaf342338a66ba8ee166372a3d105","0xaad915f31c6ba7fdc04e2aaac62e84ef434b7ee76a325f07dc430d12c84081999720181067b87d792efd0117d7ee1eab","0xa13db3bb60389883fd41d565c54fb5180d9c47ce2fe7a169ae96e01d17495f7f4fa928d7e556e7c74319c4c25d653eb2","0xa4491b0198459b3f552855d680a59214eb74e6a4d6c5fa3b309887dc50ebea2ecf6d26c040550f7dc478b452481466fb","0x8f017f13d4b1e3f0c087843582b52d5f8d13240912254d826dd11f8703a99a2f3166dfbdfdffd9a3492979d77524276b","0x96c3d5dcd032660d50d7cd9db2914f117240a63439966162b10c8f1f3cf74bc83b0f15451a43b31dbd85e4a7ce0e4bb1","0xb479ec4bb79573d32e0ec93b
92bdd7ec8c26ddb5a2d3865e7d4209d119fd3499eaac527615ffac78c440e60ef3867ae0","0xb2c49c4a33aa94b52b6410b599e81ff15490aafa7e43c8031c865a84e4676354a9c81eb4e7b8be6825fdcefd1e317d44","0x906dc51d6a90c089b6704b47592805578a6eed106608eeb276832f127e1b8e858b72e448edcbefb497d152447e0e68ff","0xb0e81c63b764d7dfbe3f3fddc9905aef50f3633e5d6a4af6b340495124abedcff5700dfd1577bbbed7b6bf97d02719cb","0x9304c64701e3b4ed6d146e48a881f7d83a17f58357cca0c073b2bb593afd2d94f6e2a7a1ec511d0a67ad6ff4c3be5937","0xb6fdbd12ba05aa598d80b83f70a15ef90e5cba7e6e75fa038540ee741b644cd1f408a6cecfd2a891ef8d902de586c6b5","0xb80557871a6521b1b3c74a1ba083ae055b575df607f1f7b04c867ba8c8c181ea68f8d90be6031f4d25002cca27c44da2","0xaa7285b8e9712e06b091f64163f1266926a36607f9d624af9996856ed2aaf03a580cb22ce407d1ade436c28b44ca173f","0x8148d72b975238b51e6ea389e5486940d22641b48637d7dfadfa603a605bfc6d74a016480023945d0b85935e396aea5d","0x8a014933a6aea2684b5762af43dcf4bdbb633cd0428d42d71167a2b6fc563ece5e618bff22f1db2ddb69b845b9a2db19","0x990d91740041db770d0e0eb9d9d97d826f09fd354b91c41e0716c29f8420e0e8aac0d575231efba12fe831091ec38d5a","0x9454d0d32e7e308ddec57cf2522fb1b67a2706e33fb3895e9e1f18284129ab4f4c0b7e51af25681d248d7832c05eb698","0xa5bd434e75bac105cb3e329665a35bce6a12f71dd90c15165777d64d4c13a82bceedb9b48e762bd24034e0fc9fbe45f4","0xb09e3b95e41800d4dc29c6ffdaab2cd611a0050347f6414f154a47ee20ee59bf8cf7181454169d479ebce1eb5c777c46","0xb193e341d6a047d15eea33766d656d807b89393665a783a316e9ba10518e5515c8e0ade3d6e15641d917a8a172a5a635","0xade435ec0671b3621dde69e07ead596014f6e1daa1152707a8c18877a8b067bde2895dd47444ffa69db2bbef1f1d8816","0xa7fd3d6d87522dfc56fb47aef9ce781a1597c56a8bbfd796baba907afdc872f753d732bfda1d3402aee6c4e0c189f52d","0xa298cb4f4218d0464b2fab393e512bbc477c3225aa449743299b2c3572f065bc3a42d07e29546167ed9e1b6b3b3a3af3","0xa9ee57540e1fd9c27f4f0430d194b91401d0c642456c18527127d1f95e2dba41c2c86d1990432eb38a692fda058fafde","0x81d6c1a5f93c04e6d8e5a7e0678c1fc89a1c47a5c920bcd36180125c49fcf7c114866b90e90a165823560b19898a7c16","0xa4b7a1ec9e93c899b9fd9aaf264c50e42c36c0788d68296a471f7a3447af4dbc81e4fa96070139941564083ec5b5b5a1","0xb3364e327d381f46940c0e11e29f9d994efc6978bf37a32586636c0070b03e4e23d00650c1440f448809e1018ef9f6d8","0x8056e0913a60155348300e3a62e28b5e30629a90f7dd4fe11289097076708110a1d70f7855601782a3cdc5bdb1ca9626","0xb4980fd3ea17bac0ba9ee1c470b17e575bb52e83ebdd7d40c93f4f87bebeaff1c8a679f9d3d09d635f068d37d5bd28bd","0x905a9299e7e1853648e398901dfcd437aa575c826551f83520df62984f5679cb5f0ea86aa45ed3e18b67ddc0dfafe809","0xab99553bf31a84f2e0264eb34a08e13d8d15e2484aa9352354becf9a15999c76cc568d68274b70a65e49703fc23540d0","0xa43681597bc574d2dae8964c9a8dc1a07613d7a1272bdcb818d98c85d44e16d744250c33f3b5e4d552d97396b55e601f","0xa54e5a31716fccb50245898c99865644405b8dc920ded7a11f3d19bdc255996054b268e16f2e40273f11480e7145f41e","0x8134f3ad5ef2ad4ba12a8a4e4d8508d91394d2bcdc38b7c8c8c0b0a820357ac9f79d286c65220f471eb1adca1d98fc68","0x94e2f755e60471578ab2c1adb9e9cea28d4eec9b0e92e0140770bca7002c365fcabfe1e5fb4fe6cfe79a0413712aa3ef","0xad48f8d0ce7eb3cc6e2a3086ad96f562e5bed98a360721492ae2e74dc158586e77ec8c35d5fd5927376301b7741bad2b","0x8614f0630bdd7fbad3a31f55afd9789f1c605dc85e7dc67e2edfd77f5105f878bb79beded6e9f0b109e38ea7da67e8d5","0x9804c284c4c5e77dabb73f655b12181534ca877c3e1e134aa3f47c23b7ec92277db34d2b0a5d38d2b69e5d1c3008a3e3","0xa51b99c3088e473afdaa9e0a9f7e75a373530d3b04e44e1148da0726b95e9f5f0c7e571b2da000310817c36f84b19f7f","0xac4ff909933b3b76c726b0a382157cdc74ab851a1ac6cef76953c6444441804cc43abb883363f416592e8f6cfbc4550b","0xae7d915eb9fc928b65a29d6edbc75682d08584d0014f
7bcf17d59118421ae07d26a02137d1e4de6938bcd1ab8ef48fad","0x852f7e453b1af89b754df6d11a40d5d41ea057376e8ecacd705aacd2f917457f4a093d6b9a8801837fa0f62986ad7149","0x92c6bf5ada5d0c3d4dd8058483de36c215fa98edab9d75242f3eff9db07c734ad67337da6f0eefe23a487bf75a600dee","0xa2b42c09d0db615853763552a48d2e704542bbd786aae016eb58acbf6c0226c844f5fb31e428cb6450b9db855f8f2a6f","0x880cc07968266dbfdcfbc21815cd69e0eddfee239167ac693fb0413912d816f2578a74f7716eecd6deefa68c6eccd394","0xb885b3ace736cd373e8098bf75ba66fa1c6943ca1bc4408cd98ac7074775c4478594f91154b8a743d9c697e1b29f5840","0xa51ce78de512bd87bfa0835de819941dffbf18bec23221b61d8096fc9436af64e0693c335b54e7bfc763f287bdca2db6","0xa3c76166a3bdb9b06ef696e57603b58871bc72883ee9d45171a30fe6e1d50e30bc9c51b4a0f5a7270e19a77b89733850","0xacefc5c6f8a1e7c24d7b41e0fc7f6f3dc0ede6cf3115ffb9a6e54b1d954cbca9bda8ad7a084be9be245a1b8e9770d141","0xb420ed079941842510e31cfad117fa11fb6b4f97dfbc6298cb840f27ebaceba23eeaf3f513bcffbf5e4aae946310182d","0x95c3bb5ef26c5ed2f035aa5d389c6b3c15a6705b9818a3fefaed28922158b35642b2e8e5a1a620fdad07e75ad4b43af4","0x825149f9081ecf07a2a4e3e8b5d21bade86c1a882475d51c55ee909330b70c5a2ac63771c8600c6f38df716af61a3ea1","0x873b935aae16d9f08adbc25353cee18af2f1b8d5f26dec6538d6bbddc515f2217ed7d235dcfea59ae61b428798b28637","0x9294150843a2bedcedb3bb74c43eb28e759cf9499582c5430bccefb574a8ddd4f11f9929257ff4c153990f9970a2558f","0xb619563a811cc531da07f4f04e5c4c6423010ff9f8ed7e6ec9449162e3d501b269fb1c564c09c0429431879b0f45df02","0x91b509b87eb09f007d839627514658c7341bc76d468920fe8a740a8cb96a7e7e631e0ea584a7e3dc1172266f641d0f5c","0x8b8aceace9a7b9b4317f1f01308c3904d7663856946afbcea141a1c615e21ccad06b71217413e832166e9dd915fbe098","0x87b3b36e725833ea0b0f54753c3728c0dbc87c52d44d705ffc709f2d2394414c652d3283bab28dcce09799504996cee0","0xb2670aad5691cbf308e4a6a77a075c4422e6cbe86fdba24e9f84a313e90b0696afb6a067eebb42ba2d10340d6a2f6e51","0x876784a9aff3d54faa89b2bacd3ff5862f70195d0b2edc58e8d1068b3c9074c0da1cfa23671fe12f35e33b8a329c0ccd","0x8b48b9e758e8a8eae182f5cbec96f67d20cca6d3eee80a2d09208eb1d5d872e09ef23d0df8ebbb9b01c7449d0e3e3650","0xb79303453100654c04a487bdcadc9e3578bc80930c489a7069a52e8ca1dba36c492c8c899ce025f8364599899baa287d","0x961b35a6111da54ece6494f24dacd5ea46181f55775b5f03df0e370c34a5046ac2b4082925855325bb42bc2a2c98381d","0xa31feb1be3f5a0247a1f7d487987eb622e34fca817832904c6ee3ee60277e5847945a6f6ea1ac24542c72e47bdf647df","0xa12a2aa3e7327e457e1aae30e9612715dd2cfed32892c1cd6dcda4e9a18203af8a44afb46d03b2eed89f6b9c5a2c0c23","0xa08265a838e69a2ca2f80fead6ccf16f6366415b920c0b22ee359bcd8d4464ecf156f400a16a7918d52e6d733dd64211","0xb723d6344e938d801cca1a00032af200e541d4471fd6cbd38fb9130daa83f6a1dffbbe7e67fc20f9577f884acd7594b2","0xa6733d83ec78ba98e72ddd1e7ff79b7adb0e559e256760d0c590a986e742445e8cdf560d44b29439c26d87edd0b07c8c","0xa61c2c27d3f7b9ff4695a17afedf63818d4bfba390507e1f4d0d806ce8778d9418784430ce3d4199fd3bdbc2504d2af3","0x8332f3b63a6dc985376e8b1b25eeae68be6160fbe40053ba7bcf6f073204f682da72321786e422d3482fd60c9e5aa034","0xa280f44877583fbb6b860d500b1a3f572e3ee833ec8f06476b3d8002058e25964062feaa1e5bec1536d734a5cfa09145","0xa4026a52d277fcea512440d2204f53047718ebfcae7b48ac57ea7f6bfbc5de9d7304db9a9a6cbb273612281049ddaec5","0x95cdf69c831ab2fad6c2535ede9c07e663d2ddccc936b64e0843d2df2a7b1c31f1759c3c20f1e7a57b1c8f0dbb21b540","0x95c96cec88806469c277ab567863c5209027cecc06c7012358e5f555689c0d9a5ffb219a464f086b45817e8536b86d2f","0xafe38d4684132a0f03d806a4c8df556bf589b25271fbc6fe2e1ed16de7962b341c5003755da758d0959d2e6499b06c68","0xa9b77784fda64987f97c3a23c5e8f61b918be0f7c59ba285084116d60465c4a2
aaafc8857eb16823282cc83143eb9126","0xa830f05881ad3ce532a55685877f529d32a5dbe56cea57ffad52c4128ee0fad0eeaf0da4362b55075e77eda7babe70e5","0x992b3ad190d6578033c13ed5abfee4ef49cbc492babb90061e3c51ee4b5790cdd4c8fc1abff1fa2c00183b6b64f0bbbe","0xb1015424d9364aeff75de191652dc66484fdbec3e98199a9eb9671ec57bec6a13ff4b38446e28e4d8aedb58dd619cd90","0xa745304604075d60c9db36cada4063ac7558e7ec2835d7da8485e58d8422e817457b8da069f56511b02601289fbb8981","0xa5ba4330bc5cb3dbe0486ddf995632a7260a46180a08f42ae51a2e47778142132463cc9f10021a9ad36986108fefa1a9","0xb419e9fd4babcaf8180d5479db188bb3da232ae77a1c4ed65687c306e6262f8083070a9ac32220cddb3af2ec73114092","0xa49e23dc5f3468f3bf3a0bb7e4a114a788b951ff6f23a3396ae9e12cbff0abd1240878a3d1892105413dbc38818e807c","0xb7ecc7b4831f650202987e85b86bc0053f40d983f252e9832ef503aea81c51221ce93279da4aa7466c026b2d2070e55d","0x96a8c35cb87f84fa84dcd6399cc2a0fd79cc9158ef4bdde4bae31a129616c8a9f2576cd19baa3f497ca34060979aed7d","0x8681b2c00aa62c2b519f664a95dcb8faef601a3b961bb4ce5d85a75030f40965e2983871d41ea394aee934e859581548","0x85c229a07efa54a713d0790963a392400f55fbb1a43995a535dc6c929f20d6a65cf4efb434e0ad1cb61f689b8011a3bc","0x90856f7f3444e5ad44651c28e24cc085a5db4d2ffe79aa53228c26718cf53a6e44615f3c5cda5aa752d5f762c4623c66","0x978999b7d8aa3f28a04076f74d11c41ef9c89fdfe514936c4238e0f13c38ec97e51a5c078ebc6409e517bfe7ccb42630","0xa099914dd7ed934d8e0d363a648e9038eb7c1ec03fa04dbcaa40f7721c618c3ef947afef7a16b4d7ac8c12aa46637f03","0xab2a104fed3c83d16f2cda06878fa5f30c8c9411de71bfb67fd2fc9aa454dcbcf3d299d72f8cc12e919466a50fcf7426","0xa4471d111db4418f56915689482f6144efc4664cfb0311727f36c864648d35734351becc48875df96f4abd3cfcf820f9","0x83be11727cd30ea94ccc8fa31b09b81c9d6a9a5d3a4686af9da99587332fe78c1f94282f9755854bafd6033549afec91","0x88020ff971dc1a01a9e993cd50a5d2131ffdcbb990c1a6aaa54b20d8f23f9546a70918ea57a21530dcc440c1509c24ad","0xae24547623465e87905eaffa1fa5d52bb7c453a8dbd89614fa8819a2abcedaf455c2345099b7324ae36eb0ad7c8ef977","0xb59b0c60997de1ee00b7c388bc7101d136c9803bf5437b1d589ba57c213f4f835a3e4125b54738e78abbc21b000f2016","0xa584c434dfe194546526691b68fa968c831c31da42303a1d735d960901c74011d522246f37f299555416b8cf25c5a548","0x80408ce3724f4837d4d52376d255e10f69eb8558399ae5ca6c11b78b98fe67d4b93157d2b9b639f1b5b64198bfe87713","0xabb941e8d406c2606e0ddc35c113604fdd9d249eacc51cb64e2991e551b8639ce44d288cc92afa7a1e7fc599cfc84b22","0xb223173f560cacb1c21dba0f1713839e348ad02cbfdef0626748604c86f89e0f4c919ed40b583343795bdd519ba952c8","0xaf1c70512ec3a19d98b8a1fc3ff7f7f5048a27d17d438d43f561974bbdd116fcd5d5c21040f3447af3f0266848d47a15","0x8a44809568ebe50405bede19b4d2607199159b26a1b33e03d180e6840c5cf59d991a4fb150d111443235d75ecad085b7","0xb06207cdca46b125a27b3221b5b50cf27af4c527dd7c80e2dbcebbb09778a96df3af67e50f07725239ce3583dad60660","0x993352d9278814ec89b26a11c4a7c4941bf8f0e6781ae79559d14749ee5def672259792db4587f85f0100c7bb812f933","0x9180b8a718b971fd27bc82c8582d19c4b4f012453e8c0ffeeeffe745581fc6c07875ab28be3af3fa3896d19f0c89ac5b","0x8b8e1263eb48d0fe304032dd5ea1f30e73f0121265f7458ba9054d3626894e8a5fef665340abd2ede9653045c2665938","0x99a2beee4a10b7941c24b2092192faf52b819afd033e4a2de050fd6c7f56d364d0cf5f99764c3357cf32399e60fc5d74","0x946a4aad7f8647ea60bee2c5fcdeb6f9a58fb2cfca70c4d10e458027a04846e13798c66506151be3df9454b1e417893f","0xa672a88847652d260b5472d6908d1d57e200f1e492d30dd1cecc441cdfc9b76e016d9bab560efd4d7f3c30801de884a9","0x9414e1959c156cde1eb24e628395744db75fc24b9df4595350aaad0bc38e0246c9b4148f6443ef68b8e253a4a6bcf11c","0x9316e9e4ec5fab4f80d6540df0e3a4774db52f1d759d2e5b5bcd3d7b53597bb007eb1887cb7dc61f6249
7d51ffc8d996","0x902d6d77bb49492c7a00bc4b70277bc28c8bf9888f4307bb017ac75a962decdedf3a4e2cf6c1ea9f9ba551f4610cbbd7","0xb07025a18b0e32dd5e12ec6a85781aa3554329ea12c4cd0d3b2c22e43d777ef6f89876dd90a9c8fb097ddf61cf18adc5","0xb355a849ad3227caa4476759137e813505ec523cbc2d4105bc7148a4630f9e81918d110479a2d5f5e4cd9ccec9d9d3e3","0xb49532cfdf02ee760109881ad030b89c48ee3bb7f219ccafc13c93aead754d29bdafe345be54c482e9d5672bd4505080","0x9477802410e263e4f938d57fa8f2a6cac7754c5d38505b73ee35ea3f057aad958cb9722ba6b7b3cfc4524e9ca93f9cdc","0x9148ea83b4436339580f3dbc9ba51509e9ab13c03063587a57e125432dd0915f5d2a8f456a68f8fff57d5f08c8f34d6e","0xb00b6b5392b1930b54352c02b1b3b4f6186d20bf21698689bbfc7d13e86538a4397b90e9d5c93fd2054640c4dbe52a4f","0x926a9702500441243cd446e7cbf15dde16400259726794694b1d9a40263a9fc9e12f7bcbf12a27cb9aaba9e2d5848ddc","0xa0c6155f42686cbe7684a1dc327100962e13bafcf3db97971fc116d9f5c0c8355377e3d70979cdbd58fd3ea52440901c","0xa277f899f99edb8791889d0817ea6a96c24a61acfda3ad8c3379e7c62b9d4facc4b965020b588651672fd261a77f1bfc","0x8f528cebb866b501f91afa50e995234bef5bf20bff13005de99cb51eaac7b4f0bf38580cfd0470de40f577ead5d9ba0f","0x963fc03a44e9d502cc1d23250efef44d299befd03b898d07ce63ca607bb474b5cf7c965a7b9b0f32198b04a8393821f7","0xab087438d0a51078c378bf4a93bd48ef933ff0f1fa68d02d4460820df564e6642a663b5e50a5fe509527d55cb510ae04","0xb0592e1f2c54746bb076be0fa480e1c4bebc4225e1236bcda3b299aa3853e3afb401233bdbcfc4a007b0523a720fbf62","0x851613517966de76c1c55a94dc4595f299398a9808f2d2f0a84330ba657ab1f357701d0895f658c18a44cb00547f6f57","0xa2fe9a1dd251e72b0fe4db27be508bb55208f8f1616b13d8be288363ec722826b1a1fd729fc561c3369bf13950bf1fd6","0xb896cb2bc2d0c77739853bc59b0f89b2e008ba1f701c9cbe3bef035f499e1baee8f0ff1e794854a48c320586a2dfc81a","0xa1b60f98e5e5106785a9b81a85423452ee9ef980fa7fa8464f4366e73f89c50435a0c37b2906052b8e58e212ebd366cf","0xa853b0ebd9609656636df2e6acd5d8839c0fda56f7bf9288a943b06f0b67901a32b95e016ca8bc99bd7b5eab31347e72","0xb290fa4c1346963bd5225235e6bdf7c542174dab4c908ab483d1745b9b3a6015525e398e1761c90e4b49968d05e30eea","0xb0f65a33ad18f154f1351f07879a183ad62e5144ad9f3241c2d06533dad09cbb2253949daff1bb02d24d16a3569f7ef0","0xa00db59b8d4218faf5aeafcd39231027324408f208ec1f54d55a1c41228b463b88304d909d16b718cfc784213917b71e","0xb8d695dd33dc2c3bc73d98248c535b2770ad7fa31aa726f0aa4b3299efb0295ba9b4a51c71d314a4a1bd5872307534d1","0xb848057cca2ca837ee49c42b88422303e58ea7d2fc76535260eb5bd609255e430514e927cc188324faa8e657396d63ec","0x92677836061364685c2aaf0313fa32322746074ed5666fd5f142a7e8f87135f45cd10e78a17557a4067a51dfde890371","0xa854b22c9056a3a24ab164a53e5c5cf388616c33e67d8ebb4590cb16b2e7d88b54b1393c93760d154208b5ca822dc68f","0x86fff174920388bfab841118fb076b2b0cdec3fdb6c3d9a476262f82689fb0ed3f1897f7be9dbf0932bb14d346815c63","0x99661cf4c94a74e182752bcc4b98a8c2218a8f2765642025048e12e88ba776f14f7be73a2d79bd21a61def757f47f904","0x8a8893144d771dca28760cba0f950a5d634195fd401ec8cf1145146286caffb0b1a6ba0c4c1828d0a5480ce49073c64c","0x938a59ae761359ee2688571e7b7d54692848eb5dde57ffc572b473001ea199786886f8c6346a226209484afb61d2e526","0x923f68a6aa6616714cf077cf548aeb845bfdd78f2f6851d8148cba9e33a374017f2f3da186c39b82d14785a093313222","0xac923a93d7da7013e73ce8b4a2b14b8fd0cc93dc29d5de941a70285bdd19be4740fedfe0c56b046689252a3696e9c5bc","0xb49b32c76d4ec1a2c68d4989285a920a805993bc6fcce6dacd3d2ddae73373050a5c44ba8422a3781050682fa0ef6ba2","0x8a367941c07c3bdca5712524a1411bad7945c7c48ffc7103b1d4dff2c25751b0624219d1ccde8c3f70c465f954be5445","0xb838f029df455efb6c530d0e370bbbf7d87d61a9aea3d2fe5474c5fe0a39cf235ceecf9693c5c6c5820b1ba8f820bd31","0xa89
83b7c715eaac7f13a001d2abc462dfc1559dab4a6b554119c271aa8fe00ffcf6b6949a1121f324d6d26cb877bcbae","0xa2afb24ad95a6f14a6796315fbe0d8d7700d08f0cfaf7a2abe841f5f18d4fecf094406cbd54da7232a159f9c5b6e805e","0x87e8e95ad2d62f947b2766ff405a23f7a8afba14e7f718a691d95369c79955cdebe24c54662553c60a3f55e6322c0f6f","0x87c2cbcecb754e0cc96128e707e5c5005c9de07ffd899efa3437cadc23362f5a1d3fcdd30a1f5bdc72af3fb594398c2a","0x91afd6ee04f0496dc633db88b9370d41c428b04fd991002502da2e9a0ef051bcd7b760e860829a44fbe5539fa65f8525","0x8c50e5d1a24515a9dd624fe08b12223a75ca55196f769f24748686315329b337efadca1c63f88bee0ac292dd0a587440","0x8a07e8f912a38d94309f317c32068e87f68f51bdfa082d96026f5f5f8a2211621f8a3856dda8069386bf15fb2d28c18f","0x94ad1dbe341c44eeaf4dc133eed47d8dbfe752575e836c075745770a6679ff1f0e7883b6aa917462993a7f469d74cab5","0x8745f8bd86c2bb30efa7efb7725489f2654f3e1ac4ea95bd7ad0f3cfa223055d06c187a16192d9d7bdaea7b050c6a324","0x900d149c8d79418cda5955974c450a70845e02e5a4ecbcc584a3ca64d237df73987c303e3eeb79da1af83bf62d9e579f","0x8f652ab565f677fb1a7ba03b08004e3cda06b86c6f1b0b9ab932e0834acf1370abb2914c15b0d08327b5504e5990681c","0x9103097d088be1f75ab9d3da879106c2f597e2cc91ec31e73430647bdd5c33bcfd771530d5521e7e14df6acda44f38a6","0xb0fec7791cfb0f96e60601e1aeced9a92446b61fedab832539d1d1037558612d78419efa87ff5f6b7aab8fd697d4d9de","0xb9d2945bdb188b98958854ba287eb0480ef614199c4235ce5f15fc670b8c5ffe8eeb120c09c53ea8a543a022e6a321ac","0xa9461bb7d5490973ebaa51afc0bb4a5e42acdccb80e2f939e88b77ac28a98870e103e1042899750f8667a8cc9123bae9","0xa37fdf11d4bcb2aed74b9f460a30aa34afea93386fa4cdb690f0a71bc58f0b8df60bec56e7a24f225978b862626fa00e","0xa214420e183e03d531cf91661466ea2187d84b6e814b8b20b3730a9400a7d25cf23181bb85589ebc982cec414f5c2923","0xad09a45a698a6beb3e0915f540ef16e9af7087f53328972532d6b5dfe98ce4020555ece65c6cbad8bd6be8a4dfefe6fd","0xab6742800b02728c92d806976764cb027413d6f86edd08ad8bb5922a2969ee9836878cd39db70db0bd9a2646862acc4f","0x974ca9305bd5ea1dc1755dff3b63e8bfe9f744321046c1395659bcea2a987b528e64d5aa96ac7b015650b2253b37888d","0x84eee9d6bce039c52c2ebc4fccc0ad70e20c82f47c558098da4be2f386a493cbc76adc795b5488c8d11b6518c2c4fab8","0x875d7bda46efcb63944e1ccf760a20144df3b00d53282b781e95f12bfc8f8316dfe6492c2efbf796f1150e36e436e9df","0xb68a2208e0c587b5c31b5f6cb32d3e6058a9642e2d9855da4f85566e1412db528475892060bb932c55b3a80877ad7b4a","0xba006368ecab5febb6ab348644d9b63de202293085ed468df8bc24d992ae8ce468470aa37f36a73630c789fb9c819b30","0x90a196035150846cd2b482c7b17027471372a8ce7d914c4d82b6ea7fa705d8ed5817bd42d63886242585baf7d1397a1c","0xa223b4c85e0daa8434b015fd9170b5561fe676664b67064974a1e9325066ecf88fc81f97ab5011c59fad28cedd04b240","0x82e8ec43139cf15c6bbeed484b62e06cded8a39b5ce0389e4cbe9c9e9c02f2f0275d8d8d4e8dfec8f69a191bef220408","0x81a3fc07a7b68d92c6ee4b6d28f5653ee9ec85f7e2ee1c51c075c1b130a8c5097dc661cf10c5aff1c7114b1a6a19f11a","0x8ed2ef8331546d98819a5dd0e6c9f8cb2630d0847671314a28f277faf68da080b53891dd75c82cbcf7788b255490785d","0xacecabf84a6f9bbed6b2fc2e7e4b48f02ef2f15e597538a73aea8f98addc6badda15e4695a67ecdb505c1554e8f345ec","0xb8f51019b2aa575f8476e03dcadf86cc8391f007e5f922c2a36b2daa63f5a503646a468990cd5c65148d323942193051","0xaaa595a84b403ec65729bc1c8055a94f874bf9adddc6c507b3e1f24f79d3ad359595a672b93aab3394db4e2d4a7d8970","0x895144c55fcbd0f64d7dd69e6855cfb956e02b5658eadf0f026a70703f3643037268fdd673b0d21b288578a83c6338dd","0xa2e92ae6d0d237d1274259a8f99d4ea4912a299816350b876fba5ebc60b714490e198a916e1c38c6e020a792496fa23c","0xa45795fda3b5bb0ad1d3c628f6add5b2a4473a1414c1a232e80e70d1cfffd7f8a8d9861f8df2946999d7dbb56bf60113","0xb6659bf7f6f2fef61c39923
e8c23b8c70e9c903028d8f62516d16755cd3fba2fe41c285aa9432dc75ab08f8a1d8a81fc","0xa735609a6bc5bfd85e58234fc439ff1f58f1ff1dd966c5921d8b649e21f006bf2b8642ad8a75063c159aaf6935789293","0xa3c622eb387c9d15e7bda2e3e84d007cb13a6d50d655c3f2f289758e49d3b37b9a35e4535d3cc53d8efd51f407281f19","0x8afe147b53ad99220f5ef9d763bfc91f9c20caecbcf823564236fb0e6ede49414c57d71eec4772c8715cc65a81af0047","0xb5f0203233cf71913951e9c9c4e10d9243e3e4a1f2cb235bf3f42009120ba96e04aa414c9938ea8873b63148478927e8","0x93c52493361b458d196172d7ba982a90a4f79f03aa8008edc322950de3ce6acf4c3977807a2ffa9e924047e02072b229","0xb9e72b805c8ac56503f4a86c82720afbd5c73654408a22a2ac0b2e5caccdfb0e20b59807433a6233bc97ae58cf14c70a","0xaf0475779b5cee278cca14c82da2a9f9c8ef222eb885e8c50cca2315fea420de6e04146590ed0dd5a29c0e0812964df5","0xb430ccab85690db02c2d0eb610f3197884ca12bc5f23c51e282bf3a6aa7e4a79222c3d8761454caf55d6c01a327595f9","0x830032937418b26ee6da9b5206f3e24dc76acd98589e37937e963a8333e5430abd6ce3dd93ef4b8997bd41440eed75d6","0x8820a6d73180f3fe255199f3f175c5eb770461ad5cfdde2fb11508041ed19b8c4ce66ad6ecebf7d7e836cc2318df47ca","0xaef1393e7d97278e77bbf52ef6e1c1d5db721ccf75fe753cf47a881fa034ca61eaa5098ee5a344c156d2b14ff9e284ad","0x8a4a26c07218948c1196c45d927ef4d2c42ade5e29fe7a91eaebe34a29900072ce5194cf28d51f746f4c4c649daf4396","0x84011dc150b7177abdcb715efbd8c201f9cb39c36e6069af5c50a096021768ba40cef45b659c70915af209f904ede3b6","0xb1bd90675411389bb66910b21a4bbb50edce5330850c5ab0b682393950124252766fc81f5ecfc72fb7184387238c402e","0x8dfdcd30583b696d2c7744655f79809f451a60c9ad5bf1226dc078b19f4585d7b3ef7fa9d54e1ac09520d95cbfd20928","0xb351b4dc6d98f75b8e5a48eb7c6f6e4b78451991c9ba630e5a1b9874c15ac450cd409c1a024713bf2cf82dc400e025ef","0xa462b8bc97ac668b97b28b3ae24b9f5de60e098d7b23ecb600d2194cd35827fb79f77c3e50d358f5bd72ee83fef18fa0","0xa183753265c5f7890270821880cce5f9b2965b115ba783c6dba9769536f57a04465d7da5049c7cf8b3fcf48146173c18","0xa8a771b81ed0d09e0da4d79f990e58eabcd2be3a2680419502dd592783fe52f657fe55125b385c41d0ba3b9b9cf54a83","0xa71ec577db46011689d073245e3b1c3222a9b1fe6aa5b83629adec5733dd48617ebea91346f0dd0e6cdaa86e4931b168","0xa334b8b244f0d598a02da6ae0f918a7857a54dce928376c4c85df15f3b0f2ba3ac321296b8b7c9dd47d770daf16c8f8c","0xa29037f8ef925c417c90c4df4f9fb27fb977d04e2b3dd5e8547d33e92ab72e7a00f5461de21e28835319eae5db145eb7","0xb91054108ae78b00e3298d667b913ebc44d8f26e531eae78a8fe26fdfb60271c97efb2dee5f47ef5a3c15c8228138927","0x926c13efbe90604f6244be9315a34f72a1f8d1aab7572df431998949c378cddbf2fe393502c930fff614ff06ae98a0ce","0x995c758fd5600e6537089b1baa4fbe0376ab274ff3e82a17768b40df6f91c2e443411de9cafa1e65ea88fb8b87d504f4","0x9245ba307a7a90847da75fca8d77ec03fdfc812c871e7a2529c56a0a79a6de16084258e7a9ac4ae8a3756f394336e21c","0x99e0cfa2bb57a7e624231317044c15e52196ecce020db567c8e8cb960354a0be9862ee0c128c60b44777e65ac315e59f","0xad4f6b3d27bbbb744126601053c3dc98c07ff0eb0b38a898bd80dce778372846d67e5ab8fb34fb3ad0ef3f235d77ba7f","0xa0f12cae3722bbbca2e539eb9cc7614632a2aefe51410430070a12b5bc5314ecec5857b7ff8f41e9980cac23064f7c56","0xb487f1bc59485848c98222fd3bc36c8c9bb3d2912e2911f4ceca32c840a7921477f9b1fe00877e05c96c75d3eecae061","0xa6033db53925654e18ecb3ce715715c36165d7035db9397087ac3a0585e587998a53973d011ac6d48af439493029cee6","0xa6b4d09cd01c70a3311fd131d3710ccf97bde3e7b80efd5a8c0eaeffeb48cca0f951ced905290267b115b06d46f2693b","0xa9dff1df0a8f4f218a98b6f818a693fb0d611fed0fc3143537cbd6578d479af13a653a8155e535548a2a0628ae24fa58","0xa58e469f65d366b519f9a394cacb7edaddac214463b7b6d62c2dbc1316e11c6c5184ce45c16de2d77f990dcdd8b55430","0x989e71734f8119103586dc9a3c5f5033ddc815a2101
8b34c1f876cdfc112efa868d5751bf6419323e4e59fa6a03ece1c","0xa2da00e05036c884369e04cf55f3de7d659cd5fa3f849092b2519dd263694efe0f051953d9d94b7e121f0aee8b6174d7","0x968f3c029f57ee31c4e1adea89a7f92e28483af9a74f30fbdb995dc2d40e8e657dff8f8d340d4a92bf65f54440f2859f","0x932778df6f60ac1639c1453ef0cbd2bf67592759dcccb3e96dcc743ff01679e4c7dd0ef2b0833dda548d32cb4eba49e2","0xa805a31139f8e0d6dae1ac87d454b23a3dc9fc653d4ca18d4f8ebab30fc189c16e73981c2cb7dd6f8c30454a5208109d","0xa9ba0991296caa2aaa4a1ceacfb205544c2a2ec97088eace1d84ee5e2767656a172f75d2f0c4e16a3640a0e0dec316e0","0xb1e49055c968dced47ec95ae934cf45023836d180702e20e2df57e0f62fb85d7ac60d657ba3ae13b8560b67210449459","0xa94e1da570a38809c71e37571066acabff7bf5632737c9ab6e4a32856924bf6211139ab3cedbf083850ff2d0e0c0fcfc","0x88ef1bb322000c5a5515b310c838c9af4c1cdbb32eab1c83ac3b2283191cd40e9573747d663763a28dad0d64adc13840","0xa987ce205f923100df0fbd5a85f22c9b99b9b9cbe6ddfa8dfda1b8fe95b4f71ff01d6c5b64ca02eb24edb2b255a14ef0","0x84fe8221a9e95d9178359918a108de4763ebfa7a6487facb9c963406882a08a9a93f492f8e77cf9e7ea41ae079c45993","0xaa1cf3dc7c5dcfa15bbbc811a4bb6dbac4fba4f97fb1ed344ab60264d7051f6eef19ea9773441d89929ee942ed089319","0x8f6a7d610d59d9f54689bbe6a41f92d9f6096cde919c1ab94c3c7fcecf0851423bc191e5612349e10f855121c0570f56","0xb5af1fa7894428a53ea520f260f3dc3726da245026b6d5d240625380bfb9c7c186df0204bb604efac5e613a70af5106e","0xa5bce6055ff812e72ce105f147147c7d48d7a2313884dd1f488b1240ee320f13e8a33f5441953a8e7a3209f65b673ce1","0xb9b55b4a1422677d95821e1d042ab81bbf0bf087496504021ec2e17e238c2ca6b44fb3b635a5c9eac0871a724b8d47c3","0x941c38e533ce4a673a3830845b56786585e5fe49c427f2e5c279fc6db08530c8f91db3e6c7822ec6bb4f956940052d18","0xa38e191d66c625f975313c7007bbe7431b5a06ed2da1290a7d5d0f2ec73770d476efd07b8e632de64597d47df175cbb0","0x94ba76b667abf055621db4c4145d18743a368d951565632ed4e743dd50dd3333507c0c34f286a5c5fdbf38191a2255cd","0xa5ca38c60be5602f2bfa6e00c687ac96ac36d517145018ddbee6f12eb0faa63dd57909b9eeed26085fe5ac44e55d10ab","0xb00fea3b825e60c1ed1c5deb4b551aa65a340e5af36b17d5262c9cd2c508711e4dc50dc2521a2c16c7c901902266e64a","0x971b86fc4033485e235ccb0997a236206ba25c6859075edbcdf3c943116a5030b7f75ebca9753d863a522ba21a215a90","0xb3b31f52370de246ee215400975b674f6da39b2f32514fe6bd54e747752eedca22bb840493b44a67df42a3639c5f901f","0xaffbbfac9c1ba7cbfa1839d2ae271dd6149869b75790bf103230637da41857fc326ef3552ff31c15bda0694080198143","0xa95d42aa7ef1962520845aa3688f2752d291926f7b0d73ea2ee24f0612c03b43f2b0fe3c9a9a99620ffc8d487b981bc2","0x914a266065caf64985e8c5b1cb2e3f4e3fe94d7d085a1881b1fefa435afef4e1b39a98551d096a62e4f5cc1a7f0fdc2e","0x81a0b4a96e2b75bc1bf2dbd165d58d55cfd259000a35504d1ffb18bc346a3e6f07602c683723864ffb980f840836fd8d","0x91c1556631cddd4c00b65b67962b39e4a33429029d311c8acf73a18600e362304fb68bccb56fde40f49e95b7829e0b87","0x8befbacc19e57f7c885d1b7a6028359eb3d80792fe13b92a8400df21ce48deb0bb60f2ddb50e3d74f39f85d7eab23adc","0x92f9458d674df6e990789690ec9ca73dacb67fc9255b58c417c555a8cc1208ace56e8e538f86ba0f3615573a0fbac00d","0xb4b1b3062512d6ae7417850c08c13f707d5838e43d48eb98dd4621baf62eee9e82348f80fe9b888a12874bfa538771f8","0xa13c4a3ac642ede37d9c883f5319e748d2b938f708c9d779714108a449b343f7b71a6e3ef4080fee125b416762920273","0xaf44983d5fc8cceee0551ef934e6e653f2d3efa385e5c8a27a272463a6f333e290378cc307c2b664eb923c78994e706e","0xa389fd6c59fe2b4031cc244e22d3991e541bd203dd5b5e73a6159e72df1ab41d49994961500dcde7989e945213184778","0x8d2141e4a17836c548de9598d7b298b03f0e6c73b7364979a411c464e0628e21cff6ac3d6decdba5d1c4909eff479761","0x980b22ef53b7bdf188a3f14bc51b0dbfdf9c758826daa3cbc1e3986022406a8
aa9a6a79e400567120b88c67faa35ce5f","0xa28882f0a055f96df3711de5d0aa69473e71245f4f3e9aa944e9d1fb166e02caa50832e46da6d3a03b4801735fd01b29","0x8db106a37d7b88f5d995c126abb563934dd8de516af48e85695d02b1aea07f79217e3cdd03c6f5ca57421830186c772b","0xb5a7e50da0559a675c472f7dfaee456caab6695ab7870541b2be8c2b118c63752427184aad81f0e1afc61aef1f28c46f","0x9962118780e20fe291d10b64f28d09442a8e1b5cffd0f3dd68d980d0614050a626c616b44e9807fbee7accecae00686a","0xb38ddf33745e8d2ad6a991aefaf656a33c5f8cbe5d5b6b6fd03bd962153d8fd0e01b5f8f96d80ae53ab28d593ab1d4e7","0x857dc12c0544ff2c0c703761d901aba636415dee45618aba2e3454ff9cbc634a85c8b05565e88520ff9be2d097c8b2b1","0xa80d465c3f8cc63af6d74a6a5086b626c1cb4a8c0fee425964c3bd203d9d7094e299f81ce96d58afc20c8c9a029d9dae","0x89e1c8fbde8563763be483123a3ed702efac189c6d8ab4d16c85e74bbaf856048cc42d5d6e138633a38572ba5ec3f594","0x893a594cf495535f6d216508f8d03c317dcf03446668cba688da90f52d0111ac83d76ad09bf5ea47056846585ee5c791","0xaadbd8be0ae452f7f9450c7d2957598a20cbf10139a4023a78b4438172d62b18b0de39754dd2f8862dbd50a3a0815e53","0xae7d39670ecca3eb6db2095da2517a581b0e8853bdfef619b1fad9aacd443e7e6a40f18209fadd44038a55085c5fe8b2","0x866ef241520eacb6331593cfcb206f7409d2f33d04542e6e52cba5447934e02d44c471f6c9a45963f9307e9809ab91d9","0xb1a09911ad3864678f7be79a9c3c3eb5c84a0a45f8dcb52c67148f43439aeaaa9fd3ed3471276b7e588b49d6ebe3033a","0xadd07b7f0dbb34049cd8feeb3c18da5944bf706871cfd9f14ff72f6c59ad217ebb1f0258b13b167851929387e4e34cfe","0xae048892d5c328eefbdd4fba67d95901e3c14d974bfc0a1fc68155ca9f0d59e61d7ba17c6c9948b120cf35fd26e6fee9","0x9185b4f3b7da0ddb4e0d0f09b8a9e0d6943a4611e43f13c3e2a767ed8592d31e0ba3ebe1914026a3627680274291f6e5","0xa9c022d4e37b0802284ce3b7ee9258628ab4044f0db4de53d1c3efba9de19d15d65cc5e608dbe149c21c2af47d0b07b5","0xb24dbd5852f8f24921a4e27013b6c3fa8885b973266cb839b9c388efad95821d5d746348179dcc07542bd0d0aefad1ce","0xb5fb4f279300876a539a27a441348764908bc0051ebd66dc51739807305e73db3d2f6f0f294ffb91b508ab150eaf8527","0xace50841e718265b290c3483ed4b0fdd1175338c5f1f7530ae9a0e75d5f80216f4de37536adcbc8d8c95982e88808cd0","0xb19cadcde0f63bd1a9c24bd9c2806f53c14c0b9735bf351601498408ba503ddbd2037c891041cbba47f58b8c483f3b21","0xb6061e63558d312eb891b97b39aa552fa218568d79ee26fe6dd5b864aea9e3216d8f2e2f3b093503be274766dac41426","0x89730fdb2876ab6f0fe780d695f6e12090259027e789b819956d786e977518057e5d1d7f5ab24a3ae3d5d4c97773bd2b","0xb6fa841e81f9f2cad0163a02a63ae96dc341f7ae803b616efc6e1da2fbea551c1b96b11ad02c4afbdf6d0cc9f23da172","0x8fb66187182629c861ddb6896d7ed3caf2ad050c3dba8ab8eb0d7a2c924c3d44c48d1a148f9e33fb1f061b86972f8d21","0x86022ac339c1f84a7fa9e05358c1a5b316b4fc0b83dbe9c8c7225dc514f709d66490b539359b084ce776e301024345fa","0xb50b9c321468da950f01480bb62b6edafd42f83c0001d6e97f2bd523a1c49a0e8574fb66380ea28d23a7c4d54784f9f0","0xa31c05f7032f30d1dac06678be64d0250a071fd655e557400e4a7f4c152be4d5c7aa32529baf3e5be7c4bd49820054f6","0xb95ac0848cd322684772119f5b682d90a66bbf9dac411d9d86d2c34844bbd944dbaf8e47aa41380455abd51687931a78","0xae4a6a5ce9553b65a05f7935e61e496a4a0f6fd8203367a2c627394c9ce1e280750297b74cdc48fd1d9a31e93f97bef4","0xa22daf35f6e9b05e52e0b07f7bd1dbbebd2c263033fb0e1b2c804e2d964e2f11bc0ece6aca6af079dd3a9939c9c80674","0x902150e0cb1f16b9b59690db35281e28998ce275acb313900da8b2d8dfd29fa1795f8ca3ff820c31d0697de29df347c1","0xb17b5104a5dc665cdd7d47e476153d715eb78c6e5199303e4b5445c21a7fa7cf85fe7cfd08d7570f4e84e579b005428c","0xa03f49b81c15433f121680aa02d734bb9e363af2156654a62bcb5b2ba2218398ccb0ff61104ea5d7df5b16ea18623b1e","0x802101abd5d3c88876e75a27ffc2f9ddcce75e6b24f23dba03e5201281a7bd5cc7530b6a003be92d225
093ca17d3c3bb","0xa4d183f63c1b4521a6b52226fc19106158fc8ea402461a5cccdaa35fee93669df6a8661f45c1750cd01308149b7bf08e","0x8d17c22e0c8403b69736364d460b3014775c591032604413d20a5096a94d4030d7c50b9fe3240e31d0311efcf9816a47","0x947225acfcce5992eab96276f668c3cbe5f298b90a59f2bb213be9997d8850919e8f496f182689b5cbd54084a7332482","0x8df6f4ed216fc8d1905e06163ba1c90d336ab991a18564b0169623eb39b84e627fa267397da15d3ed754d1f3423bff07","0x83480007a88f1a36dea464c32b849a3a999316044f12281e2e1c25f07d495f9b1710b4ba0d88e9560e72433addd50bc2","0xb3019d6e591cf5b33eb972e49e06c6d0a82a73a75d78d383dd6f6a4269838289e6e07c245f54fed67f5c9bb0fd5e1c5f","0x92e8ce05e94927a9fb02debadb99cf30a26172b2705003a2c0c47b3d8002bf1060edb0f6a5750aad827c98a656b19199","0xac2aff801448dbbfc13cca7d603fd9c69e82100d997faf11f465323b97255504f10c0c77401e4d1890339d8b224f5803","0xb0453d9903d08f508ee27e577445dc098baed6cde0ac984b42e0f0efed62760bd58d5816cf1e109d204607b7b175e30c","0xae68dc4ba5067e825d46d2c7c67f1009ceb49d68e8d3e4c57f4bcd299eb2de3575d42ea45e8722f8f28497a6e14a1cfe","0xb22486c2f5b51d72335ce819bbafb7fa25eb1c28a378a658f13f9fc79cd20083a7e573248d911231b45a5cf23b561ca7","0x89d1201d1dbd6921867341471488b4d2fd0fc773ae1d4d074c78ae2eb779a59b64c00452c2a0255826fca6b3d03be2b1","0xa2998977c91c7a53dc6104f5bc0a5b675e5350f835e2f0af69825db8af4aeb68435bdbcc795f3dd1f55e1dd50bc0507f","0xb0be4937a925b3c05056ed621910d535ccabf5ab99fd3b9335080b0e51d9607d0fd36cb5781ff340018f6acfca4a9736","0xaea145a0f6e0ba9df8e52e84bb9c9de2c2dc822f70d2724029b153eb68ee9c17de7d35063dcd6a39c37c59fdd12138f7","0x91cb4545d7165ee8ffbc74c874baceca11fdebbc7387908d1a25877ca3c57f2c5def424dab24148826832f1e880bede0","0xb3b579cb77573f19c571ad5eeeb21f65548d7dff9d298b8d7418c11f3e8cd3727c5b467f013cb87d6861cfaceee0d2e3","0xb98a1eeec2b19fecc8378c876d73645aa52fb99e4819903735b2c7a885b242787a30d1269a04bfb8573d72d9bbc5f0f0","0x940c1f01ed362bd588b950c27f8cc1d52276c71bb153d47f07ec85b038c11d9a8424b7904f424423e714454d5e80d1cd","0xaa343a8ecf09ce11599b8cf22f7279cf80f06dbf9f6d62cb05308dbbb39c46fd0a4a1240b032665fbb488a767379b91b","0x87c3ac72084aca5974599d3232e11d416348719e08443acaba2b328923af945031f86432e170dcdd103774ec92e988c9","0x91d6486eb5e61d2b9a9e742c20ec974a47627c6096b3da56209c2b4e4757f007e793ebb63b2b246857c9839b64dc0233","0xaebcd3257d295747dd6fc4ff910d839dd80c51c173ae59b8b2ec937747c2072fa85e3017f9060aa509af88dfc7529481","0xb3075ba6668ca04eff19efbfa3356b92f0ab12632dcda99cf8c655f35b7928c304218e0f9799d68ef9f809a1492ff7db","0x93ba7468bb325639ec2abd4d55179c69fd04eaaf39fc5340709227bbaa4ad0a54ea8b480a1a3c8d44684e3be0f8d1980","0xa6aef86c8c0d92839f38544d91b767c582568b391071228ff5a5a6b859c87bf4f81a7d926094a4ada1993ddbd677a920","0x91dcd6d14207aa569194aa224d1e5037b999b69ade52843315ca61ba26abe9a76412c9e88259bc5cf5d7b95b97d9c3bc","0xb3b483d31c88f78d49bd065893bc1e3d2aa637e27dedb46d9a7d60be7660ce7a10aaaa7deead362284a52e6d14021178","0x8e5730070acf8371461ef301cc4523e8e672aa0e3d945d438a0e0aa6bdf8cb9c685dcf38df429037b0c8aff3955c6f5b","0xb8c6d769890a8ee18dc4f9e917993315877c97549549b34785a92543cbeec96a08ae3a28d6e809c4aacd69de356c0012","0x95ca86cd384eaceaa7c077c5615736ca31f36824bd6451a16142a1edc129fa42b50724aeed7c738f08d7b157f78b569e","0x94df609c6d71e8eee7ab74226e371ccc77e01738fe0ef1a6424435b4570fe1e5d15797b66ed0f64eb88d4a3a37631f0e","0x89057b9783212add6a0690d6bb99097b182738deff2bd9e147d7fd7d6c8eacb4c219923633e6309ad993c24572289901","0x83a0f9f5f265c5a0e54defa87128240235e24498f20965009fef664f505a360b6fb4020f2742565dfc7746eb185bcec0","0x91170da5306128931349bc3ed50d7df0e48a68b8cc8420975170723ac79d8773e4fa13c5f14dc6e3fafcad78379050b1","0xb7
178484d1b55f7e56a4cc250b6b2ec6040437d96bdfddfa7b35ed27435860f3855c2eb86c636f2911b012eb83b00db8","0xac0b00c4322d1e4208e09cd977b4e54d221133ff09551f75b32b0b55d0e2be80941dda26257b0e288c162e63c7e9cf68","0x9690ed9e7e53ed37ff362930e4096b878b12234c332fd19d5d064824084245952eda9f979e0098110d6963e468cf513e","0xb6fa547bb0bb83e5c5be0ed462a8783fba119041c136a250045c09d0d2af330c604331e7de960df976ff76d67f8000cd","0x814603907c21463bcf4e59cfb43066dfe1a50344ae04ef03c87c0f61b30836c3f4dea0851d6fa358c620045b7f9214c8","0x9495639e3939fad2a3df00a88603a5a180f3c3a0fe4d424c35060e2043e0921788003689887b1ed5be424d9a89bb18bb","0xaba4c02d8d57f2c92d5bc765885849e9ff8393d6554f5e5f3e907e5bfac041193a0d8716d7861104a4295d5a03c36b03","0x8ead0b56c1ca49723f94a998ba113b9058059321da72d9e395a667e6a63d5a9dac0f5717cec343f021695e8ced1f72af","0xb43037f7e3852c34ed918c5854cd74e9d5799eeddfe457d4f93bb494801a064735e326a76e1f5e50a339844a2f4a8ec9","0x99db8422bb7302199eb0ff3c3d08821f8c32f53a600c5b6fb43e41205d96adae72be5b460773d1280ad1acb806af9be8","0x8a9be08eae0086c0f020838925984df345c5512ff32e37120b644512b1d9d4fecf0fd30639ca90fc6cf334a86770d536","0x81b43614f1c28aa3713a309a88a782fb2bdfc4261dd52ddc204687791a40cf5fd6a263a8179388596582cccf0162efc2","0xa9f3a8b76912deb61d966c75daf5ddb868702ebec91bd4033471c8e533183df548742a81a2671de5be63a502d827437d","0x902e2415077f063e638207dc7e14109652e42ab47caccd6204e2870115791c9defac5425fd360b37ac0f7bd8fe7011f8","0xaa18e4fdc1381b59c18503ae6f6f2d6943445bd00dd7d4a2ad7e5adad7027f2263832690be30d456e6d772ad76f22350","0xa348b40ba3ba7d81c5d4631f038186ebd5e5f314f1ea737259151b07c3cc8cf0c6ed4201e71bcc1c22fefda81a20cde6","0xaa1306f7ac1acbfc47dc6f7a0cb6d03786cec8c8dc8060388ccda777bca24bdc634d03e53512c23dba79709ff64f8620","0x818ccfe46e700567b7f3eb400e5a35f6a5e39b3db3aa8bc07f58ace35d9ae5a242faf8dbccd08d9a9175bbce15612155","0xb7e3da2282b65dc8333592bb345a473f03bd6df69170055fec60222de9897184536bf22b9388b08160321144d0940279","0xa4d976be0f0568f4e57de1460a1729129252b44c552a69fceec44e5b97c96c711763360d11f9e5bf6d86b4976bf40d69","0x85d185f0397c24c2b875b09b6328a23b87982b84ee880f2677a22ff4c9a1ba9f0fea000bb3f7f66375a00d98ebafce17","0xb4ccbb8c3a2606bd9b87ce022704663af71d418351575f3b350d294f4efc68c26f9a2ce49ff81e6ff29c3b63d746294e","0x93ffd3265fddb63724dfde261d1f9e22f15ecf39df28e4d89e9fea03221e8e88b5dd9b77628bacaa783c6f91802d47cc","0xb1fd0f8d7a01378e693da98d03a2d2fda6b099d03454b6f2b1fa6472ff6bb092751ce6290059826b74ac0361eab00e1e","0xa89f440c71c561641589796994dd2769616b9088766e983c873fae0716b95c386c8483ab8a4f367b6a68b72b7456dd32","0xaf4fe92b01d42d03dd5d1e7fa55e96d4bbcb7bf7d4c8c197acd16b3e0f3455807199f683dcd263d74547ef9c244b35cc","0xa8227f6e0a344dfe76bfbe7a1861be32c4f4bed587ccce09f9ce2cf481b2dda8ae4f566154bc663d15f962f2d41761bd","0xa7b361663f7495939ed7f518ba45ea9ff576c4e628995b7aea026480c17a71d63fc2c922319f0502eb7ef8f14a406882","0x8ddcf382a9f39f75777160967c07012cfa89e67b19714a7191f0c68eaf263935e5504e1104aaabd0899348c972a8d3c6","0x98c95b9f6f5c91f805fb185eedd06c6fc4457d37dd248d0be45a6a168a70031715165ea20606245cbdf8815dc0ac697f","0x805b44f96e001e5909834f70c09be3efcd3b43632bcac5b6b66b6d227a03a758e4b1768ce2a723045681a1d34562aaeb","0xb0e81b07cdc45b3dca60882676d9badb99f25c461b7efe56e3043b80100bb62d29e1873ae25eb83087273160ece72a55","0xb0c53f0abe78ee86c7b78c82ae1f7c070bb0b9c45c563a8b3baa2c515d482d7507bb80771e60b38ac13f78b8af92b4a9","0xa7838ef6696a9e4d2e5dfd581f6c8d6a700467e8fd4e85adabb5f7a56f514785dd4ab64f6f1b48366f7d94728359441b","0x88c76f7700a1d23c30366a1d8612a796da57b2500f97f88fdf2d76b045a9d24e7426a8ffa2f4e86d3046937a841dad58","0xad8964baf98c1f02e088d1
d9fcb3af6b1dfa44cdfe0ed2eae684e7187c33d3a3c28c38e8f4e015f9c04d451ed6f85ff6","0x90e9d00a098317ececaa9574da91fc149eda5b772dedb3e5a39636da6603aa007804fa86358550cfeff9be5a2cb7845e","0xa56ff4ddd73d9a6f5ab23bb77efa25977917df63571b269f6a999e1ad6681a88387fcc4ca3b26d57badf91b236503a29","0x97ad839a6302c410a47e245df84c01fb9c4dfef86751af3f9340e86ff8fc3cd52fa5ff0b9a0bd1d9f453e02ca80658a6","0xa4c8c44cbffa804129e123474854645107d1f0f463c45c30fd168848ebea94880f7c0c5a45183e9eb837f346270bdb35","0xa72e53d0a1586d736e86427a93569f52edd2f42b01e78aee7e1961c2b63522423877ae3ac1227a2cf1e69f8e1ff15bc3","0x8559f88a7ef13b4f09ac82ae458bbae6ab25671cfbf52dae7eac7280d6565dd3f0c3286aec1a56a8a16dc3b61d78ce47","0x8221503f4cdbed550876c5dc118a3f2f17800c04e8be000266633c83777b039a432d576f3a36c8a01e8fd18289ebc10b","0x99bfbe5f3e46d4d898a578ba86ed26de7ed23914bd3bcdf3c791c0bcd49398a52419077354a5ab75cea63b6c871c6e96","0xaa134416d8ff46f2acd866c1074af67566cfcf4e8be8d97329dfa0f603e1ff208488831ce5948ac8d75bfcba058ddcaa","0xb02609d65ebfe1fe8e52f21224a022ea4b5ea8c1bd6e7b9792eed8975fc387cdf9e3b419b8dd5bcce80703ab3a12a45f","0xa4f14798508698fa3852e5cac42a9db9797ecee7672a54988aa74037d334819aa7b2ac7b14efea6b81c509134a6b7ad2","0x884f01afecbcb987cb3e7c489c43155c416ed41340f61ecb651d8cba884fb9274f6d9e7e4a46dd220253ae561614e44c","0xa05523c9e71dce1fe5307cc71bd721feb3e1a0f57a7d17c7d1c9fb080d44527b7dbaa1f817b1af1c0b4322e37bc4bb1e","0x8560aec176a4242b39f39433dd5a02d554248c9e49d3179530815f5031fee78ba9c71a35ceeb2b9d1f04c3617c13d8f0","0x996aefd402748d8472477cae76d5a2b92e3f092fc834d5222ae50194dd884c9fb8b6ed8e5ccf8f6ed483ddbb4e80c747","0x8fd09900320000cbabc40e16893e2fcf08815d288ec19345ad7b6bb22f7d78a52b6575a3ca1ca2f8bc252d2eafc928ec","0x939e51f73022bc5dc6862a0adf8fb8a3246b7bfb9943cbb4b27c73743926cc20f615a036c7e5b90c80840e7f1bfee0e7","0xa0a6258700cadbb9e241f50766573bf9bdb7ad380b1079dc3afb4054363d838e177b869cad000314186936e40359b1f2","0x972699a4131c8ed27a2d0e2104d54a65a7ff1c450ad9da3a325c662ab26869c21b0a84d0700b98c8b5f6ce3b746873d7","0xa454c7fe870cb8aa6491eafbfb5f7872d6e696033f92e4991d057b59d70671f2acdabef533e229878b60c7fff8f748b1","0xa167969477214201f09c79027b10221e4707662e0c0fde81a0f628249f2f8a859ce3d30a7dcc03b8ecca8f7828ad85c7","0x8ff6b7265175beb8a63e1dbf18c9153fb2578c207c781282374f51b40d57a84fd2ef2ea2b9c6df4a54646788a62fd17f","0xa3d7ebeccde69d73d8b3e76af0da1a30884bb59729503ff0fb0c3bccf9221651b974a6e72ea33b7956fc3ae758226495","0xb71ef144c9a98ce5935620cb86c1590bd4f48e5a2815d25c0cdb008fde628cf628c31450d3d4f67abbfeb16178a74cfd","0xb5e0a16d115134f4e2503990e3f2035ed66b9ccf767063fe6747870d97d73b10bc76ed668550cb82eedc9a2ca6f75524","0xb30ffaaf94ee8cbc42aa2c413175b68afdb207dbf351fb20be3852cb7961b635c22838da97eaf43b103aff37e9e725cc","0x98aa7d52284f6c1f22e272fbddd8c8698cf8f5fbb702d5de96452141fafb559622815981e50b87a72c2b1190f59a7deb","0x81fbacda3905cfaf7780bb4850730c44166ed26a7c8d07197a5d4dcd969c09e94a0461638431476c16397dd7bdc449f9","0x95e47021c1726eac2e5853f570d6225332c6e48e04c9738690d53e07c6b979283ebae31e2af1fc9c9b3e59f87e5195b1","0xac024a661ba568426bb8fce21780406537f518075c066276197300841e811860696f7588188bc01d90bace7bc73d56e3","0xa4ebcaf668a888dd404988ab978594dee193dad2d0aec5cdc0ccaf4ec9a7a8228aa663db1da8ddc52ec8472178e40c32","0xa20421b8eaf2199d93b083f2aff37fb662670bd18689d046ae976d1db1fedd2c2ff897985ecc6277b396db7da68bcb27","0x8bc33d4b40197fd4d49d1de47489d10b90d9b346828f53a82256f3e9212b0cbc6930b895e879da9cec9fedf026aadb3e","0xaaafdd1bec8b757f55a0433eddc0a39f818591954fd4e982003437fcceb317423ad7ee74dbf17a2960380e7067a6b4e2","0xaad34277ebaed81a6ec154d16736866f95832803af
28aa5625bf0461a71d02b1faba02d9d9e002be51c8356425a56867","0x976e9c8b150d08706079945bd0e84ab09a648ecc6f64ded9eb5329e57213149ae409ae93e8fbd8eda5b5c69f5212b883","0x8097fae1653247d2aed4111533bc378171d6b2c6d09cbc7baa9b52f188d150d645941f46d19f7f5e27b7f073c1ebd079","0x83905f93b250d3184eaba8ea7d727c4464b6bdb027e5cbe4f597d8b9dc741dcbea709630bd4fd59ce24023bec32fc0f3","0x8095030b7045cff28f34271386e4752f9a9a0312f8df75de4f424366d78534be2b8e1720a19cb1f9a2d21105d790a225","0xa7b7b73a6ae2ed1009c49960374b0790f93c74ee03b917642f33420498c188a169724945a975e5adec0a1e83e07fb1b2","0x856a41c54df393b6660b7f6354572a4e71c8bfca9cabaffb3d4ef2632c015e7ee2bc10056f3eccb3dbed1ad17d939178","0xa8f7a55cf04b38cd4e330394ee6589da3a07dc9673f74804fdf67b364e0b233f14aec42e783200a2e4666f7c5ff62490","0x82c529f4e543c6bca60016dc93232c115b359eaee2798a9cf669a654b800aafe6ab4ba58ea8b9cdda2b371c8d62fa845","0x8caab020c1baddce77a6794113ef1dfeafc5f5000f48e97f4351b588bf02f1f208101745463c480d37f588d5887e6d8c","0x8fa91b3cc400f48b77b6fd77f3b3fbfb3f10cdff408e1fd22d38f77e087b7683adad258804409ba099f1235b4b4d6fea","0x8aa02787663d6be9a35677d9d8188b725d5fcd770e61b11b64e3def8808ea5c71c0a9afd7f6630c48634546088fcd8e2","0xb5635b7b972e195cab878b97dea62237c7f77eb57298538582a330b1082f6207a359f2923864630136d8b1f27c41b9aa","0x8257bb14583551a65975946980c714ecd6e5b629672bb950b9caacd886fbd22704bc9e3ba7d30778adab65dc74f0203a","0xab5fe1cd12634bfa4e5c60d946e2005cbd38f1063ec9a5668994a2463c02449a0a185ef331bd86b68b6e23a8780cb3ba","0xa7d3487da56cda93570cc70215d438204f6a2709bfb5fda6c5df1e77e2efc80f4235c787e57fbf2c74aaff8cbb510a14","0xb61cff7b4c49d010e133319fb828eb900f8a7e55114fc86b39c261a339c74f630e1a7d7e1350244ada566a0ff3d46c4b","0x8d4d1d55d321d278db7a85522ccceca09510374ca81d4d73e3bb5249ace7674b73900c35a531ec4fa6448fabf7ad00dc","0x966492248aee24f0f56c8cfca3c8ec6ba3b19abb69ae642041d4c3be8523d22c65c4dafcab4c58989ccc4e0bd2f77919","0xb20c320a90cb220b86e1af651cdc1e21315cd215da69f6787e28157172f93fc8285dcd59b039c626ed8ca4633cba1a47","0xaae9e6b22f018ceb5c0950210bb8182cb8cb61014b7e14581a09d36ebd1bbfebdb2b82afb7fdb0cf75e58a293d9c456d","0x875547fb67951ad37b02466b79f0c9b985ccbc500cfb431b17823457dc79fb9597ec42cd9f198e15523fcd88652e63a4","0x92afce49773cb2e20fb21e4f86f18e0959ebb9c33361547ddb30454ee8e36b1e234019cbdca0e964cb292f7f77df6b90","0x8af85343dfe1821464c76ba11c216cbef697b5afc69c4d821342e55afdac047081ec2e3f7b09fc14b518d9a23b78c003","0xb7de4a1648fd63f3a918096ea669502af5357438e69dac77cb8102b6e6c15c76e033cfaa80dafc806e535ede5c1a20aa","0xac80e9b545e8bd762951d96c9ce87f629d01ffcde07efc2ef7879ca011f1d0d8a745abf26c9d452541008871304fac00","0xa4cf0f7ed724e481368016c38ea5816698a5f68eb21af4d3c422d2ba55f96a33e427c2aa40de1b56a7cfac7f7cf43ab0","0x899b0a678bb2db2cae1b44e75a661284844ebcdd87abf308fedeb2e4dbe5c5920c07db4db7284a7af806a2382e8b111a","0xaf0588a2a4afce2b1b13c1230816f59e8264177e774e4a341b289a101dcf6af813638fed14fb4d09cb45f35d5d032609","0xa4b8df79e2be76e9f5fc5845f06fe745a724cf37c82fcdb72719b77bdebea3c0e763f37909373e3a94480cc5e875cba0","0x83e42c46d88930c8f386b19fd999288f142d325e2ebc86a74907d6d77112cb0d449bc511c95422cc810574031a8cbba9","0xb5e39534070de1e5f6e27efbdd3dc917d966c2a9b8cf2d893f964256e95e954330f2442027dc148c776d63a95bcde955","0x958607569dc28c075e658cd4ae3927055c6bc456eef6212a6fea8205e48ed8777a8064f584cda38fe5639c371e2e7fba","0x812adf409fa63575113662966f5078a903212ffb65c9b0bbe62da0f13a133443a7062cb8fd70f5e5dd5559a32c26d2c8","0xa679f673e5ce6a3cce7fa31f22ee3785e96bcb55e5a776e2dd3467bef7440e3555d1a9b87cb215e86ee9ed13a090344b","0xafedbb34508b159eb25eb2248d7fe328f86ef8c7d84c62d5b5607d74aae27c
c2cc45ee148eb22153b09898a835c58df4","0xb75505d4f6b67d31e665cfaf5e4acdb5838ae069166b7fbcd48937c0608a59e40a25302fcc1873d2e81c1782808c70f0","0xb62515d539ec21a155d94fc00ea3c6b7e5f6636937bce18ed5b618c12257fb82571886287fd5d1da495296c663ebc512","0xab8e1a9446bbdd588d1690243b1549d230e6149c28f59662b66a8391a138d37ab594df38e7720fae53217e5c3573b5be","0xb31e8abf4212e03c3287bb2c0a153065a7290a16764a0bac8f112a72e632185a654bb4e88fdd6053e6c7515d9719fadb","0xb55165477fe15b6abd2d0f4fddaa9c411710dcc4dd712daba3d30e303c9a3ee5415c256f9dc917ecf18c725b4dbab059","0xa0939d4f57cacaae549b78e87cc234de4ff6a35dc0d9cd5d7410abc30ebcd34c135e008651c756e5a9d2ca79c40ef42b","0x8cf10e50769f3443340844aad4d56ec790850fed5a41fcbd739abac4c3015f0a085a038fbe7fae9f5ad899cce5069f6b","0x924055e804d82a99ea4bb160041ea4dc14b568abf379010bc1922fde5d664718c31d103b8b807e3a1ae809390e708c73","0x8ec0f9d26f71b0f2e60a179e4fd1778452e2ffb129d50815e5d7c7cb9415fa69ae5890578086e8ef6bfde35ad2a74661","0x98c7f12b15ec4426b59f737f73bf5faea4572340f4550b7590dfb7f7ffedb2372e3e555977c63946d579544c53210ad0","0x8a935f7a955c78f69d66f18eee0092e5e833fa621781c9581058e219af4d7ceee48b84e472e159dda6199715fb2f9acf","0xb78d4219f95a2dbfaa7d0c8a610c57c358754f4f43c2af312ab0fe8f10a5f0177e475332fb8fd23604e474fc2abeb051","0x8d086a14803392b7318c28f1039a17e3cfdcece8abcaca3657ec3d0ac330842098a85c0212f889fabb296dfb133ce9aa","0xa53249f417aac82f2c2a50c244ce21d3e08a5e5a8bd33bec2a5ab0d6cd17793e34a17edfa3690899244ce201e2fb9986","0x8619b0264f9182867a1425be514dc4f1ababc1093138a728a28bd7e4ecc99b9faaff68c23792264bc6e4dce5f52a5c52","0x8c171edbbbde551ec19e31b2091eb6956107dd9b1f853e1df23bff3c10a3469ac77a58335eee2b79112502e8e163f3de","0xa9d19ec40f0ca07c238e9337c6d6a319190bdba2db76fb63902f3fb459aeeb50a1ac30db5b25ee1b4201f3ca7164a7f4","0xb9c6ec14b1581a03520b8d2c1fbbc31fb8ceaef2c0f1a0d0080b6b96e18442f1734bea7ef7b635d787c691de4765d469","0x8cb437beb4cfa013096f40ccc169a713dc17afee6daa229a398e45fd5c0645a9ad2795c3f0cd439531a7151945d7064d","0xa6e8740cc509126e146775157c2eb278003e5bb6c48465c160ed27888ca803fa12eee1f6a8dd7f444f571664ed87fdc1","0xb75c1fecc85b2732e96b3f23aefb491dbd0206a21d682aee0225838dc057d7ed3b576176353e8e90ae55663f79e986e4","0xad8d249b0aea9597b08358bce6c77c1fd552ef3fbc197d6a1cfe44e5e6f89b628b12a6fb04d5dcfcbacc51f46e4ae7bb","0xb998b2269932cbd58d04b8e898d373ac4bb1a62e8567484f4f83e224061bc0f212459f1daae95abdbc63816ae6486a55","0x827988ef6c1101cddc96b98f4a30365ff08eea2471dd949d2c0a9b35c3bbfa8c07054ad1f4c88c8fbf829b20bb5a9a4f","0x8692e638dd60babf7d9f2f2d2ce58e0ac689e1326d88311416357298c6a2bffbfebf55d5253563e7b3fbbf5072264146","0xa685d75b91aea04dbc14ab3c1b1588e6de96dae414c8e37b8388766029631b28dd860688079b12d09cd27f2c5af11adf","0xb57eced93eec3371c56679c259b34ac0992286be4f4ff9489d81cf9712403509932e47404ddd86f89d7c1c3b6391b28c","0xa1c8b4e42ebcbd8927669a97f1b72e236fb19249325659e72be7ddaaa1d9e81ca2abb643295d41a8c04a2c01f9c0efd7","0x877c33de20d4ed31674a671ba3e8f01a316581e32503136a70c9c15bf0b7cb7b1cba6cd4eb641fad165fb3c3c6c235fd","0xa2a469d84ec478da40838f775d11ad38f6596eb41caa139cc190d6a10b5108c09febae34ffdafac92271d2e73c143693","0x972f817caedb254055d52e963ed28c206848b6c4cfdb69dbc961c891f8458eaf582a6d4403ce1177d87bc2ea410ef60a","0xaccbd739e138007422f28536381decc54bb6bd71d93edf3890e54f9ef339f83d2821697d1a4ac1f5a98175f9a9ecb9b5","0x8940f8772e05389f823b62b3adc3ed541f91647f0318d7a0d3f293aeeb421013de0d0a3664ea53dd24e5fbe02d7efef6","0x8ecce20f3ef6212edef07ec4d6183fda8e0e8cad2c6ccd0b325e75c425ee1faba00b5c26b4d95204238931598d78f49d","0x97cc72c36335bd008afbed34a3b0c7225933faba87f7916d0a6d2161e6f82e0cdcda7959573a366f63
8ca75d30e9dab1","0x9105f5de8699b5bdb6bd3bb6cc1992d1eac23929c29837985f83b22efdda92af64d9c574aa9640475087201bbbe5fd73","0x8ffb33c4f6d05c413b9647eb6933526a350ed2e4278ca2ecc06b0e8026d8dbe829c476a40e45a6df63a633090a3f82ef","0x8bfc6421fdc9c2d2aaa68d2a69b1a2728c25b84944cc3e6a57ff0c94bfd210d1cbf4ff3f06702d2a8257024d8be7de63","0xa80e1dc1dddfb41a70220939b96dc6935e00b32fb8be5dff4eed1f1c650002ff95e4af481c43292e3827363b7ec4768a","0x96f714ebd54617198bd636ba7f7a7f8995a61db20962f2165078d9ed8ee764d5946ef3cbdc7ebf8435bb8d5dd4c1deac","0x8cdb0890e33144d66391d2ae73f5c71f5a861f72bc93bff6cc399fc25dd1f9e17d8772592b44593429718784802ac377","0x8ccf9a7f80800ee770b92add734ed45a73ecc31e2af0e04364eefc6056a8223834c7c0dc9dfc52495bdec6e74ce69994","0xaa0875f423bd68b5f10ba978ddb79d3b96ec093bfbac9ff366323193e339ed7c4578760fb60f60e93598bdf1e5cc4995","0xa9214f523957b59c7a4cb61a40251ad72aba0b57573163b0dc0f33e41d2df483fb9a1b85a5e7c080e9376c866790f8cb","0xb6224b605028c6673a536cc8ff9aeb94e7a22e686fda82cf16068d326469172f511219b68b2b3affb7933af0c1f80d07","0xb6d58968d8a017c6a34e24c2c09852f736515a2c50f37232ac6b43a38f8faa7572cc31dade543b594b61b5761c4781d0","0x8a97cefe5120020c38deeb861d394404e6c993c6cbd5989b6c9ebffe24f46ad11b4ba6348e2991cbf3949c28cfc3c99d","0x95bf046f8c3a9c0ce2634be4de3713024daec3fc4083e808903b25ce3ac971145af90686b451efcc72f6b22df0216667","0xa6a4e2f71b8fa28801f553231eff2794c0f10d12e7e414276995e21195abc9c2983a8997e41af41e78d19ff6fbb2680b","0x8e5e62a7ca9c2f58ebaab63db2ff1fb1ff0877ae94b7f5e2897f273f684ae639dff44cc65718f78a9c894787602ab26a","0x8542784383eec4f565fcb8b9fc2ad8d7a644267d8d7612a0f476fc8df3aff458897a38003d506d24142ad18f93554f2b","0xb7db68ba4616ea072b37925ec4fb39096358c2832cc6d35169e032326b2d6614479f765ae98913c267105b84afcb9bf2","0x8b31dbb9457d23d416c47542c786e07a489af35c4a87dadb8ee91bea5ac4a5315e65625d78dad2cf8f9561af31b45390","0xa8545a1d91ac17257732033d89e6b7111db8242e9c6ebb0213a88906d5ef407a2c6fdb444e29504b06368b6efb4f4839","0xb1bd85d29ebb28ccfb05779aad8674906b267c2bf8cdb1f9a0591dd621b53a4ee9f2942687ee3476740c0b4a7621a3ae","0xa2b54534e152e46c50d91fff03ae9cd019ff7cd9f4168b2fe7ac08ef8c3bbc134cadd3f9d6bd33d20ae476c2a8596c8a","0xb19b571ff4ae3e9f5d95acda133c455e72c9ea9973cae360732859836c0341c4c29ab039224dc5bc3deb824e031675d8","0x940b5f80478648bac025a30f3efeb47023ce20ee98be833948a248bca6979f206bb28fc0f17b90acf3bb4abd3d14d731","0x8f106b40588586ac11629b96d57808ad2808915d89539409c97414aded90b4ff23286a692608230a52bff696055ba5d6","0xae6bda03aa10da3d2abbc66d764ca6c8d0993e7304a1bdd413eb9622f3ca1913baa6da1e9f4f9e6cf847f14f44d6924d","0xa18e7796054a340ef826c4d6b5a117b80927afaf2ebd547794c400204ae2caf277692e2eabb55bc2f620763c9e9da66d","0x8d2d25180dc2c65a4844d3e66819ccfcf48858f0cc89e1c77553b463ec0f7feb9a4002ce26bc618d1142549b9850f232","0x863f413a394de42cc8166c1c75d513b91d545fff1de6b359037a742c70b008d34bf8e587afa2d62c844d0c6f0ea753e7","0x83cd0cf62d63475e7fcad18a2e74108499cdbf28af2113cfe005e3b5887794422da450b1944d0a986eb7e1f4c3b18f25","0xb4f8b350a6d88fea5ab2e44715a292efb12eb52df738c9b2393da3f1ddee68d0a75b476733ccf93642154bceb208f2b8","0xb3f52aaa4cd4221cb9fc45936cc67fd3864bf6d26bf3dd86aa85aa55ecfc05f5e392ecce5e7cf9406b4b1c4fce0398c8","0xb33137084422fb643123f40a6df2b498065e65230fc65dc31791c330e898c51c3a65ff738930f32c63d78f3c9315f85b","0x91452bfa75019363976bb7337fe3a73f1c10f01637428c135536b0cdc7da5ce558dae3dfc792aa55022292600814a8ef","0xad6ba94c787cd4361ca642c20793ea44f1f127d4de0bb4a77c7fbfebae0fcadbf28e2cb6f0c12c12a07324ec8c19761d","0x890aa6248b17f1501b0f869c556be7bf2b1d31a176f9978bb97ab7a6bd4138eed32467951c5ef1871944b7f620542f43","0x8
2111db2052194ee7dd22ff1eafffac0443cf969d3762cceae046c9a11561c0fdce9c0711f88ac01d1bed165f8a7cee3","0xb1527b71df2b42b55832f72e772a466e0fa05743aacc7814f4414e4bcc8d42a4010c9e0fd940e6f254cafedff3cd6543","0x922370fa49903679fc565f09c16a5917f8125e72acfeb060fcdbadbd1644eb9f4016229756019c93c6d609cda5d5d174","0xaa4c7d98a96cab138d2a53d4aee8ebff6ef903e3b629a92519608d88b3bbd94de5522291a1097e6acf830270e64c8ee1","0xb3dc21608a389a72d3a752883a382baaafc61ecc44083b832610a237f6a2363f24195acce529eb4aed4ef0e27a12b66e","0x94619f5de05e07b32291e1d7ab1d8b7337a2235e49d4fb5f3055f090a65e932e829efa95db886b32b153bdd05a53ec8c","0xade1e92722c2ffa85865d2426fb3d1654a16477d3abf580cfc45ea4b92d5668afc9d09275d3b79283e13e6b39e47424d","0xb7201589de7bed094911dd62fcd25c459a8e327ac447b69f541cdba30233063e5ddffad0b67e9c3e34adcffedfd0e13d","0x809d325310f862d6549e7cb40f7e5fc9b7544bd751dd28c4f363c724a0378c0e2adcb5e42ec8f912f5f49f18f3365c07","0xa79c20aa533de7a5d671c99eb9eb454803ba54dd4f2efa3c8fec1a38f8308e9905c71e9282955225f686146388506ff6","0xa85eeacb5e8fc9f3ed06a3fe2dc3108ab9f8c5877b148c73cf26e4e979bf5795edbe2e63a8d452565fd1176ed40402b2","0x97ef55662f8a1ec0842b22ee21391227540adf7708f491436044f3a2eb18c471525e78e1e14fa292507c99d74d7437c6","0x93110d64ed5886f3d16ce83b11425576a3a7a9bb831cd0de3f9a0b0f2270a730d68136b4ef7ff035ede004358f419b5c","0xac9ed0a071517f0ae4f61ce95916a90ba9a77a3f84b0ec50ef7298acdcd44d1b94525d191c39d6bd1bb68f4471428760","0x98abd6a02c7690f5a339adf292b8c9368dfc12e0f8069cf26a5e0ce54b4441638f5c66ea735142f3c28e00a0024267e6","0xb51efb73ba6d44146f047d69b19c0722227a7748b0e8f644d0fc9551324cf034c041a2378c56ce8b58d06038fb8a78de","0x8f115af274ef75c1662b588b0896b97d71f8d67986ae846792702c4742ab855952865ce236b27e2321967ce36ff93357","0xb3c4548f14d58b3ab03c222da09e4381a0afe47a72d18d50a94e0008797f78e39e99990e5b4757be62310d400746e35a","0xa9b1883bd5f31f909b8b1b6dcb48c1c60ed20aa7374b3ffa7f5b2ed036599b5bef33289d23c80a5e6420d191723b92f7","0x85d38dffd99487ae5bb41ab4a44d80a46157bbbe8ef9497e68f061721f74e4da513ccc3422936b059575975f6787c936","0xadf870fcb96e972c033ab7a35d28ae79ee795f82bc49c3bd69138f0e338103118d5529c53f2d72a9c0d947bf7d312af2","0xab4c7a44e2d9446c6ff303eb49aef0e367a58b22cc3bb27b4e69b55d1d9ee639c9234148d2ee95f9ca8079b1457d5a75","0xa386420b738aba2d7145eb4cba6d643d96bda3f2ca55bb11980b318d43b289d55a108f4bc23a9606fb0bccdeb3b3bb30","0x847020e0a440d9c4109773ecca5d8268b44d523389993b1f5e60e541187f7c597d79ebd6e318871815e26c96b4a4dbb1","0xa530aa7e5ca86fcd1bec4b072b55cc793781f38a666c2033b510a69e110eeabb54c7d8cbcb9c61fee531a6f635ffa972","0x87364a5ea1d270632a44269d686b2402da737948dac27f51b7a97af80b66728b0256547a5103d2227005541ca4b7ed04","0x8816fc6e16ea277de93a6d793d0eb5c15e9e93eb958c5ef30adaf8241805adeb4da8ce19c3c2167f971f61e0b361077d","0x8836a72d301c42510367181bb091e4be377777aed57b73c29ef2ce1d475feedd7e0f31676284d9a94f6db01cc4de81a2","0xb0d9d8b7116156d9dde138d28aa05a33e61f8a85839c1e9071ccd517b46a5b4b53acb32c2edd7150c15bc1b4bd8db9e3","0xae931b6eaeda790ba7f1cd674e53dc87f6306ff44951fa0df88d506316a5da240df9794ccbd7215a6470e6b31c5ea193","0x8c6d5bdf87bd7f645419d7c6444e244fe054d437ed1ba0c122fde7800603a5fadc061e5b836cb22a6cfb2b466f20f013","0x90d530c6d0cb654999fa771b8d11d723f54b8a8233d1052dc1e839ea6e314fbed3697084601f3e9bbb71d2b4eaa596df","0xb0d341a1422588c983f767b1ed36c18b141774f67ef6a43cff8e18b73a009da10fc12120938b8bba27f225bdfd3138f9","0xa131b56f9537f460d304e9a1dd75702ace8abd68cb45419695cb8dee76998139058336c87b7afd6239dc20d7f8f940cc","0xaa6c51fa28975f709329adee1bbd35d49c6b878041841a94465e8218338e4371f5cb6c17f44a63ac93644bf28f15d20f","0x88440fb584a99ebd7f9ea
04aaf622f6e44e2b43bbb49fb5de548d24a238dc8f26c8da2ccf03dd43102bda9f16623f609","0x9777b8695b790e702159a4a750d5e7ff865425b95fa0a3c15495af385b91c90c00a6bd01d1b77bffe8c47d01baae846f","0x8b9d764ece7799079e63c7f01690c8eff00896a26a0d095773dea7a35967a8c40db7a6a74692f0118bf0460c26739af4","0x85808c65c485520609c9e61fa1bb67b28f4611d3608a9f7a5030ee61c3aa3c7e7dc17fff48af76b4aecee2cb0dbd22ac","0xad2783a76f5b3db008ef5f7e67391fda4e7e36abde6b3b089fc4835b5c339370287935af6bd53998bed4e399eda1136d","0x96f18ec03ae47c205cc4242ca58e2eff185c9dca86d5158817e2e5dc2207ab84aadda78725f8dc080a231efdc093b940","0x97de1ab6c6cc646ae60cf7b86df73b9cf56cc0cd1f31b966951ebf79fc153531af55ca643b20b773daa7cab784b832f7","0x870ba266a9bfa86ef644b1ef025a0f1b7609a60de170fe9508de8fd53170c0b48adb37f19397ee8019b041ce29a16576","0xad990e888d279ac4e8db90619d663d5ae027f994a3992c2fbc7d262b5990ae8a243e19157f3565671d1cb0de17fe6e55","0x8d9d5adcdd94c5ba3be4d9a7428133b42e485f040a28d16ee2384758e87d35528f7f9868de9bd23d1a42a594ce50a567","0x85a33ed75d514ece6ad78440e42f7fcdb59b6f4cff821188236d20edae9050b3a042ce9bc7d2054296e133d033e45022","0x92afd2f49a124aaba90de59be85ff269457f982b54c91b06650c1b8055f9b4b0640fd378df02a00e4fc91f7d226ab980","0x8c0ee09ec64bd831e544785e3d65418fe83ed9c920d9bb4d0bf6dd162c1264eb9d6652d2def0722e223915615931581c","0x8369bedfa17b24e9ad48ebd9c5afea4b66b3296d5770e09b00446c5b0a8a373d39d300780c01dcc1c6752792bccf5fd0","0x8b9e960782576a59b2eb2250d346030daa50bbbec114e95cdb9e4b1ba18c3d34525ae388f859708131984976ca439d94","0xb682bface862008fea2b5a07812ca6a28a58fd151a1d54c708fc2f8572916e0d678a9cb8dc1c10c0470025c8a605249e","0xa38d5e189bea540a824b36815fc41e3750760a52be0862c4cac68214febdc1a754fb194a7415a8fb7f96f6836196d82a","0xb9e7fbda650f18c7eb8b40e42cc42273a7298e65e8be524292369581861075c55299ce69309710e5b843cb884de171bd","0xb6657e5e31b3193874a1bace08f42faccbd3c502fb73ad87d15d18a1b6c2a146f1baa929e6f517db390a5a47b66c0acf","0xae15487312f84ed6265e4c28327d24a8a0f4d2d17d4a5b7c29b974139cf93223435aaebe3af918f5b4bb20911799715f","0x8bb4608beb06bc394e1a70739b872ce5a2a3ffc98c7547bf2698c893ca399d6c13686f6663f483894bccaabc3b9c56ad","0xb58ac36bc6847077584308d952c5f3663e3001af5ecf2e19cb162e1c58bd6c49510205d453cffc876ca1dc6b8e04a578","0x924f65ced61266a79a671ffb49b300f0ea44c50a0b4e3b02064faa99fcc3e4f6061ea8f38168ab118c5d47bd7804590e","0x8d67d43b8a06b0ff4fafd7f0483fa9ed1a9e3e658a03fb49d9d9b74e2e24858dc1bed065c12392037b467f255d4e5643","0xb4d4f87813125a6b355e4519a81657fa97c43a6115817b819a6caf4823f1d6a1169683fd68f8d025cdfa40ebf3069acb","0xa7fd4d2c8e7b59b8eed3d4332ae94b77a89a2616347402f880bc81bde072220131e6dbec8a605be3a1c760b775375879","0x8d4a7d8fa6f55a30df37bcf74952e2fa4fd6676a2e4606185cf154bdd84643fd01619f8fb8813a564f72e3f574f8ce30","0x8086fb88e6260e9a9c42e9560fde76315ff5e5680ec7140f2a18438f15bc2cc7d7d43bfb5880b180b738c20a834e6134","0x916c4c54721de03934fee6f43de50bb04c81f6f8dd4f6781e159e71c40c60408aa54251d457369d133d4ba3ed7c12cb4","0x902e5bf468f11ed9954e2a4a595c27e34abe512f1d6dc08bbca1c2441063f9af3dc5a8075ab910a10ff6c05c1c644a35","0xa1302953015e164bf4c15f7d4d35e3633425a78294406b861675667eec77765ff88472306531e5d3a4ec0a2ff0dd6a9e","0x87874461df3c9aa6c0fa91325576c0590f367075f2f0ecfeb34afe162c04c14f8ce9d608c37ac1adc8b9985bc036e366","0x84b50a8a61d3cc609bfb0417348133e698fe09a6d37357ce3358de189efcf35773d78c57635c2d26c3542b13cc371752","0xacaed2cff8633d12c1d12bb7270c54d65b0b0733ab084fd47f81d0a6e1e9b6f300e615e79538239e6160c566d8bb8d29","0x889e6a0e136372ca4bac90d1ab220d4e1cad425a710e8cdd48b400b73bb8137291ceb36a39440fa84305783b1d42c72f","0x90952e5becec45b2b73719c228429a2c364991cf1
d5a9d6845ae5b38018c2626f4308daa322cab1c72e0f6c621bb2b35","0x8f5a97a801b6e9dcd66ccb80d337562c96f7914e7169e8ff0fda71534054c64bf2a9493bb830623d612cfe998789be65","0x84f3df8b9847dcf1d63ca470dc623154898f83c25a6983e9b78c6d2d90a97bf5e622445be835f32c1e55e6a0a562ea78","0x91d12095cd7a88e7f57f254f02fdb1a1ab18984871dead2f107404bcf8069fe68258c4e6f6ebd2477bddf738135400bb","0xb771a28bc04baef68604d4723791d3712f82b5e4fe316d7adc2fc01b935d8e644c06d59b83bcb542afc40ebafbee0683","0x872f6341476e387604a7e93ae6d6117e72d164e38ebc2b825bc6df4fcce815004d7516423c190c1575946b5de438c08d","0x90d6b4aa7d40a020cdcd04e8b016d041795961a8e532a0e1f4041252131089114a251791bf57794cadb7d636342f5d1c","0x899023ba6096a181448d927fed7a0fe858be4eac4082a42e30b3050ee065278d72fa9b9d5ce3bc1372d4cbd30a2f2976","0xa28f176571e1a9124f95973f414d5bdbf5794d41c3839d8b917100902ac4e2171eb940431236cec93928a60a77ede793","0x838dbe5bcd29c4e465d02350270fa0036cd46f8730b13d91e77afb7f5ed16525d0021d3b2ae173a76c378516a903e0cb","0x8e105d012dd3f5d20f0f1c4a7e7f09f0fdd74ce554c3032e48da8cce0a77260d7d47a454851387770f5c256fa29bcb88","0x8f4df0f9feeb7a487e1d138d13ea961459a6402fd8f8cabb226a92249a0d04ded5971f3242b9f90d08da5ff66da28af6","0xad1cfda4f2122a20935aa32fb17c536a3653a18617a65c6836700b5537122af5a8206befe9eaea781c1244c43778e7f1","0x832c6f01d6571964ea383292efc8c8fa11e61c0634a25fa180737cc7ab57bc77f25e614aac9a2a03d98f27b3c1c29de2","0x903f89cc13ec6685ac7728521898781fecb300e9094ef913d530bf875c18bcc3ceed7ed51e7b482d45619ab4b025c2e9","0xa03c474bb915aad94f171e8d96f46abb2a19c9470601f4c915512ec8b9e743c3938450a2a5b077b4618b9df8809e1dc1","0x83536c8456f306045a5f38ae4be2e350878fa7e164ea408d467f8c3bc4c2ee396bd5868008c089183868e4dfad7aa50b","0x88f26b4ea1b236cb326cd7ad7e2517ec8c4919598691474fe15d09cabcfc37a8d8b1b818f4d112432ee3a716b0f37871","0xa44324e3fe96e9c12b40ded4f0f3397c8c7ee8ff5e96441118d8a6bfad712d3ac990b2a6a23231a8f691491ac1fd480f","0xb0de4693b4b9f932191a21ee88629964878680152a82996c0019ffc39f8d9369bbe2fe5844b68d6d9589ace54af947e4","0x8e5d8ba948aea5fd26035351a960e87f0d23efddd8e13236cc8e4545a3dda2e9a85e6521efb8577e03772d3637d213d9","0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556","0x8731176363ad7658a2862426ee47a5dce9434216cef60e6045fa57c40bb3ce1e78dac4510ae40f1f31db5967022ced32","0xb10c9a96745722c85bdb1a693100104d560433d45b9ac4add54c7646a7310d8e9b3ca9abd1039d473ae768a18e489845","0xa2ac374dfbb464bf850b4a2caf15b112634a6428e8395f9c9243baefd2452b4b4c61b0cb2836d8eae2d57d4900bf407e","0xb69fe3ded0c4f5d44a09a0e0f398221b6d1bf5dbb8bc4e338b93c64f1a3cac1e4b5f73c2b8117158030ec03787f4b452","0x8852cdbaf7d0447a8c6f211b4830711b3b5c105c0f316e3a6a18dcfbb9be08bd6f4e5c8ae0c3692da08a2dfa532f9d5c","0x93bbf6d7432a7d98ade3f94b57bf9f4da9bc221a180a370b113066dd42601bb9e09edd79e2e6e04e00423399339eebda","0xa80941c391f1eeafc1451c59e4775d6a383946ff22997aeaadf806542ba451d3b0f0c6864eeba954174a296efe2c1550","0xa045fe2bb011c2a2f71a0181a8f457a3078470fb74c628eab8b59aef69ffd0d649723bf74d6885af3f028bc5a104fb39","0xb9d8c35911009c4c8cad64692139bf3fc16b78f5a19980790cb6a7aea650a25df4231a4437ae0c351676a7e42c16134f","0x94c79501ded0cfcbab99e1841abe4a00a0252b3870e20774c3da16c982d74c501916ec28304e71194845be6e3113c7ab","0x900a66418b082a24c6348d8644ddb1817df5b25cb33044a519ef47cc8e1f7f1e38d2465b7b96d32ed472d2d17f8414c6","0xb26f45d393b8b2fcb29bdbb16323dc7f4b81c09618519ab3a39f8ee5bd148d0d9f3c0b5dfab55b5ce14a1cb9206d777b","0xaa1a87735fc493a80a96a9a57ca40a6d9c32702bfcaa9869ce1a116ae65d69cefe2f3e79a12454b4590353e96f8912b4","0xa922b188d3d0b69b4e4ea2a2aa076566962844637da12c0832105d7b31dea
4a309eee15d12b7a336be3ea36fcbd3e3b7","0x8f3841fcf4105131d8c4d9885e6e11a46c448226401cf99356c291fadb864da9fa9d30f3a73c327f23f9fd99a11d633e","0x9791d1183fae270e226379af6c497e7da803ea854bb20afa74b253239b744c15f670ee808f708ede873e78d79a626c9a","0xa4cad52e3369491ada61bf28ada9e85de4516d21c882e5f1cd845bea9c06e0b2887b0c5527fcff6fc28acd3c04f0a796","0xb9ac86a900899603452bd11a7892a9bfed8054970bfcbeaa8c9d1930db891169e38d6977f5258c25734f96c8462eee3b","0xa3a154c28e5580656a859f4efc2f5ebfa7eaa84ca40e3f134fa7865e8581586db74992dbfa4036aa252fba103773ddde","0x95cc2a0c1885a029e094f5d737e3ecf4d26b99036453a8773c77e360101f9f98676ee246f6f732a377a996702d55691f","0x842651bbe99720438d8d4b0218feb60481280c05beb17750e9ca0d8c0599a60f873b7fbdcc7d8835ba9a6d57b16eec03","0x81ee54699da98f5620307893dcea8f64670609fa20e5622265d66283adeac122d458b3308c5898e6c57c298db2c8b24f","0xb97868b0b2bc98032d68352a535a1b341b9ff3c7af4e3a7f3ebc82d3419daa1b5859d6aedc39994939623c7cd878bd9b","0xb60325cd5d36461d07ef253d826f37f9ee6474a760f2fff80f9873d01fd2b57711543cdc8d7afa1c350aa753c2e33dea","0x8c205326c11d25a46717b780c639d89714c7736c974ae71287e3f4b02e6605ac2d9b4928967b1684f12be040b7bf2dd3","0x95a392d82db51e26ade6c2ccd3396d7e40aff68fa570b5951466580d6e56dda51775dce5cf3a74a7f28c3cb2eb551c4d","0x8f2cc8071eb56dffb70bda6dd433b556221dc8bba21c53353c865f00e7d4d86c9e39f119ea9a8a12ef583e9a55d9a6b6","0x9449a71af9672aaf8856896d7e3d788b22991a7103f75b08c0abbcc2bfe60fda4ed8ce502cea4511ff0ea52a93e81222","0x857090ab9fdb7d59632d068f3cc8cf27e61f0d8322d30e6b38e780a1f05227199b4cd746aac1311c36c659ef20931f28","0x98a891f4973e7d9aaf9ac70854608d4f7493dffc7e0987d7be9dd6029f6ea5636d24ef3a83205615ca1ff403750058e1","0xa486e1365bbc278dd66a2a25d258dc82f46b911103cb16aab3945b9c95ae87b386313a12b566df5b22322ede0afe25ad","0xa9a1eb399ed95d396dccd8d1ac718043446f8b979ec62bdce51c617c97a312f01376ab7fb87d27034e5f5570797b3c33","0xb7abc3858d7a74bb446218d2f5a037e0fae11871ed9caf44b29b69c500c1fa1dcfad64c9cdccc9d80d5e584f06213deb","0x8cfb09fe2e202faa4cebad932b1d35f5ca204e1c2a0c740a57812ac9a6792130d1312aabd9e9d4c58ca168bfebd4c177","0xa90a305c2cd0f184787c6be596fa67f436afd1f9b93f30e875f817ac2aae8bdd2e6e656f6be809467e6b3ad84adb86b1","0x80a9ef993c2b009ae172cc8f7ec036f5734cf4f4dfa06a7db4d54725e7fbfae5e3bc6f22687bdbb6961939d6f0c87537","0x848ade1901931e72b955d7db1893f07003e1708ff5d93174bac5930b9a732640f0578839203e9b77eb27965c700032d3","0x93fdf4697609c5ae9c33b9ca2f5f1af44abeb2b98dc4fdf732cf7388de086f410730dc384d9b7a7f447bb009653c8381","0x89ce3fb805aea618b5715c0d22a9f46da696b6fa86794f56fdf1d44155a33d42daf1920bcbe36cbacf3cf4c92df9cbc7","0x829ce2c342cf82aa469c65f724f308f7a750bd1494adc264609cd790c8718b8b25b5cab5858cf4ee2f8f651d569eea67","0xaf2f0cee7bf413204be8b9df59b9e4991bc9009e0d6dbe6815181df0ec2ca93ab8f4f3135b1c14d8f53d74bff0bd6f27","0xb87998cecf7b88cde93d1779f10a521edd5574a2fbd240102978639ec57433ba08cdb53849038a329cebbe74657268d2","0xa64542a1261a6ed3d720c2c3a802303aad8c4c110c95d0f12e05c1065e66f42da494792b6bfc5b9272363f3b1d457f58","0x86a6fd042e4f282fadf07a4bfee03fc96a3aea49f7a00f52bf249a20f1ec892326855410e61f37fbb27d9305eb2fc713","0x967ea5bc403b6db269682f7fd0df90659350d7e1aa66bc4fab4c9dfcd75ed0bba4b52f1cebc5f34dc8ba810793727629","0xa52990f9f3b8616ce3cdc2c74cd195029e6a969753dcf2d1630438700e7d6ebde36538532b3525ac516f5f2ce9dd27a3","0xa64f7ff870bab4a8bf0d4ef6f5c744e9bf1021ed08b4c80903c7ad318e80ba1817c3180cc45cb5a1cae1170f0241655f","0xb00f706fa4de1f663f021e8ad3d155e84ce6084a409374b6e6cd0f924a0a0b51bebaaaf1d228c77233a73b0a5a0df0e9","0x8b882cc3bff3e42babdb96df95fb780faded84887a0a9bab896bef371cdcf169d909f5658649e9300
6aa3c6e1146d62e","0x9332663ef1d1dcf805c3d0e4ce7a07d9863fb1731172e766b3cde030bf81682cc011e26b773fb9c68e0477b4ae2cfb79","0xa8aa8151348dbd4ef40aaeb699b71b4c4bfd3218560c120d85036d14f678f6736f0ec68e80ce1459d3d35feccc575164","0xa16cd8b729768f51881c213434aa28301fa78fcb554ddd5f9012ee1e4eae7b5cb3dd88d269d53146dea92d10790faf0b","0x86844f0ef9d37142faf3b1e196e44fbe280a3ba4189aa05c356778cb9e3b388a2bff95eed305ada8769935c9974e4c57","0xae2eec6b328fccf3b47bcdac32901ac2744a51beb410b04c81dea34dee4912b619466a4f5e2780d87ecefaebbe77b46d","0x915df4c38d301c8a4eb2dc5b1ba0ffaad67cbb177e0a80095614e9c711f4ef24a4cef133f9d982a63d2a943ba6c8669d","0xae6a2a4dedfc2d1811711a8946991fede972fdf2a389b282471280737536ffc0ac3a6d885b1f8bda0366eb0b229b9979","0xa9b628c63d08b8aba6b1317f6e91c34b2382a6c85376e8ef2410a463c6796740ae936fc4e9e0737cb9455d1daa287bd8","0x848e30bf7edf2546670b390d5cf9ab71f98fcb6add3c0b582cb34996c26a446dee5d1bde4fdcde4fc80c10936e117b29","0x907d6096c7c8c087d1808dd995d5d2b9169b3768c3f433475b50c2e2bd4b082f4d543afd8b0b0ddffa9c66222a72d51d","0xa59970a2493b07339124d763ac9d793c60a03354539ecbcf6035bc43d1ea6e35718202ae6d7060b7d388f483d971573c","0xb9cfef2af9681b2318f119d8611ff6d9485a68d8044581b1959ab1840cbca576dbb53eec17863d2149966e9feb21122f","0xad47271806161f61d3afa45cdfe2babceef5e90031a21779f83dc8562e6076680525b4970b2f11fe9b2b23c382768323","0x8e425a99b71677b04fe044625d338811fbb8ee32368a424f6ab2381c52e86ee7a6cecedf777dc97181519d41c351bc22","0x86b55b54d7adefc12954a9252ee23ae83efe8b5b4b9a7dc307904413e5d69868c7087a818b2833f9b004213d629be8ad","0xa14fda6b93923dd11e564ae4457a66f397741527166e0b16a8eb91c6701c244fd1c4b63f9dd3515193ec88fa6c266b35","0xa9b17c36ae6cd85a0ed7f6cabc5b47dc8f80ced605db327c47826476dc1fb8f8669aa7a7dc679fbd4ee3d8e8b4bd6a6f","0x82a0829469c1458d959c821148f15dacae9ea94bf56c59a6ab2d4dd8b3d16d73e313b5a3912a6c1f131d73a8f06730c4","0xb22d56d549a53eaef549595924bdb621ff807aa4513feedf3fdcbf7ba8b6b9cfa4481c2f67fc642db397a6b794a8b63a","0x974c59c24392e2cb9294006cbe3c52163e255f3bd0c2b457bdc68a6338e6d5b6f87f716854492f8d880a6b896ccf757c","0xb70d247ba7cad97c50b57f526c2ba915786e926a94e8f8c3eebc2e1be6f4255411b9670e382060049c8f4184302c40b2","0xad80201fe75ef21c3ddbd98cf23591e0d7a3ba1036dfe77785c32f44755a212c31f0ceb0a0b6f5ee9b6dc81f358d30c3","0x8c656e841f9bb90b9a42d425251f3fdbc022a604d75f5845f479ed4be23e02aaf9e6e56cde351dd7449c50574818a199","0x8b88dd3fa209d3063b7c5b058f7249ee9900fbc2287d16da61a0704a0a1d71e45d9c96e1cda7fdf9654534ec44558b22","0x961da00cc8750bd84d253c08f011970ae1b1158ad6778e8ed943d547bceaf52d6d5a212a7de3bf2706688c4389b827d2","0xa5dd379922549a956033e3d51a986a4b1508e575042b8eaa1df007aa77cf0b8c2ab23212f9c075702788fa9c53696133","0xac8fcfde3a349d1e93fc8cf450814e842005c545c4844c0401bc80e6b96cdb77f29285a14455e167c191d4f312e866cd","0xac63d79c799783a8466617030c59dd5a8f92ee6c5204676fd8d881ce5f7f8663bdbeb0379e480ea9b6340ab0dc88e574","0x805874fde19ce359041ae2bd52a39e2841acabfd31f965792f2737d7137f36d4e4722ede8340d8c95afa6af278af8acb","0x8d2f323a228aa8ba7b7dc1399138f9e6b41df1a16a7069003ab8104b8b68506a45141bc5fe66acf430e23e13a545190b","0xa1610c721a2d9af882bb6b39bea97cff1527a3aea041d25934de080214ae77c959e79957164440686d15ab301e897d4d","0xaba16d29a47fc36f12b654fde513896723e2c700c4190f11b26aa4011da57737ad717daa02794aa3246e4ae5f0b0cc3a","0xa406db2f15fdd135f346cc4846623c47edd195e80ba8c7cb447332095314d565e4040694ca924696bb5ee7f8996ea0ba","0x8b30e2cd9b47d75ba57b83630e40f832249af6c058d4f490416562af451993eec46f3e1f90bc4d389e4c06abd1b32a46","0xaacf9eb7036e248e209adbfc3dd7ce386569ea9b312caa4b240726549db3c68c4f1c8cbf8ed5ea9ea60c7e57c9df3b8e","0x
b20fcac63bf6f5ee638a42d7f89be847f348c085ddcbec3fa318f4323592d136c230495f188ef2022aa355cc2b0da6f9","0x811eff750456a79ec1b1249d76d7c1547065b839d8d4aaad860f6d4528eb5b669473dcceeeea676cddbc3980b68461b7","0xb52d14ae33f4ab422f953392ae76a19c618cc31afc96290bd3fe2fb44c954b5c92c4789f3f16e8793f2c0c1691ade444","0xa7826dafeeba0db5b66c4dfcf2b17fd7b40507a5a53ac2e42942633a2cb30b95ba1739a6e9f3b7a0e0f1ec729bf274e2","0x8acfd83ddf7c60dd7c8b20c706a3b972c65d336b8f9b3d907bdd8926ced271430479448100050b1ef17578a49c8fa616","0xaf0c69f65184bb06868029ad46f8465d75c36814c621ac20a5c0b06a900d59305584f5a6709683d9c0e4b6cd08d650a6","0xb6cc8588191e00680ee6c3339bd0f0a17ad8fd7f4be57d5d7075bede0ea593a19e67f3d7c1a20114894ee5bfcab71063","0xa82fd4f58635129dbb6cc3eb9391cf2d28400018b105fc41500fbbd12bd890b918f97d3d359c29dd3b4c4e34391dfab0","0x92fc544ed65b4a3625cf03c41ddff7c039bc22d22c0d59dcc00efd5438401f2606adb125a1d5de294cca216ec8ac35a3","0x906f67e4a32582b71f15940523c0c7ce370336935e2646bdaea16a06995256d25e99df57297e39d6c39535e180456407","0x97510337ea5bbd5977287339197db55c60533b2ec35c94d0a460a416ae9f60e85cee39be82abeeacd5813cf54df05862","0x87e6894643815c0ea48cb96c607266c5ee4f1f82ba5fe352fb77f9b6ed14bfc2b8e09e80a99ac9047dfcf62b2ae26795","0xb6fd55dd156622ad7d5d51b7dde75e47bd052d4e542dd6449e72411f68275775c846dde301e84613312be8c7bce58b07","0xb98461ac71f554b2f03a94e429b255af89eec917e208a8e60edf5fc43b65f1d17a20de3f31d2ce9f0cb573c25f2f4d98","0x96f0dea40ca61cefbee41c4e1fe9a7d81fbe1f49bb153d083ab70f5d0488a1f717fd28cedcf6aa18d07cce2c62801898","0x8d7c3ab310184f7dc34b6ce4684e4d29a31e77b09940448ea4daac730b7eb308063125d4dd229046cf11bfd521b771e0","0x96f0564898fe96687918bbf0a6adead99cf72e3a35ea3347e124af9d006221f8e82e5a9d2fe80094d5e8d48e610f415e","0xad50fcb92c2675a398cf07d4c40a579e44bf8d35f27cc330b57e54d5ea59f7d898af0f75dccfe3726e5471133d70f92b","0x828beed62020361689ae7481dd8f116902b522fb0c6c122678e7f949fdef70ead011e0e6bffd25678e388744e17cdb69","0x8349decac1ca16599eee2efc95bcaabf67631107da1d34a2f917884bd70dfec9b4b08ab7bc4379d6c73b19c0b6e54fb8","0xb2a6a2e50230c05613ace9e58bb2e98d94127f196f02d9dddc53c43fc68c184549ca12d713cb1b025d8260a41e947155","0x94ff52181aadae832aed52fc3b7794536e2a31a21fc8be3ea312ca5c695750d37f08002f286b33f4023dba1e3253ecfa","0xa21d56153c7e5972ee9a319501be4faff199fdf09bb821ea9ce64aa815289676c00f105e6f00311b3a5b627091b0d0fc","0xa27a60d219f1f0c971db73a7f563b371b5c9fc3ed1f72883b2eac8a0df6698400c9954f4ca17d7e94e44bd4f95532afb","0xa2fc56fae99b1f18ba5e4fe838402164ce82f8a7f3193d0bbd360c2bac07c46f9330c4c7681ffb47074c6f81ee6e7ac6","0xb748e530cd3afb96d879b83e89c9f1a444f54e55372ab1dcd46a0872f95ce8f49cf2363fc61be82259e04f555937ed16","0x8bf8993e81080c7cbba1e14a798504af1e4950b2f186ab3335b771d6acaee4ffe92131ae9c53d74379d957cb6344d9cd","0x96774d0ef730d22d7ab6d9fb7f90b9ead44285219d076584a901960542756700a2a1603cdf72be4708b267200f6c36a9","0xb47703c2ab17be1e823cc7bf3460db1d6760c0e33862c90ca058845b2ff234b0f9834ddba2efb2ee1770eb261e7d8ffd","0x84319e67c37a9581f8b09b5e4d4ae88d0a7fb4cbb6908971ab5be28070c3830f040b1de83ee663c573e0f2f6198640e4","0x96811875fa83133e0b3c0e0290f9e0e28bca6178b77fdf5350eb19344d453dbd0d71e55a0ef749025a5a2ca0ad251e81","0x81a423423e9438343879f2bfd7ee9f1c74ebebe7ce3cfffc8a11da6f040cc4145c3b527bd3cf63f9137e714dbcb474ef","0xb8c3535701ddbeec2db08e17a4fa99ba6752d32ece5331a0b8743676f421fcb14798afc7c783815484f14693d2f70db8","0x81aee980c876949bf40782835eec8817d535f6f3f7e00bf402ddd61101fdcd60173961ae90a1cf7c5d060339a18c959d","0x87e67b928d97b62c49dac321ce6cb680233f3a394d4c9a899ac2e8db8ccd8e00418e66cdfd68691aa3cb8559723b580c","0x8eac204208d99a2b7386
48df96353bbb1b1065e33ee4f6bba174b540bbbd37d205855e1f1e69a6b7ff043ca377651126","0x848e6e7a54ad64d18009300b93ea6f459ce855971dddb419b101f5ac4c159215626fadc20cc3b9ab1701d8f6dfaddd8b","0x88aa123d9e0cf309d46dddb6acf634b1ade3b090a2826d6e5e78669fa1220d6df9a6697d7778cd9b627db17eea846126","0x9200c2a629b9144d88a61151b661b6c4256cc5dadfd1e59a8ce17a013c2d8f7e754aabe61663c3b30f1bc47784c1f8cf","0xb6e1a2827c3bdda91715b0e1b1f10dd363cef337e7c80cac1f34165fc0dea7c8b69747e310563db5818390146ce3e231","0x92c333e694f89f0d306d54105b2a5dcc912dbe7654d9e733edab12e8537350815be472b063e56cfde5286df8922fdecb","0xa6fac04b6d86091158ebb286586ccfec2a95c9786e14d91a9c743f5f05546073e5e3cc717635a0c602cad8334e922346","0xa581b4af77feebc1fb897d49b5b507c6ad513d8f09b273328efbb24ef0d91eb740d01b4d398f2738125dacfe550330cd","0x81c4860cccf76a34f8a2bc3f464b7bfd3e909e975cce0d28979f457738a56e60a4af8e68a3992cf273b5946e8d7f76e2","0x8d1eaa09a3180d8af1cbaee673db5223363cc7229a69565f592fa38ba0f9d582cedf91e15dabd06ebbf2862fc0feba54","0x9832f49b0147f4552402e54593cfa51f99540bffada12759b71fcb86734be8e500eea2d8b3d036710bdf04c901432de9","0x8bdb0e8ec93b11e5718e8c13cb4f5de545d24829fd76161216340108098dfe5148ed25e3b57a89a516f09fa79043734d","0xab96f06c4b9b0b2c0571740b24fca758e6976315053a7ecb20119150a9fa416db2d3a2e0f8168b390bb063f0c1caf785","0xab777f5c52acd62ecf4d1f168b9cc8e1a9b45d4ec6a8ff52c583e867c2239aba98d7d3af977289b367edce03d9c2dfb1","0xa09d3ce5e748da84802436951acc3d3ea5d8ec1d6933505ed724d6b4b0d69973ab0930daec9c6606960f6e541e4a3ce2","0x8ef94f7be4d85d5ad3d779a5cf4d7b2fc3e65c52fb8e1c3c112509a4af77a0b5be994f251e5e40fabeeb1f7d5615c22b","0xa7406a5bf5708d9e10922d3c5c45c03ef891b8d0d74ec9f28328a72be4cdc05b4f2703fa99366426659dfca25d007535","0xb7f52709669bf92a2e070bfe740f422f0b7127392c5589c7f0af71bb5a8428697c762d3c0d74532899da24ea7d8695c2","0xb9dfb0c8df84104dbf9239ccefa4672ef95ddabb8801b74997935d1b81a78a6a5669a3c553767ec19a1281f6e570f4ff","0xae4d5c872156061ce9195ac640190d8d71dd406055ee43ffa6f9893eb24b870075b74c94d65bc1d5a07a6573282b5520","0xafe6bd3eb72266d333f1807164900dcfa02a7eb5b1744bb3c86b34b3ee91e3f05e38fa52a50dc64eeb4bdb1dd62874b8","0x948043cf1bc2ef3c01105f6a78dc06487f57548a3e6ef30e6ebc51c94b71e4bf3ff6d0058c72b6f3ecc37efd7c7fa8c0","0xa22fd17c2f7ffe552bb0f23fa135584e8d2d8d75e3f742d94d04aded2a79e22a00dfe7acbb57d44e1cdb962fb22ae170","0x8cd0f4e9e4fb4a37c02c1bde0f69359c43ab012eb662d346487be0c3758293f1ca560122b059b091fddce626383c3a8f","0x90499e45f5b9c81426f3d735a52a564cafbed72711d9279fdd88de8038e953bc48c57b58cba85c3b2e4ce56f1ddb0e11","0x8c30e4c034c02958384564cac4f85022ef36ab5697a3d2feaf6bf105049675bbf23d01b4b6814711d3d9271abff04cac","0x81f7999e7eeea30f3e1075e6780bbf054f2fb6f27628a2afa4d41872a385b4216dd5f549da7ce6cf39049b2251f27fb7","0xb36a7191f82fc39c283ffe53fc1f5a9a00b4c64eee7792a8443475da9a4d226cf257f226ea9d66e329af15d8f04984ec","0xaad4da528fdbb4db504f3041c747455baff5fcd459a2efd78f15bdf3aea0bdb808343e49df88fe7a7c8620009b7964a3","0x99ebd8c6dd5dd299517fb6381cfc2a7f443e6e04a351440260dd7c2aee3f1d8ef06eb6c18820b394366ecdfd2a3ce264","0x8873725b81871db72e4ec3643084b1cdce3cbf80b40b834b092767728605825c19b6847ad3dcf328438607e8f88b4410","0xb008ee2f895daa6abd35bd39b6f7901ae4611a11a3271194e19da1cdcc7f1e1ea008fe5c5440e50d2c273784541ad9c5","0x9036feafb4218d1f576ef89d0e99124e45dacaa6d816988e34d80f454d10e96809791d5b78f7fd65f569e90d4d7238c5","0x92073c1d11b168e4fa50988b0288638b4868e48bbc668c5a6dddf5499875d53be23a285acb5e4bad60114f6cf6c556e9","0x88c87dfcb8ba6cbfe7e1be081ccfadbd589301db2cb7c99f9ee5d7db90aa297ed1538d5a867678a763f2deede5fd219a","0xb42a562805c661a50f5dea63108002c0f27c0da1
13da6a9864c9feb5552225417c0356c4209e8e012d9bcc9d182c7611","0x8e6317d00a504e3b79cd47feb4c60f9df186467fe9ca0f35b55c0364db30528f5ff071109dabb2fc80bb9cd4949f0c24","0xb7b1ea6a88694f8d2f539e52a47466695e39e43a5eb9c6f23bca15305fe52939d8755cc3ac9d6725e60f82f994a3772f","0xa3cd55161befe795af93a38d33290fb642b8d80da8b786c6e6fb02d393ea308fbe87f486994039cbd7c7b390414594b6","0xb416d2d45b44ead3b1424e92c73c2cf510801897b05d1724ff31cbd741920cd858282fb5d6040fe1f0aa97a65bc49424","0x950ee01291754feace97c2e933e4681e7ddfbc4fcd079eb6ff830b0e481d929c93d0c7fb479c9939c28ca1945c40da09","0x869bd916aee8d86efe362a49010382674825d49195b413b4b4018e88ce43fe091b475d0b863ff0ba2259400f280c2b23","0x9782f38cd9c9d3385ec286ebbc7cba5b718d2e65a5890b0a5906b10a89dc8ed80d417d71d7c213bf52f2af1a1f513ea7","0x91cd33bc2628d096269b23faf47ee15e14cb7fdc6a8e3a98b55e1031ea0b68d10ba30d97e660f7e967d24436d40fad73","0x8becc978129cc96737034c577ae7225372dd855da8811ae4e46328e020c803833b5bdbc4a20a93270e2b8bd1a2feae52","0xa36b1d8076783a9522476ce17f799d78008967728ce920531fdaf88303321bcaf97ecaa08e0c01f77bc32e53c5f09525","0xb4720e744943f70467983aa34499e76de6d59aa6fadf86f6b787fdce32a2f5b535b55db38fe2da95825c51002cfe142d","0x91ad21fc502eda3945f6de874d1b6bf9a9a7711f4d61354f9e5634fc73f9c06ada848de15ab0a75811d3250be862827d","0x84f78e2ebf5fc077d78635f981712daf17e2475e14c2a96d187913006ad69e234746184a51a06ef510c9455b38acb0d7","0x960aa7906e9a2f11db64a26b5892ac45f20d2ccb5480f4888d89973beb6fa0dfdc06d68d241ff5ffc7f1b82b1aac242d","0xa99365dcd1a00c66c9db6924b97c920f5c723380e823b250db85c07631b320ec4e92e586f7319e67a522a0578f7b6d6c","0xa25d92d7f70cf6a88ff317cfec071e13774516da664f5fac0d4ecaa65b8bf4eb87a64a4d5ef2bd97dfae98d388dbf5cc","0xa7af47cd0041295798f9779020a44653007444e8b4ef0712982b06d0dcdd434ec4e1f7c5f7a049326602cb605c9105b7","0xaefe172eac5568369a05980931cc476bebd9dea573ba276d59b9d8c4420784299df5a910033b7e324a6c2dfc62e3ef05","0xb69bc9d22ffa645baa55e3e02522e9892bb2daa7fff7c15846f13517d0799766883ee09ae0869df4139150c5b843ca8a","0x95a10856140e493354fdd12722c7fdded21b6a2ffbc78aa2697104af8ad0c8e2206f44b0bfee077ef3949d46bbf7c16b","0x891f2fcd2c47cbea36b7fa715968540c233313f05333f09d29aba23c193f462ed490dd4d00969656e89c53155fdfe710","0xa6c33e18115e64e385c843dde34e8a228222795c7ca90bc2cc085705d609025f3351d9be61822c69035a49fb3e48f2d5","0xb87fb12f12c0533b005adad0487f03393ff682e13575e3cb57280c3873b2c38ba96a63c49eef7a442753d26b7005230b","0xb905c02ba451bfd411c135036d92c27af3b0b1c9c2f1309d6948544a264b125f39dd41afeff4666b12146c545adc168a","0x8b29c513f43a78951cf742231cf5457a6d9d55edf45df5481a0f299a418d94effef561b15d2c1a01d1b8067e7153fda9","0xb9941cccd51dc645920d2781c81a317e5a33cb7cf76427b60396735912cb6d2ca9292bb4d36b6392467d390d2c58d9f3","0xa8546b627c76b6ef5c93c6a98538d8593dbe21cb7673fd383d5401b0c935eea0bdeeefeb1af6ad41bad8464fb87bbc48","0xaa286b27de2812de63108a1aec29d171775b69538dc6198640ac1e96767c2b83a50391f49259195957d457b493b667c9","0xa932fb229f641e9abbd8eb2bd874015d97b6658ab6d29769fc23b7db9e41dd4f850382d4c1f08af8f156c5937d524473","0xa1412840fcc86e2aeec175526f2fb36e8b3b8d21a78412b7266daf81e51b3f68584ed8bd42a66a43afdd8c297b320520","0x89c78be9efb624c97ebca4fe04c7704fa52311d183ffd87737f76b7dadc187c12c982bd8e9ed7cd8beb48cdaafd2fd01","0xa3f5ddec412a5bec0ce15e3bcb41c6214c2b05d4e9135a0d33c8e50a78eaba71e0a5a6ea8b45854dec5c2ed300971fc2","0x9721f9cec7a68b7758e3887548790de49fa6a442d0396739efa20c2f50352a7f91d300867556d11a703866def2d5f7b5","0xa23764e140a87e5991573521af039630dd28128bf56eed2edbed130fd4278e090b60cf5a1dca9de2910603d44b9f6d45","0xa1a6494a994215e48ab55c70efa8ffdddce6e92403c38ae7e8dd2f8288ca
d460c6c7db526bbdf578e96ca04d9fe12797","0xb1705ea4cb7e074efe0405fc7b8ee2ec789af0426142f3ec81241cacd4f7edcd88e39435e4e4d8e7b1df64f3880d6613","0x85595d061d677116089a6064418b93eb44ff79e68d12bd9625078d3bbc440a60d0b02944eff6054433ee34710ae6fbb4","0x9978d5e30bedb7526734f9a1febd973a70bfa20890490e7cc6f2f9328feab1e24f991285dbc3711d892514e2d7d005ad","0xaf30243c66ea43b9f87a061f947f7bce745f09194f6e95f379c7582b9fead920e5d6957eaf05c12ae1282ada4670652f","0xa1930efb473f88001e47aa0b2b2a7566848cccf295792e4544096ecd14ee5d7927c173a8576b405bfa2eec551cd67eb5","0xb0446d1c590ee5a45f7e22d269c044f3848c97aec1d226b44bfd0e94d9729c28a38bccddc3a1006cc5fe4e3c24f001f2","0xb8a8380172df3d84b06176df916cf557966d4f2f716d3e9437e415d75b646810f79f2b2b71d857181b7fc944018883a3","0xa563afec25b7817bfa26e19dc9908bc00aa8fc3d19be7d6de23648701659009d10e3e4486c28e9c6b13d48231ae29ac5","0xa5a8e80579de886fb7d6408f542791876885947b27ad6fa99a8a26e381f052598d7b4e647b0115d4b5c64297e00ce28e","0x8f87afcc7ad33c51ac719bade3cd92da671a37a82c14446b0a2073f4a0a23085e2c8d31913ed2d0be928f053297de8f6","0xa43c455ce377e0bc434386c53c752880687e017b2f5ae7f8a15c044895b242dffde4c92fb8f8bb50b18470b17351b156","0x8368f8b12a5bceb1dba25adb3a2e9c7dc9b1a77a1f328e5a693f5aec195cd1e06b0fe9476b554c1c25dac6c4a5b640a3","0x919878b27f3671fc78396f11531c032f3e2bd132d04cc234fa4858676b15fb1db3051c0b1db9b4fc49038216f11321ce","0xb48cd67fb7f1242696c1f877da4bdf188eac676cd0e561fbac1a537f7b8229aff5a043922441d603a26aae56a15faee4","0xa3e0fdfd4d29ea996517a16f0370b54787fefe543c2fe73bfc6f9e560c1fd30dad8409859e2d7fa2d44316f24746c712","0x8bb156ade8faf149df7bea02c140c7e392a4742ae6d0394d880a849127943e6f26312033336d3b9fdc0092d71b5efe87","0x8845e5d5cc555ca3e0523244300f2c8d7e4d02aaebcb5bd749d791208856c209a6f84dd99fd55968c9f0ab5f82916707","0xa3e90bb5c97b07789c2f32dff1aec61d0a2220928202f5ad5355ae71f8249237799d6c8a22602e32e572cb12eabe0c17","0xb150bcc391884c996149dc3779ce71f15dda63a759ee9cc05871f5a8379dcb62b047098922c0f26c7bd04deb394c33f9","0x95cd4ad88d51f0f2efcfd0c2df802fe252bb9704d1afbf9c26a248df22d55da87bdfaf41d7bc6e5df38bd848f0b13f42","0xa05a49a31e91dff6a52ac8b9c2cfdd646a43f0d488253f9e3cfbce52f26667166bbb9b608fc358763a65cbf066cd6d05","0xa59c3c1227fdd7c2e81f5e11ef5c406da44662987bac33caed72314081e2eed66055d38137e01b2268e58ec85dd986c0","0xb7020ec3bd73a99861f0f1d88cf5a19abab1cbe14b7de77c9868398c84bb8e18dbbe9831838a96b6d6ca06e82451c67b","0x98d1ff2525e9718ee59a21d8900621636fcd873d9a564b8dceb4be80a194a0148daf1232742730b3341514b2e5a5436c","0x886d97b635975fc638c1b6afc493e5998ca139edba131b75b65cfe5a8e814f11bb678e0eeee5e6e5cd913ad3f2fefdfc","0x8fb9fd928d38d5d813b671c924edd56601dd7163b686c13f158645c2f869d9250f3859aa5463a39258c90fef0f41190a","0xaac35e1cd655c94dec3580bb3800bd9c2946c4a9856f7d725af15fbea6a2d8ca51c8ad2772abed60ee0e3fb9cb24046b","0xb8d71fa0fa05ac9e443c9b4929df9e7f09a919be679692682e614d24227e04894bfc14a5c73a62fb927fedff4a0e4aa7","0xa45a19f11fbbb531a704badbb813ed8088ab827c884ee4e4ebf363fa1132ff7cfa9d28be9c85b143e4f7cdbc94e7cf1a","0x82b54703a4f295f5471b255ab59dce00f0fe90c9fb6e06b9ee48b15c91d43f4e2ef4a96c3118aeb03b08767be58181bb","0x8283264c8e6d2a36558f0d145c18576b6600ff45ff99cc93eca54b6c6422993cf392668633e5df396b9331e873d457e5","0x8c549c03131ead601bc30eb6b9537b5d3beb7472f5bb1bcbbfd1e9f3704477f7840ab3ab7f7dc13bbbbcdff886a462d4","0xafbb0c520ac1b5486513587700ad53e314cb74bfbc12e0b5fbdcfdaac36d342e8b59856196a0d84a25cff6e6e1d17e76","0x89e4c22ffb51f2829061b3c7c1983c5c750cad158e3a825d46f7cf875677da5d63f653d8a297022b5db5845c9271b32b","0xafb27a86c4c2373088c96b9adf4433f2ebfc78ac5c526e9f0510670b6e4e5e0057c0a4f75b185e1a
30331b9e805c1c15","0xa18e16b57445f88730fc5d3567bf5a176861dc14c7a08ed2996fe80eed27a0e7628501bcb78a1727c5e9ac55f29c12c4","0x93d61bf88b192d6825cf4e1120af1c17aa0f994d158b405e25437eaeefae049f7b721a206e7cc8a04fdc29d3c42580a1","0xa99f2995a2e3ed2fd1228d64166112038de2f516410aa439f4c507044e2017ea388604e2d0f7121256fadf7fbe7023d1","0x914fd91cffc23c32f1c6d0e98bf660925090d873367d543034654389916f65f552e445b0300b71b61b721a72e9a5983c","0xb42a578a7787b71f924e7def425d849c1c777156b1d4170a8ee7709a4a914e816935131afd9a0412c4cb952957b20828","0x82fb30590e84b9e45db1ec475a39971cf554dc01bcc7050bc89265740725c02e2be5a972168c5170c86ae83e5b0ad2c0","0xb14f8d8e1e93a84976289e0cf0dfa6f3a1809e98da16ee5c4932d0e1ed6bf8a07697fdd4dd86a3df84fb0003353cdcc0","0x85d7a2f4bda31aa2cb208b771fe03291a4ebdaf6f1dc944c27775af5caec412584c1f45bc741fca2a6a85acb3f26ad7d","0xaf02e56ce886ff2253bc0a68faad76f25ead84b2144e5364f3fb9b648f03a50ee9dc0b2c33ebacf7c61e9e43201ef9ef","0x87e025558c8a0b0abd06dfc350016847ea5ced7af2d135a5c9eec9324a4858c4b21510fb0992ec52a73447f24945058e","0x80fff0bafcd058118f5e7a4d4f1ae0912efeb281d2cbe4d34ba8945cc3dbe5d8baf47fb077343b90b8d895c90b297aca","0xb6edcf3a40e7b1c3c0148f47a263cd819e585a51ef31c2e35a29ce6f04c53e413f743034c0d998d9c00a08ba00166f31","0xabb87ed86098c0c70a76e557262a494ff51a30fb193f1c1a32f8e35eafa34a43fcc07aa93a3b7a077d9e35afa07b1a3d","0xa280214cd3bb0fb7ecd2d8bcf518cbd9078417f2b91d2533ec2717563f090fb84f2a5fcfdbbeb2a2a1f8a71cc5aa5941","0xa63083ca7238ea2b57d15a475963cf1d4f550d8cd76db290014a0461b90351f1f26a67d674c837b0b773b330c7c3d534","0xa8fa39064cb585ece5263e2f42f430206476bf261bd50f18d2b694889bd79d04d56410664cecad62690e5c5a20b3f6ff","0x85ba52ce9d700a5dcf6c5b00559acbe599d671ce5512467ff4b6179d7fad550567ce2a9c126a50964e3096458ea87920","0xb913501e1008f076e5eac6d883105174f88b248e1c9801e568fefaffa1558e4909364fc6d9512aa4d125cbd7cc895f05","0x8eb33b5266c8f2ed4725a6ad147a322e44c9264cf261c933cbbe230a43d47fca0f29ec39756b20561dabafadd5796494","0x850ebc8b661a04318c9db5a0515066e6454fa73865aa4908767a837857ecd717387f614acb614a88e075d4edc53a2f5a","0xa08d6b92d866270f29f4ce23a3f5d99b36b1e241a01271ede02817c8ec3f552a5c562db400766c07b104a331835c0c64","0x8131804c89bb3e74e9718bfc4afa547c1005ff676bd4db9604335032b203390cfa54478d45c6c78d1fe31a436ed4be9f","0x9106d94f23cc1eacec8316f16d6f0a1cc160967c886f51981fdb9f3f12ee1182407d2bb24e5b873de58cb1a3ee915a6b","0xa13806bfc3eae7a7000c9d9f1bd25e10218d4e67f59ae798b145b098bca3edad2b1040e3fc1e6310e612fb8818f459ac","0x8c69fbca502046cb5f6db99900a47b34117aef3f4b241690cdb3b84ca2a2fc7833e149361995dc41fa78892525bce746","0x852c473150c91912d58ecb05769222fa18312800c3f56605ad29eec9e2d8667b0b81c379048d3d29100ed2773bb1f3c5","0xb1767f6074426a00e01095dbb1795beb4e4050c6411792cbad6537bc444c3165d1058bafd1487451f9c5ddd209e0ae7e","0x80c600a5fe99354ce59ff0f84c760923dc8ff66a30bf47dc0a086181785ceb01f9b951c4e66df800ea6d705e8bc47055","0xb5cf19002fbc88a0764865b82afcb4d64a50196ea361e5c71dff7de084f4dcbbc34ec94a45cc9e0247bd51da565981aa","0x93e67a254ea8ce25e112d93cc927fadaa814152a2c4ec7d9a56eaa1ed47aec99b7e9916b02e64452cc724a6641729bbb","0xace70b32491bda18eee4a4d041c3bc9effae9340fe7e6c2f5ad975ee0874c17f1a7da7c96bd85fccff9312c518fac6e9","0xab4cfa02065017dd7f1aadc66f2c92f78f0f11b8597c03a5d69d82cb2eaf95a4476a836ac102908f137662472c8d914b","0xa40b8cd8deb8ae503d20364d64cab7c2801b7728a9646ed19c65edea6a842756a2f636283494299584ad57f4bb12cd0b","0x8594e11d5fc2396bcd9dbf5509ce4816dbb2b7305168021c426171fb444d111da5a152d6835ad8034542277011c26c0e","0x8024de98c26b4c994a66628dc304bb737f4b6859c86ded552c5abb81fd4c6c2e19d5a30beed398a694b9b2fdea1dd06a","0
x8843f5872f33f54df8d0e06166c1857d733995f67bc54abb8dfa94ad92407cf0179bc91b0a50bbb56cdc2b350d950329","0xb8bab44c7dd53ef9edf497dcb228e2a41282c90f00ba052fc52d57e87b5c8ab132d227af1fcdff9a12713d1f980bcaae","0x982b4d7b29aff22d527fd82d2a52601d95549bfb000429bb20789ed45e5abf1f4b7416c7b7c4b79431eb3574b29be658","0x8eb1f571b6a1878e11e8c1c757e0bc084bab5e82e897ca9be9b7f4b47b91679a8190bf0fc8f799d9b487da5442415857","0xa6e74b588e5af935c8b243e888582ef7718f8714569dd4992920740227518305eb35fab674d21a5551cca44b3e511ef2","0xa30fc2f3a4cb4f50566e82307de73cd7bd8fe2c1184e9293c136a9b9e926a018d57c6e4f308c95b9eb8299e94d90a2a1","0xa50c5869ca5d2b40722c056a32f918d47e0b65ca9d7863ca7d2fb4a7b64fe523fe9365cf0573733ceaadebf20b48fff8","0x83bbdd32c04d17581418cf360749c7a169b55d54f2427390defd9f751f100897b2d800ce6636c5bbc046c47508d60c8c","0xa82904bdf614de5d8deaff688c8a5e7ac5b3431687acbcda8fa53960b7c417a39c8b2e462d7af91ce6d79260f412db8e","0xa4362e31ff4b05d278b033cf5eebea20de01714ae16d4115d04c1da4754269873afc8171a6f56c5104bfd7b0db93c3e7","0xb5b8daa63a3735581e74a021b684a1038cea77168fdb7fdf83c670c2cfabcfc3ab2fc7359069b5f9048188351aef26b5","0xb48d723894b7782d96ac8433c48faca1bdfa5238019c451a7f47d958097cce3ae599b876cf274269236b9d6ff8b6d7ca","0x98ffff6a61a3a6205c7820a91ca2e7176fab5dba02bc194c4d14942ac421cb254183c705506ab279e4f8db066f941c6c","0xae7db24731da2eaa6efc4f7fcba2ecc26940ddd68038dce43acf2cee15b72dc4ef42a7bfdd32946d1ed78786dd7696b3","0xa656db14f1de9a7eb84f6301b4acb2fbf78bfe867f48a270e416c974ab92821eb4df1cb881b2d600cfed0034ac784641","0xaa315f8ecba85a5535e9a49e558b15f39520fce5d4bf43131bfbf2e2c9dfccc829074f9083e8d49f405fb221d0bc4c3c","0x90bffba5d9ff40a62f6c8e9fc402d5b95f6077ed58d030c93e321b8081b77d6b8dac3f63a92a7ddc01585cf2c127d66c","0xabdd733a36e0e0f05a570d0504e73801bf9b5a25ff2c78786f8b805704997acb2e6069af342538c581144d53149fa6d3","0xb4a723bb19e8c18a01bd449b1bb3440ddb2017f10bb153da27deb7a6a60e9bb37619d6d5435fbb1ba617687838e01dd0","0x870016b4678bab3375516db0187a2108b2e840bae4d264b9f4f27dbbc7cc9cac1d7dc582d7a04d6fd1ed588238e5e513","0x80d33d2e20e8fc170aa3cb4f69fffb72aeafb3b5bb4ea0bc79ab55da14142ca19b2d8b617a6b24d537366e3b49cb67c3","0xa7ee76aec273aaae03b3b87015789289551969fb175c11557da3ab77e39ab49d24634726f92affae9f4d24003050d974","0x8415ea4ab69d779ebd42d0fe0c6aef531d6a465a5739e429b1fcf433ec45aa8296c527e965a20f0ec9f340c9273ea3cf","0x8c7662520794e8b4405d0b33b5cac839784bc86a5868766c06cbc1fa306dbe334978177417b31baf90ce7b0052a29c56","0x902b2abecc053a3dbdea9897ee21e74821f3a1b98b2d560a514a35799f4680322550fd3a728d4f6d64e1de98033c32b8","0xa05e84ed9ecab8d508d670c39f2db61ad6e08d2795ec32a3c9d0d3737ef3801618f4fc2a95f90ec2f068606131e076c5","0x8b9208ff4d5af0c2e3f53c9375da666773ac57197dfabb0d25b1c8d0588ba7f3c15ee9661bb001297f322ea2fbf6928b","0xa3c827741b34a03254d4451b5ab74a96f2b9f7fb069e2f5adaf54fd97cc7a4d516d378db5ca07da87d8566d6eef13726","0x8509d8a3f4a0ed378e0a1e28ea02f6bf1d7f6c819c6c2f5297c7df54c895b848f841653e32ba2a2c22c2ff739571acb8","0xa0ce988b7d3c40b4e496aa83a09e4b5472a2d98679622f32bea23e6d607bc7de1a5374fb162bce0549a67dad948519be","0xaa8a3dd12bd60e3d2e05f9c683cdcb8eab17fc59134815f8d197681b1bcf65108cba63ac5c58ee632b1e5ed6bba5d474","0x8b955f1d894b3aefd883fb4b65f14cd37fc2b9db77db79273f1700bef9973bf3fd123897ea2b7989f50003733f8f7f21","0xac79c00ddac47f5daf8d9418d798d8af89fc6f1682e7e451f71ea3a405b0d36af35388dd2a332af790bc83ca7b819328","0xa0d44dd2a4438b809522b130d0938c3fe7c5c46379365dbd1810a170a9aa5818e1c783470dd5d0b6d4ac7edbb7330910","0xa30b69e39ad43dd540a43c521f05b51b5f1b9c4eed54b8162374ae11eac25da4f5756e7b70ce9f3c92c2eeceee7431ed","0xac43220b762c299c795
1222ea19761ab938bf38e4972deef58ed84f4f9c68c230647cf7506d7cbfc08562fcca55f0485","0xb28233b46a8fb424cfa386a845a3b5399d8489ceb83c8f3e05c22c934798d639c93718b7b68ab3ce24c5358339e41cbb","0xac30d50ee8ce59a10d4b37a3a35e62cdb2273e5e52232e202ca7d7b8d09d28958ee667fae41a7bb6cdc6fe8f6e6c9c85","0xb199842d9141ad169f35cc7ff782b274cbaa645fdb727761e0a89edbf0d781a15f8218b4bf4eead326f2903dd88a9cc1","0x85e018c7ddcad34bb8285a737c578bf741ccd547e68c734bdb3808380e12c5d4ef60fc896b497a87d443ff9abd063b38","0x8c856e6ba4a815bdb891e1276f93545b7072f6cb1a9aa6aa5cf240976f29f4dee01878638500a6bf1daf677b96b54343","0xb8a47555fa8710534150e1a3f13eab33666017be6b41005397afa647ea49708565f2b86b77ad4964d140d9ced6b4d585","0x8cd1f1db1b2f4c85a3f46211599caf512d5439e2d8e184663d7d50166fd3008f0e9253272f898d81007988435f715881","0xb1f34b14612c973a3eceb716dc102b82ab18afef9de7630172c2780776679a7706a4874e1df3eaadf541fb009731807f","0xb25464af9cff883b55be2ff8daf610052c02df9a5e147a2cf4df6ce63edcdee6dc535c533590084cc177da85c5dc0baa","0x91c3c4b658b42d8d3448ae1415d4541d02379a40dc51e36a59bd6e7b9ba3ea51533f480c7c6e8405250ee9b96a466c29","0x86dc027b95deb74c36a58a1333a03e63cb5ae22d3b29d114cfd2271badb05268c9d0c819a977f5e0c6014b00c1512e3a","0xae0e6ff58eb5fa35da5107ebeacf222ab8f52a22bb1e13504247c1dfa65320f40d97b0e6b201cb6613476687cb2f0681","0x8f13415d960b9d7a1d93ef28afc2223e926639b63bdefce0f85e945dfc81670a55df288893a0d8b3abe13c5708f82f91","0x956f67ca49ad27c1e3a68c1faad5e7baf0160c459094bf6b7baf36b112de935fdfd79fa4a9ea87ea8de0ac07272969f4","0x835e45e4a67df9fb51b645d37840b3a15c171d571a10b03a406dd69d3c2f22df3aa9c5cbe1e73f8d767ce01c4914ea9a","0x919b938e56d4b32e2667469d0bdccb95d9dda3341aa907683ee70a14bbbe623035014511c261f4f59b318b610ac90aa3","0x96b48182121ccd9d689bf1dfdc228175564cd68dc904a99c808a7f0053a6f636c9d953e12198bdf2ea49ea92772f2e18","0xac5e5a941d567fa38fdbcfa8cf7f85bb304e3401c52d88752bcd516d1fa9bac4572534ea2205e38423c1df065990790f","0xac0bd594fb85a8d4fc26d6df0fa81f11919401f1ecf9168b891ec7f061a2d9368af99f7fd8d9b43b2ce361e7b8482159","0x83d92c69ca540d298fe80d8162a1c7af3fa9b49dfb69e85c1d136a3ec39fe419c9fa78e0bb6d96878771fbd37fe92e40","0xb35443ae8aa66c763c2db9273f908552fe458e96696b90e41dd509c17a5c04ee178e3490d9c6ba2dc0b8f793c433c134","0x923b2d25aa45b2e580ffd94cbb37dc8110f340f0f011217ee1bd81afb0714c0b1d5fb4db86006cdd2457563276f59c59","0x96c9125d38fca1a61ac21257b696f8ac3dae78def50285e44d90ea293d591d1c58f703540a7e4e99e070afe4646bbe15","0xb57946b2332077fbcdcb406b811779aefd54473b5559a163cd65cb8310679b7e2028aa55c12a1401fdcfcac0e6fae29a","0x845daedc5cf972883835d7e13c937b63753c2200324a3b8082a6c4abb4be06c5f7c629d4abe4bfaf1d80a1f073eb6ce6","0x91a55dfd0efefcd03dc6dacc64ec93b8d296cb83c0ee72400a36f27246e7f2a60e73b7b70ba65819e9cfb73edb7bd297","0x8874606b93266455fe8fdd25df9f8d2994e927460af06f2e97dd4d2d90db1e6b06d441b72c2e76504d753badca87fb37","0x8ee99e6d231274ff9252c0f4e84549da173041299ad1230929c3e3d32399731c4f20a502b4a307642cac9306ccd49d3c","0x8836497714a525118e20849d6933bb8535fb6f72b96337d49e3133d936999c90a398a740f42e772353b5f1c63581df6d","0xa6916945e10628f7497a6cdc5e2de113d25f7ade3e41e74d3de48ccd4fce9f2fa9ab69645275002e6f49399b798c40af","0x9597706983107eb23883e0812e1a2c58af7f3499d50c6e29b455946cb9812fde1aa323d9ed30d1c0ffd455abe32303cd","0xa24ee89f7f515cc33bdbdb822e7d5c1877d337f3b2162303cfc2dae028011c3a267c5cb4194afa63a4856a6e1c213448","0x8cd25315e4318801c2776824ae6e7d543cb85ed3bc2498ba5752df2e8142b37653cf9e60104d674be3aeb0a66912e97a","0xb5085ecbe793180b40dbeb879f4c976eaaccaca3a5246807dced5890e0ed24d35f3f86955e2460e14fb44ff5081c07ba","0x960188cc0b4f908633a6840963a6fa2205fc42c
511c6c309685234911c5304ef4c304e3ae9c9c69daa2fb6a73560c256","0xa32d0a70bf15d569b4cda5aebe3e41e03c28bf99cdd34ffa6c5d58a097f322772acca904b3a47addb6c7492a7126ebac","0x977f72d06ad72d4aa4765e0f1f9f4a3231d9f030501f320fe7714cc5d329d08112789fa918c60dd7fdb5837d56bb7fc6","0x99fa038bb0470d45852bb871620d8d88520adb701712fcb1f278fed2882722b9e729e6cdce44c82caafad95e37d0e6f7","0xb855e8f4fc7634ada07e83b6c719a1e37acb06394bc8c7dcab7747a8c54e5df3943915f021364bd019fdea103864e55f","0x88bc2cd7458532e98c596ef59ea2cf640d7cc31b4c33cef9ed065c078d1d4eb49677a67de8e6229cc17ea48bace8ee5a","0xaaa78a3feaa836d944d987d813f9b9741afb076e6aca1ffa42682ab06d46d66e0c07b8f40b9dbd63e75e81efa1ef7b08","0xb7b080420cc4d808723b98b2a5b7b59c81e624ab568ecdfdeb8bf3aa151a581b6f56e983ef1b6f909661e25db40b0c69","0xabee85c462ac9a2c58e54f06c91b3e5cd8c5f9ab5b5deb602b53763c54826ed6deb0d6db315a8d7ad88733407e8d35e2","0x994d075c1527407547590df53e9d72dd31f037c763848d1662eebd4cefec93a24328c986802efa80e038cb760a5300f5","0xab8777640116dfb6678e8c7d5b36d01265dfb16321abbfc277da71556a34bb3be04bc4ae90124ed9c55386d2bfb3bda0","0x967e3a828bc59409144463bcf883a3a276b5f24bf3cbfdd7a42343348cba91e00b46ac285835a9b91eef171202974204","0x875a9f0c4ffe5bb1d8da5e3c8e41d0397aa6248422a628bd60bfae536a651417d4e8a7d2fb98e13f2dad3680f7bd86d3","0xacaa330c3e8f95d46b1880126572b238dbb6d04484d2cd4f257ab9642d8c9fc7b212188b9c7ac9e0fd135c520d46b1bf","0xaceb762edbb0f0c43dfcdb01ea7a1ac5918ca3882b1e7ebc4373521742f1ed5250d8966b498c00b2b0f4d13212e6dd0b","0x81d072b4ad258b3646f52f399bced97c613b22e7ad76373453d80b1650c0ca87edb291a041f8253b649b6e5429bb4cff","0x980a47d27416ac39c7c3a0ebe50c492f8c776ea1de44d5159ac7d889b6d554357f0a77f0e5d9d0ff41aae4369eba1fc2","0x8b4dfd5ef5573db1476d5e43aacfb5941e45d6297794508f29c454fe50ea622e6f068b28b3debe8635cf6036007de2e3","0xa60831559d6305839515b68f8c3bc7abbd8212cc4083502e19dd682d56ca37c9780fc3ce4ec2eae81ab23b221452dc57","0x951f6b2c1848ced9e8a2339c65918e00d3d22d3e59a0a660b1eca667d18f8430d737884e9805865ef3ed0fe1638a22d9","0xb02e38fe790b492aa5e89257c4986c9033a8b67010fa2add9787de857d53759170fdd67715ca658220b4e14b0ca48124","0xa51007e4346060746e6b0e4797fc08ef17f04a34fe24f307f6b6817edbb8ce2b176f40771d4ae8a60d6152cbebe62653","0xa510005b05c0b305075b27b243c9d64bcdce85146b6ed0e75a3178b5ff9608213f08c8c9246f2ca6035a0c3e31619860","0xaaff4ef27a7a23be3419d22197e13676d6e3810ceb06a9e920d38125745dc68a930f1741c9c2d9d5c875968e30f34ab5","0x864522a9af9857de9814e61383bebad1ba9a881696925a0ea6bfc6eff520d42c506bbe5685a9946ed710e889765be4a0","0xb63258c080d13f3b7d5b9f3ca9929f8982a6960bdb1b0f8676f4dca823971601672f15e653917bf5d3746bb220504913","0xb51ce0cb10869121ae310c7159ee1f3e3a9f8ad498827f72c3d56864808c1f21fa2881788f19ece884d3f705cd7bd0c5","0x95d9cecfc018c6ed510e441cf84c712d9909c778c16734706c93222257f64dcd2a9f1bd0b400ca271e22c9c487014274","0x8beff4d7d0140b86380ff4842a9bda94c2d2be638e20ac68a4912cb47dbe01a261857536375208040c0554929ced1ddc","0x891ff49258749e2b57c1e9b8e04b12c77d79c3308b1fb615a081f2aacdfb4b39e32d53e069ed136fdbd43c53b87418fa","0x9625cad224e163d387738825982d1e40eeff35fe816d10d7541d15fdc4d3eee48009090f3faef4024b249205b0b28f72","0x8f3947433d9bd01aa335895484b540a9025a19481a1c40b4f72dd676bfcf332713714fd4010bde936eaf9470fd239ed0","0xa00ec2d67789a7054b53f0e858a8a232706ccc29a9f3e389df7455f1a51a2e75801fd78469a13dbc25d28399ae4c6182","0xa3f65884506d4a62b8775a0ea0e3d78f5f46bc07910a93cd604022154eabdf1d73591e304d61edc869e91462951975e1","0xa14eef4fd5dfac311713f0faa9a60415e3d30b95a4590cbf95f2033dffb4d16c02e7ceff3dcd42148a4e3bc49cce2dd4","0x8afa11c0eef3c540e1e3460bc759bb2b6ea90743623f88e62950c94e370
fe4fd01c22b6729beba4dcd4d581198d9358f","0xafb05548a69f0845ffcc5f5dc63e3cdb93cd270f5655173b9a950394b0583663f2b7164ba6df8d60c2e775c1d9f120af","0x97f179e01a947a906e1cbeafa083960bc9f1bade45742a3afee488dfb6011c1c6e2db09a355d77f5228a42ccaa7bdf8e","0x8447fca4d35f74b3efcbd96774f41874ca376bf85b79b6e66c92fa3f14bdd6e743a051f12a7fbfd87f319d1c6a5ce217","0xa57ca39c23617cd2cf32ff93b02161bd7baf52c4effb4679d9d5166406e103bc8f3c6b5209e17c37dbb02deb8bc72ddd","0x9667c7300ff80f0140be002b0e36caab07aaee7cce72679197c64d355e20d96196acaf54e06e1382167d081fe6f739c1","0x828126bb0559ce748809b622677267ca896fa2ee76360fd2c02990e6477e06a667241379ca7e65d61a5b64b96d7867de","0x8b8835dea6ba8cf61c91f01a4b3d2f8150b687a4ee09b45f2e5fc8f80f208ae5d142d8e3a18153f0722b90214e60c5a7","0xa98e8ff02049b4da386e3ee93db23bbb13dfeb72f1cfde72587c7e6d962780b7671c63e8ac3fbaeb1a6605e8d79e2f29","0x87a4892a0026d7e39ef3af632172b88337cb03669dea564bcdb70653b52d744730ebb5d642e20cb627acc9dbb547a26b","0x877352a22fc8052878a57effc159dac4d75fe08c84d3d5324c0bab6d564cdf868f33ceee515eee747e5856b62cfa0cc7","0x8b801ba8e2ff019ee62f64b8cb8a5f601fc35423eb0f9494b401050103e1307dc584e4e4b21249cd2c686e32475e96c3","0xa9e7338d6d4d9bfec91b2af28a8ed13b09415f57a3a00e5e777c93d768fdb3f8e4456ae48a2c6626b264226e911a0e28","0x99c05fedf40ac4726ed585d7c1544c6e79619a0d3fb6bda75a08c7f3c0008e8d5e19ed4da48de3216135f34a15eba17c","0xa61cce8a1a8b13a4a650fdbec0eeea8297c352a8238fb7cac95a0df18ed16ee02a3daa2de108fa122aca733bd8ad7855","0xb97f37da9005b440b4cb05870dd881bf8491fe735844f2d5c8281818583b38e02286e653d9f2e7fa5e74c3c3eb616540","0xa72164a8554da8e103f692ac5ebb4aece55d5194302b9f74b6f2a05335b6e39beede0bf7bf8c5bfd4d324a784c5fb08c","0xb87e8221c5341cd9cc8bb99c10fe730bc105550f25ed4b96c0d45e6142193a1b2e72f1b3857373a659b8c09be17b3d91","0xa41fb1f327ef91dcb7ac0787918376584890dd9a9675c297c45796e32d6e5985b12f9b80be47fc3a8596c245f419d395","0x90dafa3592bdbb3465c92e2a54c2531822ba0459d45d3e7a7092fa6b823f55af28357cb51896d4ec2d66029c82f08e26","0xa0a9adc872ebc396557f484f1dd21954d4f4a21c4aa5eec543f5fa386fe590839735c01f236574f7ff95407cd12de103","0xb8c5c940d58be7538acf8672852b5da3af34f82405ef2ce8e4c923f1362f97fc50921568d0fd2fe846edfb0823e62979","0x85aaf06a8b2d0dac89dafd00c28533f35dbd074978c2aaa5bef75db44a7b12aeb222e724f395513b9a535809a275e30b","0x81f3cbe82fbc7028c26a6c1808c604c63ba023a30c9f78a4c581340008dbda5ec07497ee849a2183fcd9124f7936af32","0xa11ac738de75fd60f15a34209d3825d5e23385796a4c7fc5931822f3f380af977dd0f7b59fbd58eed7777a071e21b680","0x85a279c493de03db6fa6c3e3c1b1b29adc9a8c4effc12400ae1128da8421954fa8b75ad19e5388fe4543b76fb0812813","0x83a217b395d59ab20db6c4adb1e9713fc9267f5f31a6c936042fe051ce8b541f579442f3dcf0fa16b9e6de9fd3518191","0x83a0b86e7d4ed8f9ccdc6dfc8ff1484509a6378fa6f09ed908e6ab9d1073f03011dc497e14304e4e3d181b57de06a5ab","0xa63ad69c9d25704ce1cc8e74f67818e5ed985f8f851afa8412248b2df5f833f83b95b27180e9e7273833ed0d07113d3b","0x99b1bc2021e63b561fe44ddd0af81fcc8627a91bfeecbbc989b642bc859abc0c8d636399701aad7bbaf6a385d5f27d61","0xb53434adb66f4a807a6ad917c6e856321753e559b1add70824e5c1e88191bf6993fccb9b8b911fc0f473fb11743acacd","0x97ed3b9e6fb99bf5f945d4a41f198161294866aa23f2327818cdd55cb5dc4c1a8eff29dd8b8d04902d6cd43a71835c82","0xb1e808260e368a18d9d10bdea5d60223ba1713b948c782285a27a99ae50cc5fc2c53d407de07155ecc16fb8a36d744a0","0xa3eb4665f18f71833fec43802730e56b3ee5a357ea30a888ad482725b169d6f1f6ade6e208ee081b2e2633079b82ba7d","0xab8beb2c8353fc9f571c18fdd02bdb977fc883313469e1277b0372fbbb33b80dcff354ca41de436d98d2ed710faa467e","0xaa9071cfa971e4a335a91ad634c98f2be51544cb21f040f2471d01bb97e1df2277ae1646e1ea8f5
5b7ba9f5c8c599b39","0x80b7dbfdcaf40f0678012acc634eba44ea51181475180d9deb2050dc4f2de395289edd0223018c81057ec79b04b04c49","0x89623d7f6cb17aa877af14de842c2d4ab7fd576d61ddd7518b5878620a01ded40b6010de0da3cdf31d837eecf30e9847","0xa773bb024ae74dd24761f266d4fb27d6fd366a8634febe8235376b1ae9065c2fe12c769f1d0407867dfbe9f5272c352f","0x8455a561c3aaa6ba64c881a5e13921c592b3a02e968f4fb24a2243c36202795d0366d9cc1a24e916f84d6e158b7aeac7","0x81d8bfc4b283cf702a40b87a2b96b275bdbf0def17e67d04842598610b67ea08c804d400c3e69fa09ea001eaf345b276","0xb8f8f82cb11fea1c99467013d7e167ff03deb0c65a677fab76ded58826d1ba29aa7cf9fcd7763615735ea3ad38e28719","0x89a6a04baf9cccc1db55179e1650b1a195dd91fb0aebc197a25143f0f393524d2589975e3fbfc2547126f0bced7fd6f2","0xb81b2162df045390f04df07cbd0962e6b6ca94275a63edded58001a2f28b2ae2af2c7a6cba4ecd753869684e77e7e799","0xa3757f722776e50de45c62d9c4a2ee0f5655a512344c4cbec542d8045332806568dd626a719ef21a4eb06792ca70f204","0x8c5590df96ec22179a4e8786de41beb44f987a1dcc508eb341eecbc0b39236fdfad47f108f852e87179ccf4e10091e59","0x87502f026ed4e10167419130b88c3737635c5b9074c364e1dd247cef5ef0fc064b4ae99b187e33301e438bbd2fe7d032","0xaf925a2165e980ced620ff12289129fe17670a90ae0f4db9d4b39bd887ccb1f5d2514ac9ecf910f6390a8fc66bd5be17","0x857fca899828cf5c65d26e3e8a6e658542782fc72762b3b9c73514919f83259e0f849a9d4838b40dc905fe43024d0d23","0x87ffebdbfb69a9e1007ebac4ffcb4090ff13705967b73937063719aa97908986effcb7262fdadc1ae0f95c3690e3245d","0xa9ff6c347ac6f4c6ab993b748802e96982eaf489dc69032269568412fc9a79e7c2850dfc991b28211b3522ee4454344b","0xa65b3159df4ec48bebb67cb3663cd744027ad98d970d620e05bf6c48f230fa45bf17527fe726fdf705419bb7a1bb913e","0x84b97b1e6408b6791831997b03cd91f027e7660fd492a93d95daafe61f02427371c0e237c75706412f442991dfdff989","0xab761c26527439b209af0ae6afccd9340bbed5fbe098734c3145b76c5d2cd7115d9227b2eb523882b7317fbb09180498","0xa0479a8da06d7a69c0b0fee60df4e691c19c551f5e7da286dab430bfbcabf31726508e20d26ea48c53365a7f00a3ad34","0xa732dfc9baa0f4f40b5756d2e8d8937742999623477458e0bc81431a7b633eefc6f53b3b7939fe0a020018549c954054","0x901502436a1169ba51dc479a5abe7c8d84e0943b16bc3c6a627b49b92cd46263c0005bc324c67509edd693f28e612af1","0xb627aee83474e7f84d1bab9b7f6b605e33b26297ac6bbf52d110d38ba10749032bd551641e73a383a303882367af429b","0x95108866745760baef4a46ef56f82da6de7e81c58b10126ebd2ba2cd13d339f91303bf2fb4dd104a6956aa3b13739503","0x899ed2ade37236cec90056f3569bc50f984f2247792defafcceb49ad0ca5f6f8a2f06573705300e07f0de0c759289ff5","0xa9f5eee196d608efe4bcef9bf71c646d27feb615e21252cf839a44a49fd89da8d26a758419e0085a05b1d59600e2dc42","0xb36c6f68fed6e6c85f1f4a162485f24817f2843ec5cbee45a1ebfa367d44892e464949c6669f7972dc7167af08d55d25","0xaaaede243a9a1b6162afbc8f571a52671a5a4519b4062e3f26777664e245ba873ed13b0492c5dbf0258c788c397a0e9e","0x972b4fb39c31cbe127bf9a32a5cc10d621ebdd9411df5e5da3d457f03b2ab2cd1f6372d8284a4a9400f0b06ecdbfd38e","0x8f6ca1e110e959a4b1d9a5ce5f212893cec21db40d64d5ac4d524f352d72198f923416a850bf845bc5a22a79c0ea2619","0xa0f3c93b22134f66f04b2553a53b738644d1665ceb196b8494b315a4c28236fb492017e4a0de4224827c78e42f9908b7","0x807fb5ee74f6c8735b0b5ca07e28506214fe4047dbeb00045d7c24f7849e98706aea79771241224939cb749cf1366c7d","0x915eb1ff034224c0b645442cdb7d669303fdc00ca464f91aaf0b6fde0b220a3a74ff0cb043c26c9f3a5667b3fdaa9420","0x8fda6cef56ed33fefffa9e6ac8e6f76b1af379f89761945c63dd448801f7bb8ca970504a7105fac2f74f652ccff32327","0x87380cffdcffb1d0820fa36b63cc081e72187f86d487315177d4d04da4533eb19a0e2ff6115ceab528887819c44a5164","0x8cd89e03411a18e7f16f968b89fb500c36d47d229f6487b99e62403a980058db5925ce249206743333538adfad168330","
0x974451b1df33522ce7056de9f03e10c70bf302c44b0741a59df3d6877d53d61a7394dcee1dd46e013d7cb9d73419c092","0x98c35ddf645940260c490f384a49496a7352bb8e3f686feed815b1d38f59ded17b1ad6e84a209e773ed08f7b8ff1e4c2","0x963f386cf944bb9b2ddebb97171b64253ea0a2894ac40049bdd86cda392292315f3a3d490ca5d9628c890cfb669f0acb","0x8d507712152babd6d142ee682638da8495a6f3838136088df9424ef50d5ec28d815a198c9a4963610b22e49b4cdf95e9","0x83d4bc6b0be87c8a4f1e9c53f257719de0c73d85b490a41f7420e777311640937320557ff2f1d9bafd1daaa54f932356","0x82f5381c965b7a0718441131c4d13999f4cdce637698989a17ed97c8ea2e5bdb5d07719c5f7be8688edb081b23ede0f4","0xa6ebecab0b72a49dfd01d69fa37a7f74d34fb1d4fef0aa10e3d6fceb9eccd671225c230af89f6eb514250e41a5f91f52","0x846d185bdad6e11e604df7f753b7a08a28b643674221f0e750ebdb6b86ec584a29c869e131bca868972a507e61403f6a","0x85a98332292acb744bd1c0fd6fdcf1f889a78a2c9624d79413ffa194cc8dfa7821a4b60cde8081d4b5f71f51168dd67f","0x8f7d97c3b4597880d73200d074eb813d95432306e82dafc70b580b8e08cb8098b70f2d07b4b3ac6a4d77e92d57035031","0x8185439c8751e595825d7053518cbe121f191846a38d4dbcb558c3f9d7a3104f3153401adaaaf27843bbe2edb504bfe3","0xb3c00d8ece1518fca6b1215a139b0a0e26d9cba1b3a424f7ee59f30ce800a5db967279ed60958dd1f3ee69cf4dd1b204","0xa2e6cb6978e883f9719c3c0d44cfe8de0cc6f644b98f98858433bea8bbe7b612c8aca5952fccce4f195f9d54f9722dc2","0x99663087e3d5000abbec0fbda4e7342ec38846cc6a1505191fb3f1a337cb369455b7f8531a6eb8b0f7b2c4baf83cbe2b","0xab0836c6377a4dbc7ca6a4d6cf021d4cd60013877314dd05f351706b128d4af6337711ed3443cb6ca976f40d74070a9a","0x87abfd5126152fd3bac3c56230579b489436755ea89e0566aa349490b36a5d7b85028e9fb0710907042bcde6a6f5d7e3","0x974ba1033f75f60e0cf7c718a57ae1da3721cf9d0fb925714c46f027632bdd84cd9e6de4cf4d00bc55465b1c5ebb7384","0xa607b49d73689ac64f25cec71221d30d53e781e1100d19a2114a21da6507a60166166369d860bd314acb226596525670","0xa7c2b0b915d7beba94954f2aa7dd08ec075813661e2a3ecca5d28a0733e59583247fed9528eb28aba55b972cdbaf06eb","0xb8b3123e44128cc8efbe3270f2f94e50ca214a4294c71c3b851f8cbb70cb67fe9536cf07d04bf7fe380e5e3a29dd3c15","0xa59a07e343b62ad6445a0859a32b58c21a593f9ddbfe52049650f59628c93715aa1f4e1f45b109321756d0eeec8a5429","0x94f51f8a4ed18a6030d0aaa8899056744bd0e9dc9ac68f62b00355cddab11da5da16798db75f0bfbce0e5bdfe750c0b6","0x97460a97ca1e1fa5ce243b81425edc0ec19b7448e93f0b55bc9785eedeeafe194a3c8b33a61a5c72990edf375f122777","0x8fa859a089bc17d698a7ee381f37ce9beadf4e5b44fce5f6f29762bc04f96faff5d58c48c73631290325f05e9a1ecf49","0xabdf38f3b20fc95eff31de5aa9ef1031abfa48f1305ee57e4d507594570401503476d3bcc493838fc24d6967a3082c7f","0xb8914bfb82815abb86da35c64d39ab838581bc0bf08967192697d9663877825f2b9d6fbdcf9b410463482b3731361aef","0xa8187f9d22b193a5f578999954d6ec9aa9b32338ccadb8a3e1ce5bad5ea361d69016e1cdfac44e9d6c54e49dd88561b9","0xaac262cb7cba7fd62c14daa7b39677cabc1ef0947dd06dd89cac8570006a200f90d5f0353e84f5ff03179e3bebe14231","0xa630ef5ece9733b8c46c0a2df14a0f37647a85e69c63148e79ffdcc145707053f9f9d305c3f1cf3c7915cb46d33abd07","0xb102c237cb2e254588b6d53350dfda6901bd99493a3fbddb4121d45e0b475cf2663a40d7b9a75325eda83e4ba1e68cb3","0x86a930dd1ddcc16d1dfa00aa292cb6c2607d42c367e470aa920964b7c17ab6232a7108d1c2c11fc40fb7496547d0bbf8","0xa832fdc4500683e72a96cce61e62ac9ee812c37fe03527ad4cf893915ca1962cee80e72d4f82b20c8fc0b764376635a1","0x88ad985f448dabb04f8808efd90f273f11f5e6d0468b5489a1a6a3d77de342992a73eb842d419034968d733f101ff683","0x98a8538145f0d86f7fbf9a81c9140f6095c5bdd8960b1c6f3a1716428cd9cca1bf8322e6d0af24e6169abcf7df2b0ff6","0x9048c6eba5e062519011e177e955a200b2c00b3a0b8615bdecdebc217559d41058d3315f6d05617be531ef0f6aef0e51","0x833bf225ab6fc68cdc
acf1ec1b50f9d05f5410e6cdcd8d56a3081dc2be8a8d07b81534d1ec93a25c2e270313dfb99e3b","0xa84bcd24c3da5e537e64a811b93c91bfc84d7729b9ead7f79078989a6eb76717d620c1fad17466a0519208651e92f5ff","0xb7cdd0a3fbd79aed93e1b5a44ca44a94e7af5ed911e4492f332e3a5ed146c7286bde01b52276a2fcc02780d2109874dd","0x8a19a09854e627cb95750d83c20c67442b66b35896a476358f993ba9ac114d32c59c1b3d0b8787ee3224cf3888b56c64","0xa9abd5afb8659ee52ada8fa5d57e7dd355f0a7350276f6160bec5fbf70d5f99234dd179eb221c913e22a49ec6d267846","0x8c13c4274c0d30d184e73eaf812200094bbbd57293780bdadbceb262e34dee5b453991e7f37c7333a654fc71c69d6445","0xa4320d73296ff8176ce0127ca1921c450e2a9c06eff936681ebaffb5a0b05b17fded24e548454de89aca2dcf6d7a9de4","0xb2b8b3e15c1f645f07783e5628aba614e60157889db41d8161d977606788842b67f83f361eae91815dc0abd84e09abd5","0xad26c3aa35ddfddc15719b8bb6c264aaec7065e88ac29ba820eb61f220fef451609a7bb037f3722d022e6c86e4f1dc88","0xb8615bf43e13ae5d7b8dd903ce37190800cd490f441c09b22aa29d7a29ed2c0417b7a08ead417868f1de2589deaadd80","0x8d3425e1482cd1e76750a76239d33c06b3554c3c3c87c15cb7ab58b1cee86a4c5c4178b44e23f36928365a1b484bde02","0x806893a62e38c941a7dd6f249c83af16596f69877cc737d8f73f6b8cd93cbc01177a7a276b2b8c6b0e5f2ad864db5994","0x86618f17fa4b0d65496b661bbb5ba3bc3a87129d30a4b7d4f515b904f4206ca5253a41f49fd52095861e5e065ec54f21","0x9551915da1304051e55717f4c31db761dcdcf3a1366c89a4af800a9e99aca93a357bf928307f098e62b44a02cb689a46","0x8f79c4ec0ec1146cb2a523b52fe33def90d7b5652a0cb9c2d1c8808a32293e00aec6969f5b1538e3a94cd1efa3937f86","0xa0c03e329a707300081780f1e310671315b4c6a4cedcb29697aedfabb07a9d5df83f27b20e9c44cf6b16e39d9ded5b98","0x86a7cfa7c8e7ce2c01dd0baec2139e97e8e090ad4e7b5f51518f83d564765003c65968f85481bbb97cb18f005ccc7d9f","0xa33811770c6dfda3f7f74e6ad0107a187fe622d61b444bbd84fd7ef6e03302e693b093df76f6ab39bb4e02afd84a575a","0x85480f5c10d4162a8e6702b5e04f801874d572a62a130be94b0c02b58c3c59bdcd48cd05f0a1c2839f88f06b6e3cd337","0x8e181011564b17f7d787fe0e7f3c87f6b62da9083c54c74fd6c357a1f464c123c1d3d8ade3cf72475000b464b14e2be3","0x8ee178937294b8c991337e0621ab37e9ffa4ca2bdb3284065c5e9c08aad6785d50cf156270ff9daf9a9127289710f55b","0x8bd1e8e2d37379d4b172f1aec96f2e41a6e1393158d7a3dbd9a95c8dd4f8e0b05336a42efc11a732e5f22b47fc5c271d","0x8f3da353cd487c13136a85677de8cedf306faae0edec733cf4f0046f82fa4639db4745b0095ff33a9766aba50de0cbcf","0x8d187c1e97638df0e4792b78e8c23967dac43d98ea268ca4aabea4e0fa06cb93183fd92d4c9df74118d7cc27bf54415e","0xa4c992f08c2f8bac0b74b3702fb0c75c9838d2ce90b28812019553d47613c14d8ce514d15443159d700b218c5a312c49","0xa6fd1874034a34c3ea962a316c018d9493d2b3719bb0ec4edbc7c56b240802b2228ab49bee6f04c8a3e9f6f24a48c1c2","0xb2efed8e799f8a15999020900dc2c58ece5a3641c90811b86a5198e593d7318b9d53b167818ccdfbe7df2414c9c34011","0x995ff7de6181ddf95e3ead746089c6148da3508e4e7a2323c81785718b754d356789b902e7e78e2edc6b0cbd4ff22c78","0x944073d24750a9068cbd020b834afc72d2dde87efac04482b3287b40678ad07588519a4176b10f2172a2c463d063a5cd","0x99db4b1bb76475a6fd75289986ef40367960279524378cc917525fb6ba02a145a218c1e9caeb99332332ab486a125ac0","0x89fce4ecd420f8e477af4353b16faabb39e063f3f3c98fde2858b1f2d1ef6eed46f0975a7c08f233b97899bf60ccd60a","0x8c09a4f07a02b80654798bc63aada39fd638d3e3c4236ccd8a5ca280350c31e4a89e5f4c9aafb34116e71da18c1226b8","0x85325cfa7ded346cc51a2894257eab56e7488dbff504f10f99f4cd2b630d913003761a50f175ed167e8073f1b6b63fb0","0xb678b4fbec09a8cc794dcbca185f133578f29e354e99c05f6d07ac323be20aecb11f781d12898168e86f2e0f09aca15e","0xa249cfcbca4d9ba0a13b5f6aac72bf9b899adf582f9746bb2ad043742b28915607467eb794fca3704278f9136f7642be","0x9438e036c836a990c5e17af3d78367a75b23c3
7f807228362b4d13e3ddcb9e431348a7b552d09d11a2e9680704a4514f","0x925ab70450af28c21a488bfb5d38ac994f784cf249d7fd9ad251bb7fd897a23e23d2528308c03415074d43330dc37ef4","0xa290563904d5a8c0058fc8330120365bdd2ba1fdbaef7a14bc65d4961bb4217acfaed11ab82669e359531f8bf589b8db","0xa7e07a7801b871fc9b981a71e195a3b4ba6b6313bc132b04796a125157e78fe5c11a3a46cf731a255ac2d78a4ae78cd0","0xb26cd2501ee72718b0eebab6fb24d955a71f363f36e0f6dff0ab1d2d7836dab88474c0cef43a2cc32701fca7e82f7df3","0xa1dc3b6c968f3de00f11275092290afab65b2200afbcfa8ddc70e751fa19dbbc300445d6d479a81bda3880729007e496","0xa9bc213e28b630889476a095947d323b9ac6461dea726f2dc9084473ae8e196d66fb792a21905ad4ec52a6d757863e7d","0xb25d178df8c2df8051e7c888e9fa677fde5922e602a95e966db9e4a3d6b23ce043d7dc48a5b375c6b7c78e966893e8c3","0xa1c8d88d72303692eaa7adf68ea41de4febec40cc14ae551bb4012afd786d7b6444a3196b5d9d5040655a3366d96b7cd","0xb22bd44f9235a47118a9bbe2ba5a2ba9ec62476061be2e8e57806c1a17a02f9a51403e849e2e589520b759abd0117683","0xb8add766050c0d69fe81d8d9ea73e1ed05f0135d093ff01debd7247e42dbb86ad950aceb3b50b9af6cdc14ab443b238f","0xaf2cf95f30ef478f018cf81d70d47d742120b09193d8bb77f0d41a5d2e1a80bfb467793d9e2471b4e0ad0cb2c3b42271","0x8af5ef2107ad284e246bb56e20fef2a255954f72de791cbdfd3be09f825298d8466064f3c98a50496c7277af32b5c0bc","0x85dc19558572844c2849e729395a0c125096476388bd1b14fa7f54a7c38008fc93e578da3aac6a52ff1504d6ca82db05","0xae8c9b43c49572e2e166d704caf5b4b621a3b47827bb2a3bcd71cdc599bba90396fd9a405261b13e831bb5d44c0827d7","0xa7ba7efede25f02e88f6f4cbf70643e76784a03d97e0fbd5d9437c2485283ad7ca3abb638a5f826cd9f6193e5dec0b6c","0x94a9d122f2f06ef709fd8016fd4b712d88052245a65a301f5f177ce22992f74ad05552b1f1af4e70d1eac62cef309752","0x82d999b3e7cf563833b8bc028ff63a6b26eb357dfdb3fd5f10e33a1f80a9b2cfa7814d871b32a7ebfbaa09e753e37c02","0xaec6edcde234df502a3268dd2c26f4a36a2e0db730afa83173f9c78fcb2b2f75510a02b80194327b792811caefda2725","0x94c0bfa66c9f91d462e9194144fdd12d96f9bbe745737e73bab8130607ee6ea9d740e2cfcbbd00a195746edb6369ee61","0xab7573dab8c9d46d339e3f491cb2826cabe8b49f85f1ede78d845fc3995537d1b4ab85140b7d0238d9c24daf0e5e2a7e","0x87e8b16832843251fe952dadfd01d41890ed4bb4b8fa0254550d92c8cced44368225eca83a6c3ad47a7f81ff8a80c984","0x9189d2d9a7c64791b19c0773ad4f0564ce6bea94aa275a917f78ad987f150fdb3e5e26e7fef9982ac184897ecc04683f","0xb3661bf19e2da41415396ae4dd051a9272e8a2580b06f1a1118f57b901fa237616a9f8075af1129af4eabfefedbe2f1c","0xaf43c86661fb15daf5d910a4e06837225e100fb5680bd3e4b10f79a2144c6ec48b1f8d6e6b98e067d36609a5d038889a","0x82ac0c7acaa83ddc86c5b4249aae12f28155989c7c6b91e5137a4ce05113c6cbc16f6c44948b0efd8665362d3162f16a","0x8f268d1195ab465beeeb112cd7ffd5d5548559a8bc01261106d3555533fc1971081b25558d884d552df0db1cddda89d8","0x8ef7caa5521f3e037586ce8ac872a4182ee20c7921c0065ed9986c047e3dda08294da1165f385d008b40d500f07d895f","0x8c2f98f6880550573fad46075d3eba26634b5b025ce25a0b4d6e0193352c8a1f0661064027a70fe8190b522405f9f4e3","0xb7653f353564feb164f0f89ec7949da475b8dad4a4d396d252fc2a884f6932d027b7eb2dc4d280702c74569319ed701a","0xa026904f4066333befd9b87a8fad791d014096af60cdd668ef919c24dbe295ff31f7a790e1e721ba40cf5105abca67f4","0x988f982004ada07a22dd345f2412a228d7a96b9cae2c487de42e392afe1e35c2655f829ce07a14629148ce7079a1f142","0x9616add009067ed135295fb74d5b223b006b312bf14663e547a0d306694ff3a8a7bb9cfc466986707192a26c0bce599f","0xad4c425de9855f6968a17ee9ae5b15e0a5b596411388cf976df62ecc6c847a6e2ddb2cea792a5f6e9113c2445dba3e5c","0xb698ac9d86afa3dc69ff8375061f88e3b0cff92ff6dfe747cebaf142e813c011851e7a2830c10993b715e7fd594604a9","0xa386fa189847bb3b798efca917461e38ead61a08b101948def0f82cd25
8b945ed4d45b53774b400af500670149e601b7","0x905c95abda2c68a6559d8a39b6db081c68cef1e1b4be63498004e1b2f408409be9350b5b5d86a30fd443e2b3e445640a","0x9116dade969e7ce8954afcdd43e5cab64dc15f6c1b8da9d2d69de3f02ba79e6c4f6c7f54d6bf586d30256ae405cd1e41","0xa3084d173eacd08c9b5084a196719b57e47a0179826fda73466758235d7ecdb87cbcf097bd6b510517d163a85a7c7edd","0x85bb00415ad3c9be99ff9ba83672cc59fdd24356b661ab93713a3c8eab34e125d8867f628a3c3891b8dc056e69cd0e83","0x8d58541f9f39ed2ee4478acce5d58d124031338ec11b0d55551f00a5a9a6351faa903a5d7c132dc5e4bb026e9cbd18e4","0xa622adf72dc250e54f672e14e128c700166168dbe0474cecb340da175346e89917c400677b1bc1c11fcc4cc26591d9db","0xb3f865014754b688ca8372e8448114fff87bf3ca99856ab9168894d0c4679782c1ced703f5b74e851b370630f5e6ee86","0xa7e490b2c40c2446fcd91861c020da9742c326a81180e38110558bb5d9f2341f1c1885e79b364e6419023d1cbdc47380","0xb3748d472b1062e54572badbb8e87ac36534407f74932e7fc5b8392d008e8e89758f1671d1e4d30ab0fa40551b13bb5e","0x89898a5c5ec4313aabc607b0049fd1ebad0e0c074920cf503c9275b564d91916c2c446d3096491c950b7af3ac5e4b0ed","0x8eb8c83fef2c9dd30ea44e286e9599ec5c20aba983f702e5438afe2e5b921884327ad8d1566c72395587efac79ca7d56","0xb92479599e806516ce21fb0bd422a1d1d925335ebe2b4a0a7e044dd275f30985a72b97292477053ac5f00e081430da80","0xa34ae450a324fe8a3c25a4d653a654f9580ed56bbea213b8096987bbad0f5701d809a17076435e18017fea4d69f414bc","0x81381afe6433d62faf62ea488f39675e0091835892ecc238e02acf1662669c6d3962a71a3db652f6fe3bc5f42a0e5dc5","0xa430d475bf8580c59111103316fe1aa79c523ea12f1d47a976bbfae76894717c20220e31cf259f08e84a693da6688d70","0xb842814c359754ece614deb7d184d679d05d16f18a14b288a401cef5dad2cf0d5ee90bad487b80923fc5573779d4e4e8","0x971d9a2627ff2a6d0dcf2af3d895dfbafca28b1c09610c466e4e2bff2746f8369de7f40d65b70aed135fe1d72564aa88","0x8f4ce1c59e22b1ce7a0664caaa7e53735b154cfba8d2c5cc4159f2385843de82ab58ed901be876c6f7fce69cb4130950","0x86cc9dc321b6264297987000d344fa297ef45bcc2a4df04e458fe2d907ad304c0ea2318e32c3179af639a9a56f3263cf","0x8229e0876dfe8f665c3fb19b250bd89d40f039bbf1b331468b403655be7be2e104c2fd07b9983580c742d5462ca39a43","0x99299d73066e8eb128f698e56a9f8506dfe4bd014931e86b6b487d6195d2198c6c5bf15cccb40ccf1f8ddb57e9da44a2","0xa3a3be37ac554c574b393b2f33d0a32a116c1a7cfeaf88c54299a4da2267149a5ecca71f94e6c0ef6e2f472b802f5189","0xa91700d1a00387502cdba98c90f75fbc4066fefe7cc221c8f0e660994c936badd7d2695893fde2260c8c11d5bdcdd951","0x8e03cae725b7f9562c5c5ab6361644b976a68bada3d7ca508abca8dfc80a469975689af1fba1abcf21bc2a190dab397d","0xb01461ad23b2a8fa8a6d241e1675855d23bc977dbf4714add8c4b4b7469ccf2375cec20e80cedfe49361d1a30414ac5b","0xa2673bf9bc621e3892c3d7dd4f1a9497f369add8cbaa3472409f4f86bd21ac67cfac357604828adfee6ada1835365029","0xa042dff4bf0dfc33c178ba1b335e798e6308915128de91b12e5dbbab7c4ac8d60a01f6aea028c3a6d87b9b01e4e74c01","0x86339e8a75293e4b3ae66b5630d375736b6e6b6b05c5cda5e73fbf7b2f2bd34c18a1d6cefede08625ce3046e77905cb8","0xaf2ebe1b7d073d03e3d98bc61af83bf26f7a8c130fd607aa92b75db22d14d016481b8aa231e2c9757695f55b7224a27f","0xa00ee882c9685e978041fd74a2c465f06e2a42ffd3db659053519925be5b454d6f401e3c12c746e49d910e4c5c9c5e8c","0x978a781c0e4e264e0dad57e438f1097d447d891a1e2aa0d5928f79a9d5c3faae6f258bc94fdc530b7b2fa6a9932bb193","0xaa4b7ce2e0c2c9e9655bf21e3e5651c8503bce27483017b0bf476be743ba06db10228b3a4c721219c0779747f11ca282","0xb003d1c459dacbcf1a715551311e45d7dbca83a185a65748ac74d1800bbeaba37765d9f5a1a221805c571910b34ebca8","0x95b6e531b38648049f0d19de09b881baa1f7ea3b2130816b006ad5703901a05da57467d1a3d9d2e7c73fb3f2e409363c","0xa6cf9c06593432d8eba23a4f131bb7f72b9bd51ab6b4b772a749fe03ed72b5ced835a349c6d992
0dba2a39669cb7c684","0xaa3d59f6e2e96fbb66195bc58c8704e139fa76cd15e4d61035470bd6e305db9f98bcbf61ac1b95e95b69ba330454c1b3","0xb57f97959c208361de6d7e86dff2b873068adb0f158066e646f42ae90e650079798f165b5cd713141cd3a2a90a961d9a","0xa76ee8ed9052f6a7a8c69774bb2597be182942f08115baba03bf8faaeaee526feba86120039fe8ca7b9354c3b6e0a8e6","0x95689d78c867724823f564627d22d25010f278674c6d2d0cdb10329169a47580818995d1d727ce46c38a1e47943ebb89","0xab676d2256c6288a88e044b3d9ffd43eb9d5aaee00e8fc60ac921395fb835044c71a26ca948e557fed770f52d711e057","0x96351c72785c32e5d004b6f4a1259fb8153d631f0c93fed172f18e8ba438fbc5585c1618deeabd0d6d0b82173c2e6170","0x93dd8d3db576418e22536eba45ab7f56967c6c97c64260d6cddf38fb19c88f2ec5cd0e0156f50e70855eee8a2b879ffd","0xad6ff16f40f6de3d7a737f8e6cebd8416920c4ff89dbdcd75eabab414af9a6087f83ceb9aff7680aa86bff98bd09c8cc","0x84de53b11671abc9c38710e19540c5c403817562aeb22a88404cdaff792c1180f717dbdfe8f54940c062c4d032897429","0x872231b9efa1cdd447b312099a5c164c560440a9441d904e70f5abfc3b2a0d16be9a01aca5e0a2599a61e19407587e3d","0x88f44ac27094a2aa14e9dc40b099ee6d68f97385950f303969d889ee93d4635e34dff9239103bdf66a4b7cbba3e7eb7a","0xa59afebadf0260e832f6f44468443562f53fbaf7bcb5e46e1462d3f328ac437ce56edbca617659ac9883f9e13261fad7","0xb1990e42743a88de4deeacfd55fafeab3bc380cb95de43ed623d021a4f2353530bcab9594389c1844b1c5ea6634c4555","0x85051e841149a10e83f56764e042182208591396d0ce78c762c4a413e6836906df67f38c69793e158d64fef111407ba3","0x9778172bbd9b1f2ec6bbdd61829d7b39a7df494a818e31c654bf7f6a30139899c4822c1bf418dd4f923243067759ce63","0x9355005b4878c87804fc966e7d24f3e4b02bed35b4a77369d01f25a3dcbff7621b08306b1ac85b76fe7b4a3eb5f839b1","0x8f9dc6a54fac052e236f8f0e1f571ac4b5308a43acbe4cc8183bce26262ddaf7994e41cf3034a4cbeca2c505a151e3b1","0x8cc59c17307111723fe313046a09e0e32ea0cce62c13814ab7c6408c142d6a0311d801be4af53fc9240523f12045f9ef","0x8e6057975ed40a1932e47dd3ac778f72ee2a868d8540271301b1aa6858de1a5450f596466494a3e0488be4fbeb41c840","0x812145efbd6559ae13325d56a15940ca4253b17e72a9728986b563bb5acc13ec86453796506ac1a8f12bd6f9e4a288c3","0x911da0a6d6489eb3dab2ec4a16e36127e8a291ae68a6c2c9de33e97f3a9b1f00da57a94e270a0de79ecc5ecb45d19e83","0xb72ea85973f4b2a7e6e71962b0502024e979a73c18a9111130e158541fa47bbaaf53940c8f846913a517dc69982ba9e1","0xa7a56ad1dbdc55f177a7ad1d0af78447dc2673291e34e8ab74b26e2e2e7d8c5fe5dc89e7ef60f04a9508847b5b3a8188","0xb52503f6e5411db5d1e70f5fb72ccd6463fa0f197b3e51ca79c7b5a8ab2e894f0030476ada72534fa4eb4e06c3880f90","0xb51c7957a3d18c4e38f6358f2237b3904618d58b1de5dec53387d25a63772e675a5b714ad35a38185409931157d4b529","0xb86b4266e719d29c043d7ec091547aa6f65bbf2d8d831d1515957c5c06513b72aa82113e9645ad38a7bc3f5383504fa6","0xb95b547357e6601667b0f5f61f261800a44c2879cf94e879def6a105b1ad2bbf1795c3b98a90d588388e81789bd02681","0xa58fd4c5ae4673fa350da6777e13313d5d37ed1dafeeb8f4f171549765b84c895875d9d3ae6a9741f3d51006ef81d962","0x9398dc348d078a604aadc154e6eef2c0be1a93bb93ba7fe8976edc2840a3a318941338cc4d5f743310e539d9b46613d2","0x902c9f0095014c4a2f0dccaaab543debba6f4cc82c345a10aaf4e72511725dbed7a34cd393a5f4e48a3e5142b7be84ed","0xa7c0447849bb44d04a0393a680f6cd390093484a79a147dd238f5d878030d1c26646d88211108e59fe08b58ad20c6fbd","0x80db045535d6e67a422519f5c89699e37098449d249698a7cc173a26ccd06f60238ae6cc7242eb780a340705c906790c","0x8e52b451a299f30124505de2e74d5341e1b5597bdd13301cc39b05536c96e4380e7f1b5c7ef076f5b3005a868657f17c","0x824499e89701036037571761e977654d2760b8ce21f184f2879fda55d3cda1e7a95306b8abacf1caa79d3cc075b9d27f","0x9049b956b77f8453d2070607610b79db795588c0cec12943a0f5fe76f358dea81e4f57a4692112afda0e2c05c142b26f",
"0x81911647d818a4b5f4990bfd4bc13bf7be7b0059afcf1b6839333e8569cdb0172fd2945410d88879349f677abaed5eb3","0xad4048f19b8194ed45b6317d9492b71a89a66928353072659f5ce6c816d8f21e69b9d1817d793effe49ca1874daa1096","0x8d22f7b2ddb31458661abd34b65819a374a1f68c01fc6c9887edeba8b80c65bceadb8f57a3eb686374004b836261ef67","0x92637280c259bc6842884db3d6e32602a62252811ae9b019b3c1df664e8809ffe86db88cfdeb8af9f46435c9ee790267","0xa2f416379e52e3f5edc21641ea73dc76c99f7e29ea75b487e18bd233856f4c0183429f378d2bfc6cd736d29d6cadfa49","0x882cb6b76dbdc188615dcf1a8439eba05ffca637dd25197508156e03c930b17b9fed2938506fdd7b77567cb488f96222","0xb68b621bb198a763fb0634eddb93ed4b5156e59b96c88ca2246fd1aea3e6b77ed651e112ac41b30cd361fadc011d385e","0xa3cb22f6b675a29b2d1f827cacd30df14d463c93c3502ef965166f20d046af7f9ab7b2586a9c64f4eae4fad2d808a164","0x8302d9ce4403f48ca217079762ce42cee8bc30168686bb8d3a945fbd5acd53b39f028dce757b825eb63af2d5ae41169d","0xb2eef1fbd1a176f1f4cd10f2988c7329abe4eb16c7405099fb92baa724ab397bc98734ef7d4b24c0f53dd90f57520d04","0xa1bbef0bd684a3f0364a66bde9b29326bac7aa3dde4caed67f14fb84fed3de45c55e406702f1495a3e2864d4ee975030","0x976acdb0efb73e3a3b65633197692dedc2adaed674291ae3df76b827fc866d214e9cac9ca46baefc4405ff13f953d936","0xb9fbf71cc7b6690f601f0b1c74a19b7d14254183a2daaafec7dc3830cba5ae173d854bbfebeca985d1d908abe5ef0cda","0x90591d7b483598c94e38969c4dbb92710a1a894bcf147807f1bcbd8aa3ac210b9f2be65519aa829f8e1ccdc83ad9b8cf","0xa30568577c91866b9c40f0719d46b7b3b2e0b4a95e56196ac80898a2d89cc67880e1229933f2cd28ee3286f8d03414d7","0x97589a88c3850556b359ec5e891f0937f922a751ac7c95949d3bbc7058c172c387611c0f4cb06351ef02e5178b3dd9e4","0x98e7bbe27a1711f4545df742f17e3233fbcc63659d7419e1ca633f104cb02a32c84f2fac23ca2b84145c2672f68077ab","0xa7ddb91636e4506d8b7e92aa9f4720491bb71a72dadc47c7f4410e15f93e43d07d2b371951a0e6a18d1bd087aa96a5c4","0xa7c006692227a06db40bceac3d5b1daae60b5692dd9b54772bedb5fea0bcc91cbcdb530cac31900ffc70c5b3ffadc969","0x8d3ec6032778420dfa8be52066ba0e623467df33e4e1901dbadd586c5d750f4ccde499b5197e26b9ea43931214060f69","0x8d9a8410518ea64f89df319bfd1fc97a0971cdb9ad9b11d1f8fe834042ea7f8dce4db56eeaf179ff8dda93b6db93e5ce","0xa3c533e9b3aa04df20b9ff635cb1154ce303e045278fcf3f10f609064a5445552a1f93989c52ce852fd0bbd6e2b6c22e","0x81934f3a7f8c1ae60ec6e4f212986bcc316118c760a74155d06ce0a8c00a9b9669ec4e143ca214e1b995e41271774fd9","0xab8e2d01a71192093ef8fafa7485e795567cc9db95a93fb7cc4cf63a391ef89af5e2bfad4b827fffe02b89271300407f","0x83064a1eaa937a84e392226f1a60b7cfad4efaa802f66de5df7498962f7b2649924f63cd9962d47906380b97b9fe80e1","0xb4f5e64a15c6672e4b55417ee5dc292dcf93d7ea99965a888b1cc4f5474a11e5b6520eacbcf066840b343f4ceeb6bf33","0xa63d278b842456ef15c278b37a6ea0f27c7b3ffffefca77c7a66d2ea06c33c4631eb242bbb064d730e70a8262a7b848a","0x83a41a83dbcdf0d22dc049de082296204e848c453c5ab1ba75aa4067984e053acf6f8b6909a2e1f0009ed051a828a73b","0x819485b036b7958508f15f3c19436da069cbe635b0318ebe8c014cf1ef9ab2df038c81161b7027475bcfa6fff8dd9faf","0xaa40e38172806e1e045e167f3d1677ef12d5dcdc89b43639a170f68054bd196c4fae34c675c1644d198907a03f76ba57","0x969bae484883a9ed1fbed53b26b3d4ee4b0e39a6c93ece5b3a49daa01444a1c25727dabe62518546f36b047b311b177c","0x80a9e73a65da99664988b238096a090d313a0ee8e4235bc102fa79bb337b51bb08c4507814eb5baec22103ec512eaab0","0x86604379aec5bddda6cbe3ef99c0ac3a3c285b0b1a15b50451c7242cd42ae6b6c8acb717dcca7917838432df93a28502","0xa23407ee02a495bed06aa7e15f94cfb05c83e6d6fba64456a9bbabfa76b2b68c5c47de00ba169e710681f6a29bb41a22","0x98cff5ecc73b366c6a01b34ac9066cb34f7eeaf4f38a5429bad2d07e84a237047e2a065c7e8a0a6581017dadb4695deb","0x8de9f68a938f441f3
b7ab84bb1f473c5f9e5c9e139e42b7ccee1d254bd57d0e99c2ccda0f3198f1fc5737f6023dd204e","0xb0ce48d815c2768fb472a315cad86aa033d0e9ca506f146656e2941829e0acb735590b4fbc713c2d18d3676db0a954ac","0x82f485cdefd5642a6af58ac6817991c49fac9c10ace60f90b27f1788cc026c2fe8afc83cf499b3444118f9f0103598a8","0x82c24550ed512a0d53fc56f64cc36b553823ae8766d75d772dacf038c460f16f108f87a39ceef7c66389790f799dbab3","0x859ffcf1fe9166388316149b9acc35694c0ea534d43f09dae9b86f4aa00a23b27144dda6a352e74b9516e8c8d6fc809c","0xb8f7f353eec45da77fb27742405e5ad08d95ec0f5b6842025be9def3d9892f85eb5dd0921b41e6eff373618dba215bca","0x8ccca4436f9017e426229290f5cd05eac3f16571a4713141a7461acfe8ae99cd5a95bf5b6df129148693c533966145da","0xa2c67ecc19c0178b2994846fea4c34c327a5d786ac4b09d1d13549d5be5996d8a89021d63d65cb814923388f47cc3a03","0xaa0ff87d676b418ec08f5cbf577ac7e744d1d0e9ebd14615b550eb86931eafd2a36d4732cc5d6fab1713fd7ab2f6f7c0","0x8aef4730bb65e44efd6bb9441c0ae897363a2f3054867590a2c2ecf4f0224e578c7a67f10b40f8453d9f492ac15a9b2d","0x86a187e13d8fba5addcfdd5b0410cedd352016c930f913addd769ee09faa6be5ca3e4b1bdb417a965c643a99bd92be42","0xa0a4e9632a7a094b14b29b78cd9c894218cdf6783e61671e0203865dc2a835350f465fbaf86168f28af7c478ca17bc89","0xa8c7b02d8deff2cd657d8447689a9c5e2cd74ef57c1314ac4d69084ac24a7471954d9ff43fe0907d875dcb65fd0d3ce5","0x97ded38760aa7be6b6960b5b50e83b618fe413cbf2bcc1da64c05140bcc32f5e0e709cd05bf8007949953fac5716bad9","0xb0d293835a24d64c2ae48ce26e550b71a8c94a0883103757fb6b07e30747f1a871707d23389ba2b2065fa6bafe220095","0x8f9e291bf849feaa575592e28e3c8d4b7283f733d41827262367ea1c40f298c7bcc16505255a906b62bf15d9f1ba85fb","0x998f4e2d12708b4fd85a61597ca2eddd750f73c9e0c9b3cf0825d8f8e01f1628fd19797dcaed3b16dc50331fc6b8b821","0xb30d1f8c115d0e63bf48f595dd10908416774c78b3bbb3194192995154d80ea042d2e94d858de5f8aa0261b093c401fd","0xb5d9c75bb41f964cbff3f00e96d9f1480c91df8913f139f0d385d27a19f57a820f838eb728e46823cbff00e21c660996","0xa6edec90b5d25350e2f5f0518777634f9e661ec9d30674cf5b156c4801746d62517751d90074830ac0f4b09911c262f1","0x82f98da1264b6b75b8fbeb6a4d96d6a05b25c24db0d57ba3a38efe3a82d0d4e331b9fc4237d6494ccfe4727206457519","0xb89511843453cf4ecd24669572d6371b1e529c8e284300c43e0d5bb6b3aaf35aeb634b3cb5c0a2868f0d5e959c1d0772","0xa82bf065676583e5c1d3b81987aaae5542f522ba39538263a944bb33ea5b514c649344a96c0205a3b197a3f930fcda6c","0xa37b47ea527b7e06c460776aa662d9a49ff4149d3993f1a974b0dd165f7171770d189b0e2ea54fd5fccb6a14b116e68a","0xa1017677f97dda818274d47556d09d0e4ccacb23a252f82a6cfe78c630ad46fb9806307445a59fb61262182de3a2b29c","0xb01e9fcac239ba270e6877b79273ddd768bf8a51d2ed8a051b1c11e18eff3de5920e2fcbfbd26f06d381eddd3b1f1e1b","0x82fcd53d803b1c8e4ed76adc339b7f3a5962d37042b9683aabac7513ac68775d4a566a9460183926a6a95dbe7d551a1f","0xa763e78995d55cd21cdb7ef75d9642d6e1c72453945e346ab6690c20a4e1eeec61bb848ef830ae4b56182535e3c71d8f","0xb769f4db602251d4b0a1186782799bdcef66de33c110999a5775c50b349666ffd83d4c89714c4e376f2efe021a5cfdb2","0xa59cbd1b785efcfa6e83fc3b1d8cf638820bc0c119726b5368f3fba9dce8e3414204fb1f1a88f6c1ff52e87961252f97","0x95c8c458fd01aa23ecf120481a9c6332ebec2e8bb70a308d0576926a858457021c277958cf79017ddd86a56cacc2d7db","0x82eb41390800287ae56e77f2e87709de5b871c8bdb67c10a80fc65f3acb9f7c29e8fa43047436e8933f27449ea61d94d","0xb3ec25e3545eb83aed2a1f3558d1a31c7edde4be145ecc13b33802654b77dc049b4f0065069dd9047b051e52ab11dcdd","0xb78a0c715738f56f0dc459ab99e252e3b579b208142836b3c416b704ca1de640ca082f29ebbcee648c8c127df06f6b1e","0xa4083149432eaaf9520188ebf4607d09cf664acd1f471d4fb654476e77a9eaae2251424ffda78d09b6cb880df35c1219","0x8c52857d68d6e9672df3db2df2dbf46b516a2
1a0e8a18eec09a6ae13c1ef8f369d03233320dd1c2c0bbe00abfc1ea18b","0x8c856089488803066bff3f8d8e09afb9baf20cecc33c8823c1c0836c3d45498c3de37e87c016b705207f60d2b00f8609","0x831a3df39be959047b2aead06b4dcd3012d7b29417f642b83c9e8ce8de24a3dbbd29c6fdf55e2db3f7ea04636c94e403","0xaed84d009f66544addabe404bf6d65af7779ce140dc561ff0c86a4078557b96b2053b7b8a43432ffb18cd814f143b9da","0x93282e4d72b0aa85212a77b336007d8ba071eea17492da19860f1ad16c1ea8867ccc27ef5c37c74b052465cc11ea4f52","0xa7b78b8c8d057194e8d68767f1488363f77c77bddd56c3da2bc70b6354c7aa76247c86d51f7371aa38a4aa7f7e3c0bb7","0xb1c77283d01dcd1bde649b5b044eac26befc98ff57cbee379fb5b8e420134a88f2fc7f0bf04d15e1fbd45d29e7590fe6","0xa4aa8de70330a73b2c6458f20a1067eed4b3474829b36970a8df125d53bbdda4f4a2c60063b7cccb0c80fc155527652f","0x948a6c79ba1b8ad7e0bed2fae2f0481c4e41b4d9bbdd9b58164e28e9065700e83f210c8d5351d0212e0b0b68b345b3a5","0x86a48c31dcbbf7b082c92d28e1f613a2378a910677d7db3a349dc089e4a1e24b12eee8e8206777a3a8c64748840b7387","0x976adb1af21e0fc34148917cf43d933d7bfd3fd12ed6c37039dcd5a4520e3c6cf5868539ba5bf082326430deb8a4458d","0xb93e1a4476f2c51864bb4037e7145f0635eb2827ab91732b98d49b6c07f6ac443111aa1f1da76d1888665cb897c3834e","0x8afd46fb23bf869999fa19784b18a432a1f252d09506b8dbb756af900518d3f5f244989b3d7c823d9029218c655d3dc6","0x83f1e59e3abeed18cdc632921672673f1cb6e330326e11c4e600e13e0d5bc11bdc970ae12952e15103a706fe720bf4d6","0x90ce4cc660714b0b673d48010641c09c00fc92a2c596208f65c46073d7f349dd8e6e077ba7dcef9403084971c3295b76","0x8b09b0f431a7c796561ecf1549b85048564de428dac0474522e9558b6065fede231886bc108539c104ce88ebd9b5d1b0","0x85d6e742e2fb16a7b0ba0df64bc2c0dbff9549be691f46a6669bca05e89c884af16822b85faefefb604ec48c8705a309","0xa87989ee231e468a712c66513746fcf03c14f103aadca0eac28e9732487deb56d7532e407953ab87a4bf8961588ef7b0","0xb00da10efe1c29ee03c9d37d5918e391ae30e48304e294696b81b434f65cf8c8b95b9d1758c64c25e534d045ba28696f","0x91c0e1fb49afe46c7056400baa06dbb5f6e479db78ee37e2d76c1f4e88994357e257b83b78624c4ef6091a6c0eb8254d","0x883fb797c498297ccbf9411a3e727c3614af4eccde41619b773dc7f3259950835ee79453debf178e11dec4d3ada687a0","0xa14703347e44eb5059070b2759297fcfcfc60e6893c0373eea069388eba3950aa06f1c57cd2c30984a2d6f9e9c92c79e","0xafebc7585b304ceba9a769634adff35940e89cd32682c78002822aab25eec3edc29342b7f5a42a56a1fec67821172ad5","0xaea3ff3822d09dba1425084ca95fd359718d856f6c133c5fabe2b2eed8303b6e0ba0d8698b48b93136a673baac174fd9","0xaf2456a09aa777d9e67aa6c7c49a1845ea5cdda2e39f4c935c34a5f8280d69d4eec570446998cbbe31ede69a91e90b06","0x82cada19fed16b891ef3442bafd49e1f07c00c2f57b2492dd4ee36af2bd6fd877d6cb41188a4d6ce9ec8d48e8133d697","0x82a21034c832287f616619a37c122cee265cc34ae75e881fcaea4ea7f689f3c2bc8150bbf7dbcfd123522bfb7f7b1d68","0x86877217105f5d0ec3eeff0289fc2a70d505c9fdf7862e8159553ef60908fb1a27bdaf899381356a4ef4649072a9796c","0x82b196e49c6e861089a427c0b4671d464e9d15555ffb90954cd0d630d7ae02eb3d98ceb529d00719c2526cd96481355a","0xa29b41d0d43d26ce76d4358e0db2b77df11f56e389f3b084d8af70a636218bd3ac86b36a9fe46ec9058c26a490f887f7","0xa4311c4c20c4d7dd943765099c50f2fd423e203ccfe98ff00087d205467a7873762510cac5fdce7a308913ed07991ed7","0xb1f040fc5cc51550cb2c25cf1fd418ecdd961635a11f365515f0cb4ffb31da71f48128c233e9cc7c0cf3978d757ec84e","0xa9ebae46f86d3bd543c5f207ed0d1aed94b8375dc991161d7a271f01592912072e083e2daf30c146430894e37325a1b9","0x826418c8e17ad902b5fe88736323a47e0ca7a44bce4cbe27846ec8fe81de1e8942455dda6d30e192cdcc73e11df31256","0x85199db563427c5edcbac21f3d39fec2357be91fb571982ddcdc4646b446ad5ced84410de008cb47b3477ee0d532daf8","0xb7eed9cd400b2ca12bf1d9ae008214b8561fb09c8ad9ff959e626ffde
00fee5ff2f5b6612e231f2a1a9b1646fcc575e3","0x8b40bf12501dcbac78f5a314941326bfcddf7907c83d8d887d0bb149207f85d80cd4dfbd7935439ea7b14ea39a3fded7","0x83e3041af302485399ba6cd5120e17af61043977083887e8d26b15feec4a6b11171ac5c06e6ad0971d4b58a81ff12af3","0x8f5b9a0eecc589dbf8c35a65d5e996a659277ef6ea509739c0cb7b3e2da9895e8c8012de662e5b23c5fa85d4a8f48904","0x835d71ed5e919d89d8e6455f234f3ff215462c4e3720c371ac8c75e83b19dfe3ae15a81547e4dc1138e5f5997f413cc9","0x8b7d2e4614716b1db18e9370176ea483e6abe8acdcc3dcdf5fb1f4d22ca55d652feebdccc171c6de38398d9f7bfdec7a","0x93eace72036fe57d019676a02acf3d224cf376f166658c1bf705db4f24295881d477d6fdd7916efcfceff8c7a063deda","0xb1ac460b3d516879a84bc886c54f020a9d799e7c49af3e4d7de5bf0d2793c852254c5d8fe5616147e6659512e5ccb012","0xacd0947a35cb167a48bcd9667620464b54ac0e78f9316b4aa92dcaab5422d7a732087e52e1c827faa847c6b2fe6e7766","0x94ac33d21c3d12ff762d32557860e911cd94d666609ddcc42161b9c16f28d24a526e8b10bb03137257a92cec25ae637d","0x832e02058b6b994eadd8702921486241f9a19e68ed1406dad545e000a491ae510f525ccf9d10a4bba91c68f2c53a0f58","0x9471035d14f78ff8f463b9901dd476b587bb07225c351161915c2e9c6114c3c78a501379ab6fb4eb03194c457cbd22bf","0xab64593e034c6241d357fcbc32d8ea5593445a5e7c24cac81ad12bd2ef01843d477a36dc1ba21dbe63b440750d72096a","0x9850f3b30045e927ad3ec4123a32ed2eb4c911f572b6abb79121873f91016f0d80268de8b12e2093a4904f6e6cab7642","0x987212c36b4722fe2e54fa30c52b1e54474439f9f35ca6ad33c5130cd305b8b54b532dd80ffd2c274105f20ce6d79f6e","0x8b4d0c6abcb239b5ed47bef63bc17efe558a27462c8208fa652b056e9eae9665787cd1aee34fbb55beb045c8bfdb882b","0xa9f3483c6fee2fe41312d89dd4355d5b2193ac413258993805c5cbbf0a59221f879386d3e7a28e73014f10e65dd503d9","0xa2225da3119b9b7c83d514b9f3aeb9a6d9e32d9cbf9309cbb971fd53c4b2c001d10d880a8ad8a7c281b21d85ceca0b7c","0xa050be52e54e676c151f7a54453bbb707232f849beab4f3bf504b4d620f59ed214409d7c2bd3000f3ff13184ccda1c35","0xadbccf681e15b3edb6455a68d292b0a1d0f5a4cb135613f5e6db9943f02181341d5755875db6ee474e19ace1c0634a28","0x8b6eff675632a6fad0111ec72aacc61c7387380eb87933fd1d098856387d418bd38e77d897e65d6fe35951d0627c550b","0xaabe2328ddf90989b15e409b91ef055cb02757d34987849ae6d60bef2c902bf8251ed21ab30acf39e500d1d511e90845","0x92ba4eb1f796bc3d8b03515f65c045b66e2734c2da3fc507fdd9d6b5d1e19ab3893726816a32141db7a31099ca817d96","0x8a98b3cf353138a1810beb60e946183803ef1d39ac4ea92f5a1e03060d35a4774a6e52b14ead54f6794d5f4022b8685c","0x909f8a5c13ec4a59b649ed3bee9f5d13b21d7f3e2636fd2bb3413c0646573fdf9243d63083356f12f5147545339fcd55","0x9359d914d1267633141328ed0790d81c695fea3ddd2d406c0df3d81d0c64931cf316fe4d92f4353c99ff63e2aefc4e34","0xb88302031681b54415fe8fbfa161c032ea345c6af63d2fb8ad97615103fd4d4281c5a9cae5b0794c4657b97571a81d3b","0x992c80192a519038082446b1fb947323005b275e25f2c14c33cc7269e0ec038581cc43705894f94bad62ae33a8b7f965","0xa78253e3e3eece124bef84a0a8807ce76573509f6861d0b6f70d0aa35a30a123a9da5e01e84969708c40b0669eb70aa6","0x8d5724de45270ca91c94792e8584e676547d7ac1ac816a6bb9982ee854eb5df071d20545cdfd3771cd40f90e5ba04c8e","0x825a6f586726c68d45f00ad0f5a4436523317939a47713f78fd4fe81cd74236fdac1b04ecd97c2d0267d6f4981d7beb1"],"g2_monomial":["0x93e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8","0xb5bfd7dd8cdeb128843bc287230af38926187075cbfbefa81009a2ce615ac53d2914e5870cb452d2afaaab24f3499f72185cbfee53492714734429b7b38608e23926c911cceceac9a36851477ba4c60b087041de621000edc98edada20c1def2","0xb5337ba0ce5d37224290916e268e2060e5c14f3f9fc9e1ec3af5a958e7a0303122500ce
18f1a4640bf66525bd10e763501fe986d86649d8d45143c08c3209db3411802c226e9fe9a55716ac4a0c14f9dcef9e70b2bb309553880dc5025eab3cc","0xb3c1dcdc1f62046c786f0b82242ef283e7ed8f5626f72542aa2c7a40f14d9094dd1ebdbd7457ffdcdac45fd7da7e16c51200b06d791e5e43e257e45efdf0bd5b06cd2333beca2a3a84354eb48662d83aef5ecf4e67658c851c10b13d8d87c874","0x954d91c7688983382609fca9e211e461f488a5971fd4e40d7e2892037268eacdfd495cfa0a7ed6eb0eb11ac3ae6f651716757e7526abe1e06c64649d80996fd3105c20c4c94bc2b22d97045356fe9d791f21ea6428ac48db6f9e68e30d875280","0x88a6b6bb26c51cf9812260795523973bb90ce80f6820b6c9048ab366f0fb96e48437a7f7cb62aedf64b11eb4dfefebb0147608793133d32003cb1f2dc47b13b5ff45f1bb1b2408ea45770a08dbfaec60961acb8119c47b139a13b8641e2c9487","0x85cd7be9728bd925d12f47fb04b32d9fad7cab88788b559f053e69ca18e463113ecc8bbb6dbfb024835f901b3a957d3108d6770fb26d4c8be0a9a619f6e3a4bf15cbfd48e61593490885f6cee30e4300c5f9cf5e1c08e60a2d5b023ee94fcad0","0x80477dba360f04399821a48ca388c0fa81102dd15687fea792ee8c1114e00d1bc4839ad37ac58900a118d863723acfbe08126ea883be87f50e4eabe3b5e72f5d9e041db8d9b186409fd4df4a7dde38c0e0a3b1ae29b098e5697e7f110b6b27e4","0xb7a6aec08715a9f8672a2b8c367e407be37e59514ac19dd4f0942a68007bba3923df22da48702c63c0d6b3efd3c2d04e0fe042d8b5a54d562f9f33afc4865dcbcc16e99029e25925580e87920c399e710d438ac1ce3a6dc9b0d76c064a01f6f7","0xac1b001edcea02c8258aeffbf9203114c1c874ad88dae1184fadd7d94cd09053649efd0ca413400e6e9b5fa4eac33261000af88b6bd0d2abf877a4f0355d2fb4d6007adb181695201c5432e50b850b51b3969f893bddf82126c5a71b042b7686","0x90043fda4de53fb364fab2c04be5296c215599105ecff0c12e4917c549257125775c29f2507124d15f56e30447f367db0596c33237242c02d83dfd058735f1e3c1ff99069af55773b6d51d32a68bf75763f59ec4ee7267932ae426522b8aaab6","0xa8660ce853e9dc08271bf882e29cd53397d63b739584dda5263da4c7cc1878d0cf6f3e403557885f557e184700575fee016ee8542dec22c97befe1d10f414d22e84560741cdb3e74c30dda9b42eeaaf53e27822de2ee06e24e912bf764a9a533","0x8fe3921a96d0d065e8aa8fce9aa42c8e1461ca0470688c137be89396dd05103606dab6cdd2a4591efd6addf72026c12e065da7be276dee27a7e30afa2bd81c18f1516e7f068f324d0bad9570b95f6bd02c727cd2343e26db0887c3e4e26dceda","0x8ae1ad97dcb9c192c9a3933541b40447d1dc4eebf380151440bbaae1e120cc5cdf1bcea55180b128d8e180e3af623815191d063cc0d7a47d55fb7687b9d87040bf7bc1a7546b07c61db5ccf1841372d7c2fe4a5431ffff829f3c2eb590b0b710","0x8c2fa96870a88150f7876c931e2d3cc2adeaaaf5c73ef5fa1cf9dfa0991ae4819f9321af7e916e5057d87338e630a2f21242c29d76963cf26035b548d2a63d8ad7bd6efefa01c1df502cbdfdfe0334fb21ceb9f686887440f713bf17a89b8081","0xb9aa98e2f02bb616e22ee5dd74c7d1049321ac9214d093a738159850a1dbcc7138cb8d26ce09d8296368fd5b291d74fa17ac7cc1b80840fdd4ee35e111501e3fa8485b508baecda7c1ab7bd703872b7d64a2a40b3210b6a70e8a6ffe0e5127e3","0x9292db67f8771cdc86854a3f614a73805bf3012b48f1541e704ea4015d2b6b9c9aaed36419769c87c49f9e3165f03edb159c23b3a49c4390951f78e1d9b0ad997129b17cdb57ea1a6638794c0cca7d239f229e589c5ae4f9fe6979f7f8cba1d7","0x91cd9e86550f230d128664f7312591fee6a84c34f5fc7aed557bcf986a409a6de722c4330453a305f06911d2728626e611acfdf81284f77f60a3a1595053a9479964fd713117e27c0222cc679674b03bc8001501aaf9b506196c56de29429b46","0xa9516b73f605cc31b89c68b7675dc451e6364595243d235339437f556cf22d745d4250c1376182273be2d99e02c10eee047410a43eff634d051aeb784e76cb3605d8e079b9eb6ad1957dfdf77e1cd32ce4a573c9dfcc207ca65af6eb187f6c3d","0xa9667271f7d191935cc8ad59ef3ec50229945faea85bfdfb0d582090f524436b348aaa0183b16a6231c00332fdac2826125b8c857a2ed9ec66821cfe02b3a2279be2412441bc2e369b255eb98614e4be8490799c4df22f18d47d24ec70bba5f7","0xa4371144d2aa44d70d3cb9789096d3aa411149a6f800cb46f506461ee8363c8724667974252f28ae
a61b6030c05930ac039c1ee64bb4bd56532a685cae182bf2ab935eee34718cffcb46cae214c77aaca11dbb1320faf23c47247db1da04d8dc","0x89a7eb441892260b7e81168c386899cd84ffc4a2c5cad2eae0d1ab9e8b5524662e6f660fe3f8bfe4c92f60b060811bc605b14c5631d16709266886d7885a5eb5930097127ec6fb2ebbaf2df65909cf48f253b3d5e22ae48d3e9a2fd2b01f447e","0x9648c42ca97665b5eccb49580d8532df05eb5a68db07f391a2340769b55119eaf4c52fe4f650c09250fa78a76c3a1e271799b8333cc2628e3d4b4a6a3e03da1f771ecf6516dd63236574a7864ff07e319a6f11f153406280d63af9e2b5713283","0x9663bf6dd446ea7a90658ee458578d4196dc0b175ef7fcfa75f44d41670850774c2e46c5a6be132a2c072a3c0180a24f0305d1acac49d2d79878e5cda80c57feda3d01a6af12e78b5874e2a4b3717f11c97503b41a4474e2e95b179113726199","0xb212aeb4814e0915b432711b317923ed2b09e076aaf558c3ae8ef83f9e15a83f9ea3f47805b2750ab9e8106cb4dc6ad003522c84b03dc02829978a097899c773f6fb31f7fe6b8f2d836d96580f216fec20158f1590c3e0d7850622e15194db05","0x925f005059bf07e9ceccbe66c711b048e236ade775720d0fe479aebe6e23e8af281225ad18e62458dc1b03b42ad4ca290d4aa176260604a7aad0d9791337006fbdebe23746f8060d42876f45e4c83c3643931392fde1cd13ff8bddf8111ef974","0x9553edb22b4330c568e156a59ef03b26f5c326424f830fe3e8c0b602f08c124730ffc40bc745bec1a22417adb22a1a960243a10565c2be3066bfdb841d1cd14c624cd06e0008f4beb83f972ce6182a303bee3fcbcabc6cfe48ec5ae4b7941bfc","0x935f5a404f0a78bdcce709899eda0631169b366a669e9b58eacbbd86d7b5016d044b8dfc59ce7ed8de743ae16c2343b50e2f925e88ba6319e33c3fc76b314043abad7813677b4615c8a97eb83cc79de4fedf6ccbcfa4d4cbf759a5a84e4d9742","0xa5b014ab936eb4be113204490e8b61cd38d71da0dec7215125bcd131bf3ab22d0a32ce645bca93e7b3637cf0c2db3d6601a0ddd330dc46f9fae82abe864ffc12d656c88eb50c20782e5bb6f75d18760666f43943abb644b881639083e122f557","0x935b7298ae52862fa22bf03bfc1795b34c70b181679ae27de08a9f5b4b884f824ef1b276b7600efa0d2f1d79e4a470d51692fd565c5cf8343dd80e5d3336968fc21c09ba9348590f6206d4424eb229e767547daefa98bc3aa9f421158dee3f2a","0x9830f92446e708a8f6b091cc3c38b653505414f8b6507504010a96ffda3bcf763d5331eb749301e2a1437f00e2415efb01b799ad4c03f4b02de077569626255ac1165f96ea408915d4cf7955047620da573e5c439671d1fa5c833fb11de7afe6","0x840dcc44f673fff3e387af2bb41e89640f2a70bcd2b92544876daa92143f67c7512faf5f90a04b7191de01f3e2b1bde00622a20dc62ca23bbbfaa6ad220613deff43908382642d4d6a86999f662efd64b1df448b68c847cfa87630a3ffd2ec76","0x92950c895ed54f7f876b2fda17ecc9c41b7accfbdd42c210cc5b475e0737a7279f558148531b5c916e310604a1de25a80940c94fe5389ae5d6a5e9c371be67bceea1877f5401725a6595bcf77ece60905151b6dfcb68b75ed2e708c73632f4fd","0x8010246bf8e94c25fd029b346b5fbadb404ef6f44a58fd9dd75acf62433d8cc6db66974f139a76e0c26dddc1f329a88214dbb63276516cf325c7869e855d07e0852d622c332ac55609ba1ec9258c45746a2aeb1af0800141ee011da80af175d4","0xb0f1bad257ebd187bdc3f37b23f33c6a5d6a8e1f2de586080d6ada19087b0e2bf23b79c1b6da1ee82271323f5bdf3e1b018586b54a5b92ab6a1a16bb3315190a3584a05e6c37d5ca1e05d702b9869e27f513472bcdd00f4d0502a107773097da","0x9636d24f1ede773ce919f309448dd7ce023f424afd6b4b69cb98c2a988d849a283646dc3e469879daa1b1edae91ae41f009887518e7eb5578f88469321117303cd3ac2d7aee4d9cb5f82ab9ae3458e796dfe7c24284b05815acfcaa270ff22e2","0xb373feb5d7012fd60578d7d00834c5c81df2a23d42794fed91aa9535a4771fde0341c4da882261785e0caca40bf83405143085e7f17e55b64f6c5c809680c20b050409bf3702c574769127c854d27388b144b05624a0e24a1cbcc4d08467005b","0xb15680648949ce69f82526e9b67d9b55ce5c537dc6ab7f3089091a9a19a6b90df7656794f6edc87fb387d21573ffc847062623685931c2790a508cbc8c6b231dd2c34f4d37d4706237b1407673605a604bcf6a50cc0b1a2db20485e22b02c17e","0x8817e46672d40c8f748081567b038a3165f87994788ec77ee8daea8587f5540df3422f9e120e94339be67f186
f50952504cb44f61e30a5241f1827e501b2de53c4c64473bcc79ab887dd277f282fbfe47997a930dd140ac08b03efac88d81075","0xa6e4ef6c1d1098f95aae119905f87eb49b909d17f9c41bcfe51127aa25fee20782ea884a7fdf7d5e9c245b5a5b32230b07e0dbf7c6743bf52ee20e2acc0b269422bd6cf3c07115df4aa85b11b2c16630a07c974492d9cdd0ec325a3fabd95044","0x8634aa7c3d00e7f17150009698ce440d8e1b0f13042b624a722ace68ead870c3d2212fbee549a2c190e384d7d6ac37ce14ab962c299ea1218ef1b1489c98906c91323b94c587f1d205a6edd5e9d05b42d591c26494a6f6a029a2aadb5f8b6f67","0x821a58092900bdb73decf48e13e7a5012a3f88b06288a97b855ef51306406e7d867d613d9ec738ebacfa6db344b677d21509d93f3b55c2ebf3a2f2a6356f875150554c6fff52e62e3e46f7859be971bf7dd9d5b3e1d799749c8a97c2e04325df","0x8dba356577a3a388f782e90edb1a7f3619759f4de314ad5d95c7cc6e197211446819c4955f99c5fc67f79450d2934e3c09adefc91b724887e005c5190362245eec48ce117d0a94d6fa6db12eda4ba8dde608fbbd0051f54dcf3bb057adfb2493","0xa32a690dc95c23ed9fb46443d9b7d4c2e27053a7fcc216d2b0020a8cf279729c46114d2cda5772fd60a97016a07d6c5a0a7eb085a18307d34194596f5b541cdf01b2ceb31d62d6b55515acfd2b9eec92b27d082fbc4dc59fc63b551eccdb8468","0xa040f7f4be67eaf0a1d658a3175d65df21a7dbde99bfa893469b9b43b9d150fc2e333148b1cb88cfd0447d88fa1a501d126987e9fdccb2852ecf1ba907c2ca3d6f97b055e354a9789854a64ecc8c2e928382cf09dda9abde42bbdf92280cdd96","0x864baff97fa60164f91f334e0c9be00a152a416556b462f96d7c43b59fe1ebaff42f0471d0bf264976f8aa6431176eb905bd875024cf4f76c13a70bede51dc3e47e10b9d5652d30d2663b3af3f08d5d11b9709a0321aba371d2ef13174dcfcaf","0x95a46f32c994133ecc22db49bad2c36a281d6b574c83cfee6680b8c8100466ca034b815cfaedfbf54f4e75188e661df901abd089524e1e0eb0bf48d48caa9dd97482d2e8c1253e7e8ac250a32fd066d5b5cb08a8641bdd64ecfa48289dca83a3","0xa2cce2be4d12144138cb91066e0cd0542c80b478bf467867ebef9ddaf3bd64e918294043500bf5a9f45ee089a8d6ace917108d9ce9e4f41e7e860cbce19ac52e791db3b6dde1c4b0367377b581f999f340e1d6814d724edc94cb07f9c4730774","0xb145f203eee1ac0a1a1731113ffa7a8b0b694ef2312dabc4d431660f5e0645ef5838e3e624cfe1228cfa248d48b5760501f93e6ab13d3159fc241427116c4b90359599a4cb0a86d0bb9190aa7fabff482c812db966fd2ce0a1b48cb8ac8b3bca","0xadabe5d215c608696e03861cbd5f7401869c756b3a5aadc55f41745ad9478145d44393fec8bb6dfc4ad9236dc62b9ada0f7ca57fe2bae1b71565dbf9536d33a68b8e2090b233422313cc96afc7f1f7e0907dc7787806671541d6de8ce47c4cd0","0xae7845fa6b06db53201c1080e01e629781817f421f28956589c6df3091ec33754f8a4bd4647a6bb1c141ac22731e3c1014865d13f3ed538dcb0f7b7576435133d9d03be655f8fbb4c9f7d83e06d1210aedd45128c2b0c9bab45a9ddde1c862a5","0x9159eaa826a24adfa7adf6e8d2832120ebb6eccbeb3d0459ffdc338548813a2d239d22b26451fda98cc0c204d8e1ac69150b5498e0be3045300e789bcb4e210d5cd431da4bdd915a21f407ea296c20c96608ded0b70d07188e96e6c1a7b9b86b","0xa9fc6281e2d54b46458ef564ffaed6944bff71e389d0acc11fa35d3fcd8e10c1066e0dde5b9b6516f691bb478e81c6b20865281104dcb640e29dc116daae2e884f1fe6730d639dbe0e19a532be4fb337bf52ae8408446deb393d224eee7cfa50","0x84291a42f991bfb36358eedead3699d9176a38f6f63757742fdbb7f631f2c70178b1aedef4912fed7b6cf27e88ddc7eb0e2a6aa4b999f3eb4b662b93f386c8d78e9ac9929e21f4c5e63b12991fcde93aa64a735b75b535e730ff8dd2abb16e04","0xa1b7fcacae181495d91765dfddf26581e8e39421579c9cbd0dd27a40ea4c54af3444a36bf85a11dda2114246eaddbdd619397424bb1eb41b5a15004b902a590ede5742cd850cf312555be24d2df8becf48f5afba5a8cd087cb7be0a521728386","0x92feaaf540dbd84719a4889a87cdd125b7e995a6782911931fef26da9afcfbe6f86aaf5328fe1f77631491ce6239c5470f44c7791506c6ef1626803a5794e76d2be0af92f7052c29ac6264b7b9b51f267ad820afc6f881460521428496c6a5f1","0xa525c925bfae1b89320a5054acc1fa11820f73d0cf28d273092b305467b2831fab53b6daf75fb926f332782d50e2522a19
edcd85be5eb72f1497193c952d8cd0bcc5d43b39363b206eae4cb1e61668bde28a3fb2fc1e0d3d113f6dfadb799717","0x98752bb6f5a44213f40eda6aa4ff124057c1b13b6529ab42fe575b9afa66e59b9c0ed563fb20dff62130c436c3e905ee17dd8433ba02c445b1d67182ab6504a90bbe12c26a754bbf734665c622f76c62fe2e11dd43ce04fd2b91a8463679058b","0xa9aa9a84729f7c44219ff9e00e651e50ddea3735ef2a73fdf8ed8cd271961d8ed7af5cd724b713a89a097a3fe65a3c0202f69458a8b4c157c62a85668b12fc0d3957774bc9b35f86c184dd03bfefd5c325da717d74192cc9751c2073fe9d170e","0xb221c1fd335a4362eff504cd95145f122bf93ea02ae162a3fb39c75583fc13a932d26050e164da97cff3e91f9a7f6ff80302c19dd1916f24acf6b93b62f36e9665a8785413b0c7d930c7f1668549910f849bca319b00e59dd01e5dec8d2edacc","0xa71e2b1e0b16d754b848f05eda90f67bedab37709550171551050c94efba0bfc282f72aeaaa1f0330041461f5e6aa4d11537237e955e1609a469d38ed17f5c2a35a1752f546db89bfeff9eab78ec944266f1cb94c1db3334ab48df716ce408ef","0xb990ae72768779ba0b2e66df4dd29b3dbd00f901c23b2b4a53419226ef9232acedeb498b0d0687c463e3f1eead58b20b09efcefa566fbfdfe1c6e48d32367936142d0a734143e5e63cdf86be7457723535b787a9cfcfa32fe1d61ad5a2617220","0x8d27e7fbff77d5b9b9bbc864d5231fecf817238a6433db668d5a62a2c1ee1e5694fdd90c3293c06cc0cb15f7cbeab44d0d42be632cb9ff41fc3f6628b4b62897797d7b56126d65b694dcf3e298e3561ac8813fbd7296593ced33850426df42db","0xa92039a08b5502d5b211a7744099c9f93fa8c90cedcb1d05e92f01886219dd464eb5fb0337496ad96ed09c987da4e5f019035c5b01cc09b2a18b8a8dd419bc5895388a07e26958f6bd26751929c25f89b8eb4a299d822e2d26fec9ef350e0d3c","0x92dcc5a1c8c3e1b28b1524e3dd6dbecd63017c9201da9dbe077f1b82adc08c50169f56fc7b5a3b28ec6b89254de3e2fd12838a761053437883c3e01ba616670cea843754548ef84bcc397de2369adcca2ab54cd73c55dc68d87aec3fc2fe4f10"]} \ No newline at end of file diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 99093cf3b..565b8d789 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -24,7 +24,7 @@ use std::io::{Read, Write}; use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; -use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId, Hash256}; +use types::{BeaconState, ChainSpec, Config, Epoch, EthSpec, EthSpecId, Hash256}; use url::Url; pub use eth2_config::GenesisStateSource; @@ -43,6 +43,26 @@ instantiate_hardcoded_nets!(eth2_config); pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet"; +/// Contains the bytes from the trusted setup json. +/// The mainnet trusted setup is also reused in testnets. +/// +/// This is done to ensure that testnets also inherit the high security and +/// randomness of the mainnet kzg trusted setup ceremony. +/// +/// Note: The trusted setup for both mainnet and minimal presets is the same. +pub const TRUSTED_SETUP_BYTES: &[u8] = + include_bytes!("../built_in_network_configs/trusted_setup.json"); + +/// Returns `Some(trusted setup bytes)` if the deneb fork epoch is set to a real epoch +/// (i.e. not the far-future epoch) and `None` otherwise. No parsing happens here; the +/// raw setup bytes are returned as-is. +fn get_trusted_setup_from_config(config: &Config) -> Option<Vec<u8>> { + config + .deneb_fork_epoch + .filter(|epoch| epoch.value != Epoch::max_value()) + .map(|_| TRUSTED_SETUP_BYTES.to_vec()) +} + /// A simple slice-or-vec enum to avoid cloning the beacon state bytes in the /// binary whilst also supporting loading them from a file at runtime.
#[derive(Clone, PartialEq, Debug)] @@ -84,6 +104,7 @@ pub struct Eth2NetworkConfig { pub genesis_state_source: GenesisStateSource, pub genesis_state_bytes: Option<GenesisStateBytes>, pub config: Config, + pub kzg_trusted_setup: Option<Vec<u8>>, } impl Eth2NetworkConfig { @@ -99,6 +120,9 @@ impl Eth2NetworkConfig { /// Instantiates `Self` from a `HardcodedNet`. fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> { + let config: Config = serde_yaml::from_reader(net.config) + .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?; + let kzg_trusted_setup = get_trusted_setup_from_config(&config); Ok(Self { deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block) .map_err(|e| format!("Unable to parse deploy block: {:?}", e))?, @@ -110,8 +134,8 @@ genesis_state_bytes: Some(net.genesis_state_bytes) .filter(|bytes| !bytes.is_empty()) .map(Into::into), - config: serde_yaml::from_reader(net.config) - .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?, + config, + kzg_trusted_setup, }) } @@ -335,12 +359,15 @@ (None, GenesisStateSource::Unknown) }; + let kzg_trusted_setup = get_trusted_setup_from_config(&config); + Ok(Self { deposit_contract_deploy_block, boot_enr, genesis_state_source, genesis_state_bytes: genesis_state_bytes.map(Into::into), config, + kzg_trusted_setup, }) } } @@ -557,7 +584,7 @@ mod tests { GenesisStateSource::Unknown }; - let testnet: Eth2NetworkConfig = Eth2NetworkConfig { + let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr, genesis_state_source, @@ -566,6 +593,7 @@ .map(Encode::as_ssz_bytes) .map(Into::into), config, + kzg_trusted_setup: None, }; testnet diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index c31917e04..af1172348 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.5.0-", - fallback = "Lighthouse/v4.5.0" + prefix = "Lighthouse/v4.6.0-", + fallback = "Lighthouse/v4.6.0" ); /// Returns `VERSION`, but with platform information appended to the end.
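For context on the `kzg_trusted_setup` plumbing above: the setup bytes are attached only when `deneb_fork_epoch` is present and is not the far-future sentinel (`Epoch::max_value()`), i.e. only when the Deneb fork is actually scheduled. Below is a minimal, self-contained sketch of that gating logic; it uses plain `u64` epochs and a stand-in `Config` rather than the `types` crate, so the names are illustrative only.

const FAR_FUTURE_EPOCH: u64 = u64::MAX;

struct Config {
    deneb_fork_epoch: Option<u64>,
}

// Mirrors the shape of `get_trusted_setup_from_config`: a scheduled fork
// epoch yields `true`, while an unset or far-future epoch yields `false`.
fn trusted_setup_required(config: &Config) -> bool {
    config
        .deneb_fork_epoch
        .filter(|&epoch| epoch != FAR_FUTURE_EPOCH)
        .is_some()
}

fn main() {
    // 269568 is the mainnet Deneb fork epoch; any scheduled epoch behaves the same.
    let scheduled = Config { deneb_fork_epoch: Some(269_568) };
    let disabled = Config { deneb_fork_epoch: Some(FAR_FUTURE_EPOCH) };
    let unset = Config { deneb_fork_epoch: None };

    assert!(trusted_setup_required(&scheduled));
    assert!(!trusted_setup_required(&disabled));
    assert!(!trusted_setup_required(&unset));
}

The same gate runs in both `from_hardcoded_net` and the runtime-loading path, so a network that has not scheduled Deneb never carries the setup bytes around.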
diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 9c5321591..1fad56d47 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -8,15 +8,20 @@ edition = { workspace = true } test_logger = [] # Print log output to stderr when running tests instead of dropping it [dependencies] -slog = { workspace = true } -slog-term = { workspace = true } -tokio = { workspace = true } -lighthouse_metrics = { workspace = true } +chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } lazy_static = { workspace = true } -sloggers = { workspace = true } -slog-async = { workspace = true } -take_mut = "0.2.2" +lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } +slog = { workspace = true } +slog-async = { workspace = true } +slog-term = { workspace = true } +sloggers = { workspace = true } +take_mut = "0.2.2" +tokio = { workspace = true, features = [ "time" ] } +tracing = "0.1" +tracing-core = { workspace = true } +tracing-log = { workspace = true } +tracing-subscriber = { workspace = true } +tracing-appender = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index a9ad25f3f..caf3e1d2f 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -7,13 +7,22 @@ use lighthouse_metrics::{ use slog::Logger; use slog_term::Decorator; use std::io::{Result, Write}; +use std::path::PathBuf; use std::time::{Duration, Instant}; +use tracing_appender::non_blocking::NonBlocking; +use tracing_appender::rolling::{RollingFileAppender, Rotation}; +use tracing_logging_layer::LoggingLayer; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; pub const MAX_MESSAGE_WIDTH: usize = 40; pub mod async_record; mod sse_logging_components; +mod tracing_logging_layer; +mod tracing_metrics_layer; + pub use sse_logging_components::SSELoggingComponents; +pub use tracing_metrics_layer::MetricsLayer; /// The minimum interval between log messages indicating that a queue is full. 
const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30); @@ -214,6 +223,65 @@ impl TimeLatch { } } +pub fn create_tracing_layer(base_tracing_log_path: PathBuf, turn_on_terminal_logs: bool) { + let filter_layer = match tracing_subscriber::EnvFilter::try_from_default_env() + .or_else(|_| tracing_subscriber::EnvFilter::try_new("warn")) + { + Ok(filter) => filter, + Err(e) => { + eprintln!("Failed to initialize dependency logging {e}"); + return; + } + }; + + let Ok(libp2p_writer) = RollingFileAppender::builder() + .rotation(Rotation::DAILY) + .max_log_files(2) + .filename_prefix("libp2p") + .filename_suffix("log") + .build(base_tracing_log_path.clone()) + else { + eprintln!("Failed to initialize libp2p rolling file appender"); + return; + }; + + let Ok(discv5_writer) = RollingFileAppender::builder() + .rotation(Rotation::DAILY) + .max_log_files(2) + .filename_prefix("discv5") + .filename_suffix("log") + .build(base_tracing_log_path.clone()) + else { + eprintln!("Failed to initialize discv5 rolling file appender"); + return; + }; + + let (libp2p_non_blocking_writer, libp2p_guard) = NonBlocking::new(libp2p_writer); + let (discv5_non_blocking_writer, discv5_guard) = NonBlocking::new(discv5_writer); + + let custom_layer = LoggingLayer { + libp2p_non_blocking_writer, + libp2p_guard, + discv5_non_blocking_writer, + discv5_guard, + }; + + if let Err(e) = tracing_subscriber::fmt() + .with_env_filter(filter_layer) + .with_writer(move || { + tracing_subscriber::fmt::writer::OptionalWriter::<std::io::Stdout>::from( + turn_on_terminal_logs.then(std::io::stdout), + ) + }) + .finish() + .with(MetricsLayer) + .with(custom_layer) + .try_init() + { + eprintln!("Failed to initialize dependency logging {e}"); + } +} + /// Return a logger suitable for test usage. /// /// By default no logs will be printed, but they can be enabled via diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs new file mode 100644 index 000000000..e7d9109be --- /dev/null +++ b/common/logging/src/tracing_logging_layer.rs @@ -0,0 +1,56 @@ +use chrono::prelude::*; +use std::io::Write; +use tracing::Subscriber; +use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; +use tracing_subscriber::layer::Context; +use tracing_subscriber::Layer; + +pub struct LoggingLayer { + pub libp2p_non_blocking_writer: NonBlocking, + pub libp2p_guard: WorkerGuard, + pub discv5_non_blocking_writer: NonBlocking, + pub discv5_guard: WorkerGuard, +} + +impl<S> Layer<S> for LoggingLayer +where + S: Subscriber, +{ + fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context<'_, S>) { + let meta = event.metadata(); + let log_level = meta.level(); + let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); + + let target = match meta.target().split_once("::") { + Some((crate_name, _)) => crate_name, + None => "unknown", + }; + + let mut writer = match target { + "libp2p_gossipsub" => self.libp2p_non_blocking_writer.clone(), + "discv5" => self.discv5_non_blocking_writer.clone(), + _ => return, + }; + + let mut visitor = LogMessageExtractor { + message: String::default(), + }; + + event.record(&mut visitor); + let message = format!("{} {} {}\n", timestamp, log_level, visitor.message); + + if let Err(e) = writer.write_all(message.as_bytes()) { + eprintln!("Failed to write log: {}", e); + } + } +} + +struct LogMessageExtractor { + message: String, +} + +impl tracing_core::field::Visit for LogMessageExtractor { + fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) { + self.message = format!("{}
{:?}", self.message, value); + } +} diff --git a/common/logging/src/tracing_metrics_layer.rs b/common/logging/src/tracing_metrics_layer.rs new file mode 100644 index 000000000..08c323ee8 --- /dev/null +++ b/common/logging/src/tracing_metrics_layer.rs @@ -0,0 +1,63 @@ +//! Exposes [`MetricsLayer`]: A tracing layer that registers metrics of logging events. + +use lighthouse_metrics as metrics; +use tracing_log::NormalizeEvent; + +lazy_static! { + /// Count of `INFO` logs registered per enabled dependency. + pub static ref DEP_INFOS_TOTAL: metrics::Result<metrics::IntCounterVec> = + metrics::try_create_int_counter_vec( + "dep_info_total", + "Count of infos logged per enabled dependency", + &["target"] + ); + /// Count of `WARN` logs registered per enabled dependency. + pub static ref DEP_WARNS_TOTAL: metrics::Result<metrics::IntCounterVec> = + metrics::try_create_int_counter_vec( + "dep_warn_total", + "Count of warns logged per enabled dependency", + &["target"] + ); + /// Count of `ERROR` logs registered per enabled dependency. + pub static ref DEP_ERRORS_TOTAL: metrics::Result<metrics::IntCounterVec> = + metrics::try_create_int_counter_vec( + "dep_error_total", + "Count of errors logged per enabled dependency", + &["target"] + ); +} + +/// Layer that registers Prometheus metrics for `INFO`, `WARN` and `ERROR` logs emitted per dependency. +/// Dependencies are enabled via the `RUST_LOG` env flag. +pub struct MetricsLayer; + +impl<S: tracing_core::Subscriber> tracing_subscriber::layer::Layer<S> for MetricsLayer { + fn on_event( + &self, + event: &tracing_core::Event<'_>, + _ctx: tracing_subscriber::layer::Context<'_, S>, + ) { + // get the event's normalized metadata + // this is necessary to get the correct module path for libp2p events + let normalized_meta = event.normalized_metadata(); + let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata()); + + if !meta.is_event() { + // ignore tracing span events + return; + } + + let full_target = meta.module_path().unwrap_or_else(|| meta.target()); + let target = full_target + .split_once("::") + .map(|(name, _rest)| name) + .unwrap_or(full_target); + let target = &[target]; + match *meta.level() { + tracing_core::Level::INFO => metrics::inc_counter_vec(&DEP_INFOS_TOTAL, target), + tracing_core::Level::WARN => metrics::inc_counter_vec(&DEP_WARNS_TOTAL, target), + tracing_core::Level::ERROR => metrics::inc_counter_vec(&DEP_ERRORS_TOTAL, target), + _ => {} + } + } +} diff --git a/common/lru_cache/Cargo.toml b/common/lru_cache/Cargo.toml index 73c623ed4..c1bd15f9f 100644 --- a/common/lru_cache/Cargo.toml +++ b/common/lru_cache/Cargo.toml @@ -6,3 +6,6 @@ edition = { workspace = true } [dependencies] fnv = { workspace = true } + +[dev-dependencies] +mock_instant = "0.3" diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 966741ca4..0b2fd8356 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -1,7 +1,13 @@ //! This implements a time-based LRU cache for fast checking of duplicates use fnv::FnvHashSet; +#[cfg(test)] +use mock_instant::Instant; use std::collections::VecDeque; -use std::time::{Duration, Instant}; + +#[cfg(not(test))] +use std::time::Instant; + +use std::time::Duration; struct Element { /// The key being inserted.
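The `lru_cache` change above swaps `std::time::Instant` for `mock_instant::Instant` in test builds, which lets the expiry tests that follow advance a virtual clock instead of sleeping. Here is a sketch of the same pattern on a toy type; `Expiring` is a hypothetical stand-in for the crate's cache, but the `mock_instant` 0.3 calls (`Instant::now`, `MockClock::advance`) are the same ones used in the diff.

#[cfg(test)]
use mock_instant::Instant;
#[cfg(not(test))]
use std::time::Instant;

use std::time::Duration;

struct Expiring {
    created: Instant,
    ttl: Duration,
}

impl Expiring {
    fn new(ttl: Duration) -> Self {
        Self {
            created: Instant::now(),
            ttl,
        }
    }

    fn is_expired(&self) -> bool {
        // Under `cfg(test)` this reads the mocked clock, not wall time.
        self.created.elapsed() >= self.ttl
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use mock_instant::MockClock;

    #[test]
    fn expires_without_sleeping() {
        let item = Expiring::new(Duration::from_millis(100));
        assert!(!item.is_expired());
        // Advance the mocked clock; no real time passes, so the assertion is
        // exact instead of racing the OS scheduler.
        MockClock::advance(Duration::from_millis(100));
        assert!(item.is_expired());
    }
}

This is why the hunk below can replace `std::thread::sleep` with `MockClock::advance` and keep tight millisecond bounds without flakiness.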
@@ -222,16 +228,16 @@ mod test { cache.insert("a"); cache.insert("b"); - std::thread::sleep(Duration::from_millis(20)); + mock_instant::MockClock::advance(Duration::from_millis(20)); cache.insert("a"); // a is newer now - std::thread::sleep(Duration::from_millis(85)); + mock_instant::MockClock::advance(Duration::from_millis(85)); assert!(cache.contains(&"a")); // b was inserted first but was not as recent, so it should have been removed assert!(!cache.contains(&"b")); - std::thread::sleep(Duration::from_millis(16)); + mock_instant::MockClock::advance(Duration::from_millis(16)); assert!(!cache.contains(&"a")); } } diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index c796ea39a..92533048c 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -3,7 +3,7 @@ //! Due to `jemalloc` requiring configuration at compile time or immediately upon runtime //! initialisation it is configured via a Cargo config file in `.cargo/config.toml`. //! -//! The `jemalloc` tuning can be overriden by: +//! The `jemalloc` tuning can be overridden by: //! //! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. //! B) `_RJEM_MALLOC_CONF` at runtime. diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index e22f747bb..3731229c3 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -12,7 +12,6 @@ task_executor = { workspace = true } tokio = { workspace = true } eth2 = { workspace = true } serde_json = { workspace = true } -serde_derive = "1.0.116" serde = { workspace = true } lighthouse_version = { workspace = true } lighthouse_metrics = { workspace = true } diff --git a/common/monitoring_api/src/types.rs b/common/monitoring_api/src/types.rs index 9765e3461..cf33ccb9c 100644 --- a/common/monitoring_api/src/types.rs +++ b/common/monitoring_api/src/types.rs @@ -1,7 +1,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use eth2::lighthouse::{ProcessHealth, SystemHealth}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; pub const VERSION: u64 = 1; pub const CLIENT_NAME: &str = "lighthouse"; diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 1c8813ca2..6bf746450 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -7,8 +7,8 @@ mod system_time_slot_clock; use std::time::Duration; -pub use crate::manual_slot_clock::ManualSlotClock; pub use crate::manual_slot_clock::ManualSlotClock as TestingSlotClock; +pub use crate::manual_slot_clock::ManualSlotClock; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use metrics::scrape_for_metrics; use types::consts::merge::INTERVALS_PER_SLOT; @@ -137,4 +137,13 @@ pub trait SlotClock: Send + Sync + Sized + Clone { slot_clock.set_current_time(freeze_at); slot_clock } + + /// Returns the delay between the start of the slot and when a request for block components + /// missed over gossip in the current slot should be made via RPC. + /// + /// Currently set equal to 1/2 of the `unagg_attestation_production_delay`, but this may be + /// changed in the future.
+ fn single_lookup_delay(&self) -> Duration { + self.unagg_attestation_production_delay() / 2 + } } diff --git a/common/slot_clock/src/system_time_slot_clock.rs b/common/slot_clock/src/system_time_slot_clock.rs index c54646fbc..770132064 100644 --- a/common/slot_clock/src/system_time_slot_clock.rs +++ b/common/slot_clock/src/system_time_slot_clock.rs @@ -2,8 +2,6 @@ use super::{ManualSlotClock, SlotClock}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use types::Slot; -pub use std::time::SystemTimeError; - /// Determines the present slot based upon the present system time. #[derive(Clone)] pub struct SystemTimeSlotClock { diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index c02380c9d..5f0de80d9 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -8,6 +8,5 @@ lighthouse_network = { workspace = true } types = { workspace = true } sysinfo = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_json = { workspace = true } parking_lot = { workspace = true } diff --git a/common/test_random_derive/src/lib.rs b/common/test_random_derive/src/lib.rs index 6c72ecb44..648c20121 100644 --- a/common/test_random_derive/src/lib.rs +++ b/common/test_random_derive/src/lib.rs @@ -20,9 +20,8 @@ pub fn test_random_derive(input: TokenStream) -> TokenStream { let name = &derived_input.ident; let (impl_generics, ty_generics, where_clause) = &derived_input.generics.split_for_impl(); - let struct_data = match &derived_input.data { - syn::Data::Struct(s) => s, - _ => panic!("test_random_derive only supports structs."), + let syn::Data::Struct(struct_data) = &derived_input.data else { + panic!("test_random_derive only supports structs."); }; // Build quotes for fields that should be generated and those that should be built from diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index d93b74ca9..eb061c752 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -14,6 +14,10 @@ lazy_static::lazy_static! { "process_virtual_memory_bytes", "Virtual memory used by the current process" ); + pub static ref PROCESS_SHR_MEM: Result<IntGauge> = try_create_int_gauge( + "process_shared_memory_bytes", + "Shared memory used by the current process" + ); pub static ref PROCESS_SECONDS: Result<IntGauge> = try_create_int_gauge( "process_cpu_seconds_total", "Total cpu time taken by the current process" @@ -90,6 +94,7 @@ pub fn scrape_process_health_metrics() { set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads); set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64); set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64); + set_gauge(&PROCESS_SHR_MEM, health.pid_mem_shared_memory_size as i64); set_gauge(&PROCESS_SECONDS, health.pid_process_seconds_total as i64); } } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ea3a58127..865a5affb 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -197,7 +197,8 @@ impl From for Error { /// Indicates if a block has been verified by an execution payload. /// /// There is no variant for "invalid", since such a block should never be added to fork choice. -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq, Encode, Decode)] +#[ssz(enum_behaviour = "tag")] pub enum PayloadVerificationStatus { /// An EL has declared the execution payload to be valid.
Verified, @@ -290,7 +291,7 @@ pub enum AttestationFromBlock { } /// Parameters which are cached between calls to `ForkChoice::get_head`. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ForkchoiceUpdateParameters { /// The most recent result of running `ForkChoice::get_head`. pub head_root: Hash256, @@ -680,7 +681,7 @@ where .ok_or_else(|| Error::InvalidBlock(InvalidBlock::UnknownParent(block.parent_root())))?; // Blocks cannot be in the future. If they are, their consideration must be delayed until - // the are in the past. + // they are in the past. // // Note: presently, we do not delay consideration. We just drop the block. if block.slot() > current_slot { @@ -722,7 +723,8 @@ where // Add proposer score boost if the block is timely. let is_before_attesting_interval = block_delay < Duration::from_secs(spec.seconds_per_slot / INTERVALS_PER_SLOT); - if current_slot == block.slot() && is_before_attesting_interval { + let is_first_block = self.fc_store.proposer_boost_root().is_zero(); + if current_slot == block.slot() && is_before_attesting_interval && is_first_block { self.fc_store.set_proposer_boost_root(block_root); } @@ -762,7 +764,8 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { - BeaconBlockRef::Capella(_) + BeaconBlockRef::Deneb(_) + | BeaconBlockRef::Capella(_) | BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => match progressive_balances_mode { ProgressiveBalancesMode::Disabled => { diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index e7ca84efb..5e8cfb1ee 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -7,4 +7,6 @@ pub use crate::fork_choice::{ QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; -pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; +pub use proto_array::{ + Block as ProtoBlock, ExecutionStatus, InvalidationOperation, ProposerHeadError, +}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index d28210aa1..649fbcc55 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1,9 +1,5 @@ #![cfg(not(debug_assertions))] -use std::fmt; -use std::sync::Mutex; -use std::time::Duration; - use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; @@ -14,6 +10,9 @@ use beacon_chain::{ use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; +use std::fmt; +use std::sync::Mutex; +use std::time::Duration; use store::MemoryStore; use types::{ test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, @@ -93,8 +92,7 @@ impl ForkChoiceTest { T: Fn(&BeaconForkChoiceStore, MemoryStore>) -> U, { func( - &self - .harness + self.harness .chain .canonical_head .fork_choice_read_lock() @@ -195,12 +193,13 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); - let (block, state_) = self.harness.make_block(state, slot).await; + let (block_contents, state_) = self.harness.make_block(state, slot).await; state = state_; - if !predicate(block.message(), &state) { + if !predicate(block_contents.0.message(), &state) { break; } - if let Ok(block_hash) = self.harness.process_block_result(block.clone()).await { + let block = block_contents.0.clone(); + if let 
Ok(block_hash) = self.harness.process_block_result(block_contents).await { self.harness.attest_block( &state, block.state_root(), block_hash, &block, &validators, ); @@ -324,8 +323,9 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; - func(&mut signed_block, &mut state); + let ((block_arc, _block_blobs), mut state) = self.harness.make_block(state, slot).await; + let mut block = (*block_arc).clone(); + func(&mut block, &mut state); let current_slot = self.harness.get_current_slot(); self.harness .chain .fork_choice_write_lock() .on_block( current_slot, - signed_block.message(), - signed_block.canonical_root(), + block.message(), + block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -367,8 +367,9 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; - mutation_func(&mut signed_block, &mut state); + let ((block_arc, _block_blobs), mut state) = self.harness.make_block(state, slot).await; + let mut block = (*block_arc).clone(); + mutation_func(&mut block, &mut state); let current_slot = self.harness.get_current_slot(); let err = self .harness .chain .fork_choice_write_lock() .on_block( current_slot, - signed_block.message(), - signed_block.canonical_root(), + block.message(), + block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -386,8 +387,7 @@ &self.harness.chain.spec, self.harness.logger(), ) - .err() - .expect("on_block did not return an error"); + .expect_err("on_block did not return an error"); comparison_func(err); self } @@ -841,7 +841,7 @@ async fn valid_attestation() { .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, - |result| assert_eq!(result.unwrap(), ()), + |result| assert!(result.is_ok()), ) .await; } @@ -1074,7 +1074,7 @@ async fn invalid_attestation_delayed_slot() { .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, - |result| assert_eq!(result.unwrap(), ()), + |result| assert!(result.is_ok()), ) .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 1)) @@ -1183,7 +1183,7 @@ async fn weak_subjectivity_check_fails_early_epoch() { let mut checkpoint = setup_harness.harness.finalized_checkpoint(); - checkpoint.epoch = checkpoint.epoch - 1; + checkpoint.epoch -= 1; let chain_config = ChainConfig { weak_subjectivity_checkpoint: Some(checkpoint), @@ -1210,7 +1210,7 @@ async fn weak_subjectivity_check_fails_late_epoch() { let mut checkpoint = setup_harness.harness.finalized_checkpoint(); - checkpoint.epoch = checkpoint.epoch + 1; + checkpoint.epoch += 1; let chain_config = ChainConfig { weak_subjectivity_checkpoint: Some(checkpoint), @@ -1353,6 +1353,14 @@ async fn progressive_balances_cache_attester_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() + // Note: This test may fail if the shuffling used changes; right now it re-runs with + // deterministic shuffling. A shuffling change may cause the slashed proposer to propose + // again in the next epoch, which results in a block processing failure + // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip + // the slot in this scenario rather than panicking. The same applies to + // `progressive_balances_cache_proposer_slashing`.
+ .apply_blocks(1) + .await .add_previous_epoch_attester_slashing() .await // expect fork choice to import blocks successfully after a previous epoch attester is @@ -1376,6 +1384,14 @@ async fn progressive_balances_cache_proposer_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() + // Note: This test may fail if the shuffling used changes; right now it re-runs with + // deterministic shuffling. A shuffling change may cause the slashed proposer to propose + // again in the next epoch, which results in a block processing failure + // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip + // the slot in this scenario rather than panicking. The same applies to + // `progressive_balances_cache_attester_slashing`. + .apply_blocks(1) + .await .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) .await // expect fork choice to import blocks successfully after a previous epoch proposer is diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index dc3de71ce..2d2d2afdd 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -250,7 +250,7 @@ impl MerkleTree { if deposit_count == (0x1 << level) { return Ok(MerkleTree::Finalized( *finalized_branch - .get(0) + .first() .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, )); } @@ -369,7 +369,7 @@ pub fn verify_merkle_proof( } /// Compute a root hash from a leaf and a Merkle proof. -fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 { +pub fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 { assert_eq!(branch.len(), depth, "proof length should equal depth"); let mut merkle_root = leaf.as_bytes().to_vec(); diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index b30173eb7..99f98cf54 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -13,7 +13,6 @@ types = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_yaml = { workspace = true } safe_arith = { workspace = true } -superstruct = { workspace = true } \ No newline at end of file +superstruct = { workspace = true } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 98d43e485..ebb639819 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -5,7 +5,7 @@ mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 7b6afb94f..4726715a1 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,6 +1,6 @@ use crate::error::InvalidBestNodeInfo; use crate::{error::Error, Block, ExecutionStatus, JustifiedBalances}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -1035,13 +1035,11
@@ impl ProtoArray { .epoch .start_slot(E::slots_per_epoch()); - let mut node = if let Some(node) = self + let Some(mut node) = self .indices .get(&root) .and_then(|index| self.nodes.get(*index)) - { - node - } else { + else { // An unknown root is not a finalized descendant. This line can only // be reached if the user supplies a root that is not known to fork // choice. diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 5911e50fc..1c41b1855 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -7,7 +7,7 @@ use crate::{ ssz_container::SszContainer, JustifiedBalances, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::{ @@ -188,7 +188,7 @@ where } /// Information about the proposer head used for opportunistic re-orgs. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ProposerHeadInfo { /// Information about the *current* head block, which may be re-orged. pub head_node: ProtoNode, @@ -206,7 +206,7 @@ pub struct ProposerHeadInfo { /// /// This type intentionally does not implement `Debug` so that callers are forced to handle the /// enum. -#[derive(Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum ProposerHeadError { DoNotReOrg(DoNotReOrg), Error(E), @@ -243,7 +243,7 @@ impl ProposerHeadError { /// Reasons why a re-org should not be attempted. /// /// This type intentionally does not implement `Debug` so that the `Display` impl must be used. -#[derive(Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum DoNotReOrg { MissingHeadOrParentNode, MissingHeadFinalizedCheckpoint, diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index e4dfb45d5..7279fd28f 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -40,3 +40,4 @@ arbitrary-fuzz = [ "ssz_types/arbitrary", "tree_hash/arbitrary", ] +portable = ["bls/supranational-portable"] \ No newline at end of file diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index ed5e64294..f502d7f69 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -3,6 +3,8 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, VerifyBlockRoot, }; +use itertools::Itertools; +use std::iter::Peekable; use std::marker::PhantomData; use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -25,7 +27,7 @@ pub struct BlockReplayer< 'a, Spec: EthSpec, Error = BlockReplayError, - StateRootIter = StateRootIterDefault, + StateRootIter: Iterator> = StateRootIterDefault, > { state: BeaconState, spec: &'a ChainSpec, @@ -36,7 +38,7 @@ pub struct BlockReplayer< post_block_hook: Option>, pre_slot_hook: Option>, post_slot_hook: Option>, - state_root_iter: Option, + pub(crate) state_root_iter: Option>, state_root_miss: bool, _phantom: PhantomData, } @@ -138,7 +140,7 @@ where /// `self.state.slot` to the `target_slot` supplied to `apply_blocks` (inclusive of both /// endpoints). 
pub fn state_root_iter(mut self, iter: StateRootIter) -> Self { - self.state_root_iter = Some(iter); + self.state_root_iter = Some(iter.peekable()); self } @@ -192,7 +194,7 @@ where // If a state root iterator is configured, use it to find the root. if let Some(ref mut state_root_iter) = self.state_root_iter { let opt_root = state_root_iter - .take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) + .peeking_take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) .find(|res| res.as_ref().map_or(true, |(_, s)| *s == slot)) .transpose()?; diff --git a/consensus/state_processing/src/common/get_attestation_participation.rs b/consensus/state_processing/src/common/get_attestation_participation.rs index 499d8fa8f..e4e30230a 100644 --- a/consensus/state_processing/src/common/get_attestation_participation.rs +++ b/consensus/state_processing/src/common/get_attestation_participation.rs @@ -44,8 +44,21 @@ pub fn get_attestation_participation_flag_indices( if is_matching_source && inclusion_delay <= T::slots_per_epoch().integer_sqrt() { participation_flag_indices.push(TIMELY_SOURCE_FLAG_INDEX); } - if is_matching_target && inclusion_delay <= T::slots_per_epoch() { - participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); + match state { + &BeaconState::Base(_) + | &BeaconState::Altair(_) + | &BeaconState::Merge(_) + | &BeaconState::Capella(_) => { + if is_matching_target && inclusion_delay <= T::slots_per_epoch() { + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); + } + } + &BeaconState::Deneb(_) => { + if is_matching_target { + // [Modified in Deneb:EIP7045] + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); + } + } } if is_matching_head && inclusion_delay == spec.min_attestation_inclusion_delay { participation_flag_indices.push(TIMELY_HEAD_FLAG_INDEX); diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index d54da43a0..d8b1c1a10 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -53,11 +53,12 @@ pub fn slash_validator( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - whistleblower_reward - .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)? - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => whistleblower_reward + .safe_mul(PROPOSER_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)?, }; // Ensure the whistleblower index is in the validator registry. 
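The `peeking_take_while` switch in `block_replayer.rs` above is subtle: plain `Iterator::take_while` consumes the first element that fails its predicate, so a state root lying just past the slot being replayed would be silently dropped and unavailable to later callers. Wrapping the iterator in `Peekable` and using `itertools`' `peeking_take_while` leaves that element in place. A minimal standalone sketch of the difference (not part of this diff; it only assumes the `itertools` crate that `block_replayer.rs` already imports):

```rust
use itertools::Itertools;

fn main() {
    // Plain `take_while` must pull `3` out of the iterator to decide to
    // stop, so `3` is lost to any subsequent consumer.
    let mut roots = vec![1, 2, 3, 4].into_iter();
    let taken: Vec<_> = roots.by_ref().take_while(|&x| x < 3).collect();
    assert_eq!(taken, vec![1, 2]);
    assert_eq!(roots.next(), Some(4)); // `3` was consumed and dropped.

    // `peeking_take_while` only peeks at `3`, leaving it in the iterator.
    let mut roots = vec![1, 2, 3, 4].into_iter().peekable();
    let taken: Vec<_> = roots.peeking_take_while(|&x| x < 3).collect();
    assert_eq!(taken, vec![1, 2]);
    assert_eq!(roots.next(), Some(3)); // `3` is still available.
}
```

This is exactly the behaviour that the new `block_replayer_peeking_state_roots` test further down pins: a dummy state root at `slot + 1` must survive block replay.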
diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index ccf8cefb6..8e49a0d49 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,14 +1,14 @@ use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; +use ssz_derive::{Decode, Encode}; use std::collections::{hash_map::Entry, HashMap}; -use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; -#[derive(Debug)] +#[derive(Debug, PartialEq, Clone, Encode, Decode)] pub struct ConsensusContext { /// Slot to act as an identifier/safeguard slot: Slot, @@ -17,9 +17,10 @@ pub struct ConsensusContext { /// Block root of the block at `slot`. current_block_root: Option, /// Cache of indexed attestations constructed during block processing. + /// We can skip serializing / deserializing this as the cache will just be rebuilt + #[ssz(skip_serializing, skip_deserializing)] indexed_attestations: HashMap<(AttestationData, BitList), IndexedAttestation>, - _phantom: PhantomData, } #[derive(Debug, PartialEq, Clone)] @@ -42,7 +43,6 @@ impl ConsensusContext { proposer_index: None, current_block_root: None, indexed_attestations: HashMap::new(), - _phantom: PhantomData, } } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index ebbc8f9f3..284a7019f 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,7 +2,9 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::process_deposit, }; use crate::common::DepositDataTree; -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, +}; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; @@ -91,6 +93,23 @@ pub fn initialize_beacon_state_from_eth1( } } + // Upgrade to deneb if configured from genesis + if spec + .deneb_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_deneb(&mut state, spec)?; + + // Remove intermediate Capella fork from `state.fork`. + state.fork_mut().previous_version = spec.deneb_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Deneb(header)) = execution_payload_header { + *state.latest_execution_payload_header_deneb_mut()? 
= header; + } + } + // Now that we have our validators, initialize the caches (including the committees) state.build_caches(spec)?; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index b8b76a499..b9a147a5a 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -26,6 +26,7 @@ pub use verify_exit::verify_exit; pub mod altair; pub mod block_signature_verifier; +pub mod deneb; pub mod errors; mod is_valid_indexed_attestation; pub mod process_operations; @@ -166,11 +167,11 @@ pub fn per_block_processing>( // `process_randao` as the former depends on the `randao_mix` computed with the reveal of the // previous block. if is_execution_enabled(state, block.body()) { - let payload = block.body().execution_payload()?; + let body = block.body(); if state_processing_strategy == StateProcessingStrategy::Accurate { - process_withdrawals::(state, payload, spec)?; + process_withdrawals::(state, body.execution_payload()?, spec)?; } - process_execution_payload::(state, payload, spec)?; + process_execution_payload::(state, body, spec)?; } process_randao(state, block, verify_randao, ctxt, spec)?; @@ -355,9 +356,10 @@ pub fn get_new_eth1_data( pub fn partially_verify_execution_payload>( state: &BeaconState, block_slot: Slot, - payload: Payload::Ref<'_>, + body: BeaconBlockBodyRef, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + let payload = body.execution_payload()?; if is_merge_transition_complete(state) { block_verify!( payload.parent_hash() == state.latest_execution_payload_header()?.block_hash(), @@ -384,6 +386,17 @@ pub fn partially_verify_execution_payload>( state: &mut BeaconState, - payload: Payload::Ref<'_>, + body: BeaconBlockBodyRef, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - partially_verify_execution_payload::(state, state.slot(), payload, spec)?; - + partially_verify_execution_payload::(state, state.slot(), body, spec)?; + let payload = body.execution_payload()?; match state.latest_execution_payload_header_mut()? { ExecutionPayloadHeaderRefMut::Merge(header_mut) => { match payload.to_execution_payload_header() { @@ -414,6 +427,12 @@ pub fn process_execution_payload>( _ => return Err(BlockProcessingError::IncorrectStateType), } } + ExecutionPayloadHeaderRefMut::Deneb(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Deneb(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } } Ok(()) @@ -422,15 +441,19 @@ pub fn process_execution_payload>( /// These functions will definitely be called before the merge. Their entire purpose is to check if /// the merge has happened or if we're on the transition block. Thus we don't want to propagate /// errors from the `BeaconState` being an earlier variant than `BeaconStateMerge` as we'd have to -/// repeaetedly write code to treat these errors as false. +/// repeatedly write code to treat these errors as false. /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { - // We must check defaultness against the payload header with 0x0 roots, as that's what's meant - // by `ExecutionPayloadHeader()` in the spec. 
- state - .latest_execution_payload_header() - .map(|header| !header.is_default_with_zero_roots()) - .unwrap_or(false) + match state { + // We must check defaultness against the payload header with 0x0 roots, as that's what's meant + // by `ExecutionPayloadHeader()` in the spec. + BeaconState::Merge(_) => state + .latest_execution_payload_header() + .map(|header| !header.is_default_with_zero_roots()) + .unwrap_or(false), + BeaconState::Deneb(_) | BeaconState::Capella(_) => true, + BeaconState::Base(_) | BeaconState::Altair(_) => false, + } } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block pub fn is_merge_transition_block>( @@ -526,7 +549,7 @@ pub fn process_withdrawals>( ) -> Result<(), BlockProcessingError> { match state { BeaconState::Merge(_) => Ok(()), - BeaconState::Capella(_) => { + BeaconState::Capella(_) | BeaconState::Deneb(_) => { let expected_withdrawals = get_expected_withdrawals(state, spec)?; let expected_root = expected_withdrawals.tree_hash_root(); let withdrawals_root = payload.withdrawals_root()?; diff --git a/consensus/state_processing/src/per_block_processing/deneb.rs b/consensus/state_processing/src/per_block_processing/deneb.rs new file mode 100644 index 000000000..217c2ea30 --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/deneb.rs @@ -0,0 +1,8 @@ +use ethereum_hashing::hash_fixed; +use types::{KzgCommitment, VersionedHash, VERSIONED_HASH_VERSION_KZG}; + +pub fn kzg_commitment_to_versioned_hash(kzg_commitment: &KzgCommitment) -> VersionedHash { + let mut hashed_commitment = hash_fixed(&kzg_commitment.0); + hashed_commitment[0] = VERSIONED_HASH_VERSION_KZG; + VersionedHash::from(hashed_commitment) +} diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 0aba1d83f..de1c13295 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -78,6 +78,10 @@ pub enum BlockProcessingError { expected: u64, found: u64, }, + ExecutionInvalidBlobsLen { + max: usize, + actual: usize, + }, ExecutionInvalid, ConsensusContext(ContextError), WithdrawalsRootMismatch { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 1dbcb7fb8..cb24a7ba7 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -95,7 +95,7 @@ pub mod base { } } -pub mod altair { +pub mod altair_deneb { use super::*; use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation; use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; @@ -267,8 +267,9 @@ pub fn process_attestations>( } BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) - | BeaconBlockBodyRef::Capella(_) => { - altair::process_attestations( + | BeaconBlockBodyRef::Capella(_) + | BeaconBlockBodyRef::Deneb(_) => { + altair_deneb::process_attestations( state, block_body.attestations(), verify_signatures, diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index c05d3f057..fcd324e9e 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ 
b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -387,12 +387,23 @@ where let exit = &signed_exit.message; let proposer_index = exit.validator_index as usize; - let domain = spec.get_domain( - exit.epoch, - Domain::VoluntaryExit, - &state.fork(), - state.genesis_validators_root(), - ); + let domain = match state { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) => spec.get_domain( + exit.epoch, + Domain::VoluntaryExit, + &state.fork(), + state.genesis_validators_root(), + ), + // EIP-7044 + BeaconState::Deneb(_) => spec.compute_domain( + Domain::VoluntaryExit, + spec.capella_fork_version, + state.genesis_validators_root(), + ), + }; let message = exit.signing_root(domain); diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 16fa2462f..83fd0f232 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1,11 +1,11 @@ -#![cfg(all(test, not(feature = "fake_crypto")))] +#![cfg(all(test, not(feature = "fake_crypto"), not(debug_assertions)))] use crate::per_block_processing::errors::{ AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError, DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; -use crate::{per_block_processing, StateProcessingStrategy}; +use crate::{per_block_processing, BlockReplayError, BlockReplayer, StateProcessingStrategy}; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, @@ -34,7 +34,7 @@ async fn get_harness( // Set the state and block to be in the last slot of the `epoch_offset`th epoch. 
let last_slot_of_epoch = (MainnetEthSpec::genesis_epoch() + epoch_offset).end_slot(E::slots_per_epoch()); - let harness = BeaconChainHarness::builder(E::default()) + let harness = BeaconChainHarness::>::builder(E::default()) .default_spec() .keypairs(KEYPAIRS[0..num_validators].to_vec()) .fresh_ephemeral_store() @@ -63,7 +63,7 @@ async fn valid_block_ok() { let state = harness.get_current_state(); let slot = state.slot(); - let (block, mut state) = harness + let ((block, _), mut state) = harness .make_block_return_pre_state(state, slot + Slot::new(1)) .await; @@ -89,8 +89,8 @@ async fn invalid_block_header_state_slot() { let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot).await; - let (mut block, signature) = signed_block.deconstruct(); + let ((signed_block, _), mut state) = harness.make_block_return_pre_state(state, slot).await; + let (mut block, signature) = (*signed_block).clone().deconstruct(); *block.slot_mut() = slot + Slot::new(1); let mut ctxt = ConsensusContext::new(block.slot()); @@ -120,10 +120,10 @@ async fn invalid_parent_block_root() { let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness + let ((signed_block, _), mut state) = harness .make_block_return_pre_state(state, slot + Slot::new(1)) .await; - let (mut block, signature) = signed_block.deconstruct(); + let (mut block, signature) = (*signed_block).clone().deconstruct(); *block.parent_root_mut() = Hash256::from([0xAA; 32]); let mut ctxt = ConsensusContext::new(block.slot()); @@ -155,10 +155,10 @@ async fn invalid_block_signature() { let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness + let ((signed_block, _), mut state) = harness .make_block_return_pre_state(state, slot + Slot::new(1)) .await; - let (block, _) = signed_block.deconstruct(); + let (block, _) = (*signed_block).clone().deconstruct(); let mut ctxt = ConsensusContext::new(block.slot()); let result = per_block_processing( @@ -188,7 +188,7 @@ async fn invalid_randao_reveal_signature() { let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness + let ((signed_block, _), mut state) = harness .make_block_with_modifier(state, slot + 1, |block| { *block.body_mut().randao_reveal_mut() = Signature::empty(); }) @@ -1035,3 +1035,51 @@ async fn fork_spanning_exit() { ) .expect_err("phase0 exit does not verify against bellatrix state"); } + +/// Check that the block replayer does not consume state roots unnecessarily. +#[tokio::test] +async fn block_replayer_peeking_state_roots() { + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + + let target_state = harness.get_current_state(); + let target_block_root = harness.head_block_root(); + let target_block = harness + .chain + .get_blinded_block(&target_block_root) + .unwrap() + .unwrap(); + + let parent_block_root = target_block.parent_root(); + let parent_block = harness + .chain + .get_blinded_block(&parent_block_root) + .unwrap() + .unwrap(); + let parent_state = harness + .chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .unwrap() + .unwrap(); + + // Omit the state root for `target_state` but provide a dummy state root at the *next* slot. + // If the block replayer is peeking at the state roots rather than consuming them, then the + // dummy state should still be there after block replay completes. 
+ let dummy_state_root = Hash256::repeat_byte(0xff); + let dummy_slot = target_state.slot() + 1; + let state_root_iter = vec![Ok::<_, BlockReplayError>((dummy_state_root, dummy_slot))]; + let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) + .state_root_iter(state_root_iter.into_iter()) + .no_signature_verification() + .apply_blocks(vec![target_block], None) + .unwrap(); + + assert_eq!( + block_replayer + .state_root_iter + .unwrap() + .next() + .unwrap() + .unwrap(), + (dummy_state_root, dummy_slot) + ); +} diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 303a6e391..b7aa4643e 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -32,13 +32,22 @@ pub fn verify_attestation_for_block_inclusion<'ctxt, T: EthSpec>( attestation: data.slot, } ); - verify!( - state.slot() <= data.slot.safe_add(T::slots_per_epoch())?, - Invalid::IncludedTooLate { - state: state.slot(), - attestation: data.slot, + match state { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) => { + verify!( + state.slot() <= data.slot.safe_add(T::slots_per_epoch())?, + Invalid::IncludedTooLate { + state: state.slot(), + attestation: data.slot, + } + ); } - ); + // [Modified in Deneb:EIP7045] + BeaconState::Deneb(_) => {} + } verify_attestation_for_state(state, attestation, ctxt, verify_signatures, spec) } diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 6350685f8..d5d06037c 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -40,7 +40,7 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), - BeaconState::Capella(_) => capella::process_epoch(state, spec), + BeaconState::Capella(_) | BeaconState::Deneb(_) => capella::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 4fd2d6858..833be4138 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -50,9 +50,9 @@ pub fn process_registry_updates( .collect_vec(); // Dequeue validators for activation up to churn limit - let churn_limit = state.get_churn_limit(spec)? as usize; + let activation_churn_limit = state.get_activation_churn_limit(spec)? 
as usize; let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec)?; - for index in activation_queue.into_iter().take(churn_limit) { + for index in activation_queue.into_iter().take(activation_churn_limit) { state.get_validator_mut(index)?.activation_epoch = delayed_activation_epoch; } diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index e16fb4a7b..e89a78c4d 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,6 @@ -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, +}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -59,6 +61,10 @@ pub fn per_slot_processing( if spec.capella_fork_epoch == Some(state.current_epoch()) { upgrade_to_capella(state, spec)?; } + // Deneb + if spec.deneb_fork_epoch == Some(state.current_epoch()) { + upgrade_to_deneb(state, spec)?; + } } Ok(summary) diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index a57d5923f..1509ee0e5 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -1,7 +1,9 @@ pub mod altair; pub mod capella; +pub mod deneb; pub mod merge; pub use altair::upgrade_to_altair; pub use capella::upgrade_to_capella; +pub use deneb::upgrade_to_deneb; pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 26b1192bc..5bb4f0bd5 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -54,7 +54,7 @@ pub fn upgrade_to_altair( VariableList::new(vec![ParticipationFlags::default(); pre.validators.len()])?; let inactivity_scores = VariableList::new(vec![0; pre.validators.len()])?; - let temp_sync_committee = Arc::new(SyncCommittee::temporary()?); + let temp_sync_committee = Arc::new(SyncCommittee::temporary()); // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. diff --git a/consensus/state_processing/src/upgrade/deneb.rs b/consensus/state_processing/src/upgrade/deneb.rs new file mode 100644 index 000000000..c253a8c16 --- /dev/null +++ b/consensus/state_processing/src/upgrade/deneb.rs @@ -0,0 +1,76 @@ +use std::mem; +use types::{BeaconState, BeaconStateDeneb, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; + +/// Transform a `Capella` state into a `Deneb` state. +pub fn upgrade_to_deneb( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_capella_mut()?; + + let previous_fork_version = pre.fork.current_version; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. + // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning.
+ let post = BeaconState::Deneb(BeaconStateDeneb { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: previous_fork_version, + current_version: spec.deneb_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_deneb(), + // Capella + next_withdrawal_index: pre.next_withdrawal_index, + next_withdrawal_validator_index: pre.next_withdrawal_validator_index, + historical_summaries: pre.historical_summaries.clone(), + // Caches + total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + *pre_state = post; + + Ok(()) +} diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index d705dfdd5..db15f5353 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -11,6 +11,7 @@ harness = false [dependencies] merkle_proof = { workspace = true } bls = { workspace = true, features = ["arbitrary"] } +kzg = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } @@ -23,7 +24,6 @@ rayon = { workspace = true } rand = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true, features = ["rc"] } -serde_derive = "1.0.116" slog = { workspace = true } ethereum_ssz = { workspace = true, features = ["arbitrary"] } ethereum_ssz_derive = { workspace = true } @@ -50,7 +50,6 @@ superstruct = { workspace = true } metastruct = "0.1.0" serde_json = { workspace = true } smallvec = { workspace = true } -serde_with = "1.13.0" maplit = { workspace = true } strum = { workspace = true } @@ -69,3 +68,4 @@ sqlite = [] # The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. # For simplicity `Arbitrary` is now derived regardless of the feature's presence.
arbitrary-fuzz = [] +portable = ["bls/supranational-portable"] \ No newline at end of file diff --git a/consensus/types/presets/gnosis/deneb.yaml b/consensus/types/presets/gnosis/deneb.yaml new file mode 100644 index 000000000..d2d7d0abe --- /dev/null +++ b/consensus/types/presets/gnosis/deneb.yaml @@ -0,0 +1,14 @@ +# Gnosis preset - Deneb +# NOTE: The below are PLACEHOLDER values from Mainnet. +# Gnosis preset for the Deneb fork TBD: https://github.com/gnosischain/configs/tree/main/presets/gnosis + +# Misc +# --------------------------------------------------------------- +# `uint64(4096)` +FIELD_ELEMENTS_PER_BLOB: 4096 +# `uint64(2**12)` (= 4096) +MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 +# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17 diff --git a/consensus/types/presets/mainnet/deneb.yaml b/consensus/types/presets/mainnet/deneb.yaml new file mode 100644 index 000000000..6d2fb4abd --- /dev/null +++ b/consensus/types/presets/mainnet/deneb.yaml @@ -0,0 +1,12 @@ +# Mainnet preset - Deneb + +# Misc +# --------------------------------------------------------------- +# `uint64(4096)` +FIELD_ELEMENTS_PER_BLOB: 4096 +# `uint64(2**12)` (= 4096) +MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 +# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17 diff --git a/consensus/types/presets/minimal/deneb.yaml b/consensus/types/presets/minimal/deneb.yaml new file mode 100644 index 000000000..be2b9fadf --- /dev/null +++ b/consensus/types/presets/minimal/deneb.yaml @@ -0,0 +1,12 @@ +# Minimal preset - Deneb + +# Misc +# --------------------------------------------------------------- +# [customized] +FIELD_ELEMENTS_PER_BLOB: 4096 +# [customized] +MAX_BLOB_COMMITMENTS_PER_BLOCK: 16 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 +# [customized] `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9 diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 20d66cd44..ac31e78cb 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -3,7 +3,7 @@ use super::{ Signature, SignedRoot, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 5c333e0d4..ac4a583cb 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,6 +1,6 @@ use derivative::Derivative; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 286502b44..7578981f5 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::{Checkpoint, Hash256, SignedRoot, Slot}; use crate::slot_data::SlotData; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, 
Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index 93a4c147b..22b03dda6 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -1,5 +1,5 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; #[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index c56349507..c2bbea637 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, EthSpec, IndexedAttestation}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 1b40fe76d..90dff84b3 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,12 +1,12 @@ use crate::beacon_block_body::{ - BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyDeneb, BeaconBlockBodyMerge, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; use crate::test_utils::TestRandom; use crate::*; use bls::Signature; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -72,6 +72,8 @@ pub struct BeaconBlock = FullPayload pub body: BeaconBlockBodyMerge, #[superstruct(only(Capella), partial_getter(rename = "body_capella"))] pub body: BeaconBlockBodyCapella, + #[superstruct(only(Deneb), partial_getter(rename = "body_deneb"))] + pub body: BeaconBlockBodyDeneb, } pub type BlindedBeaconBlock = BeaconBlock>; @@ -110,12 +112,15 @@ impl> BeaconBlock { let slot = Slot::from_ssz_bytes(slot_bytes)?; let fork_at_slot = spec.fork_name_at_slot::(slot); + Self::from_ssz_bytes_for_fork(bytes, fork_at_slot) + } - Ok(map_fork_name!( - fork_at_slot, - Self, - <_>::from_ssz_bytes(bytes)? - )) + /// Custom SSZ decoder that takes a `ForkName` as context. + pub fn from_ssz_bytes_for_fork( + bytes: &[u8], + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!(fork_name, Self, <_>::from_ssz_bytes(bytes)?)) } /// Try decoding each beacon block variant in sequence. @@ -124,8 +129,9 @@ impl> BeaconBlock { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. 
pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { - BeaconBlockCapella::from_ssz_bytes(bytes) - .map(BeaconBlock::Capella) + BeaconBlockDeneb::from_ssz_bytes(bytes) + .map(BeaconBlock::Deneb) + .or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella)) .or_else(|_| BeaconBlockMerge::from_ssz_bytes(bytes).map(BeaconBlock::Merge)) .or_else(|_| BeaconBlockAltair::from_ssz_bytes(bytes).map(BeaconBlock::Altair)) .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) @@ -198,12 +204,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, T, Payl /// dictated by `self.slot()`. pub fn fork_name(&self, spec: &ChainSpec) -> Result { let fork_at_slot = spec.fork_name_at_slot::(self.slot()); - let object_fork = match self { - BeaconBlockRef::Base { .. } => ForkName::Base, - BeaconBlockRef::Altair { .. } => ForkName::Altair, - BeaconBlockRef::Merge { .. } => ForkName::Merge, - BeaconBlockRef::Capella { .. } => ForkName::Capella, - }; + let object_fork = self.fork_name_unchecked(); if fork_at_slot == object_fork { Ok(object_fork) @@ -215,6 +216,19 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, T, Payl } } + /// Returns the name of the fork pertaining to `self`. + /// + /// Does not check that the fork is consistent with the slot. + pub fn fork_name_unchecked(&self) -> ForkName { + match self { + BeaconBlockRef::Base { .. } => ForkName::Base, + BeaconBlockRef::Altair { .. } => ForkName::Altair, + BeaconBlockRef::Merge { .. } => ForkName::Merge, + BeaconBlockRef::Capella { .. } => ForkName::Capella, + BeaconBlockRef::Deneb { .. } => ForkName::Deneb, + } + } + /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. pub fn body(&self) -> BeaconBlockBodyRef<'a, T, Payload> { map_beacon_block_ref_into_beacon_block_body_ref!(&'a _, *self, |block, cons| cons( @@ -556,6 +570,36 @@ impl> EmptyBlock for BeaconBlockCape } } +impl> EmptyBlock for BeaconBlockDeneb { + /// Returns an empty Deneb block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self { + BeaconBlockDeneb { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyDeneb { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Deneb::default(), + bls_to_execution_changes: VariableList::empty(), + blob_kzg_commitments: VariableList::empty(), + }, + } + } +} + // We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads. impl From>> for BeaconBlockBase> @@ -635,6 +679,7 @@ impl_from!(BeaconBlockBase, >, >, |body: impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); impl_from!(BeaconBlockCapella, >, >, |body: BeaconBlockBodyCapella<_, _>| body.into()); +impl_from!(BeaconBlockDeneb, >, >, |body: BeaconBlockBodyDeneb<_, _>| body.into()); // We can clone blocks with payloads to blocks without payloads, without cloning the payload. macro_rules! 
impl_clone_as_blinded { @@ -666,6 +711,7 @@ impl_clone_as_blinded!(BeaconBlockBase, >, >, >); impl_clone_as_blinded!(BeaconBlockMerge, >, >); impl_clone_as_blinded!(BeaconBlockCapella, >, >); +impl_clone_as_blinded!(BeaconBlockDeneb, >, >); // A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the // execution payload. @@ -781,6 +827,25 @@ mod tests { }); } + #[test] + fn roundtrip_4844_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Deneb.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockDeneb { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyDeneb::random_for_test(rng), + }; + let block = BeaconBlock::Deneb(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; @@ -796,9 +861,12 @@ let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); let capella_epoch = altair_fork_epoch + 1; let capella_slot = capella_epoch.start_slot(E::slots_per_epoch()); + let deneb_epoch = capella_epoch + 1; + let deneb_slot = deneb_epoch.start_slot(E::slots_per_epoch()); spec.altair_fork_epoch = Some(altair_epoch); spec.capella_fork_epoch = Some(capella_epoch); + spec.deneb_fork_epoch = Some(deneb_epoch); // BeaconBlockBase { @@ -865,5 +933,27 @@ BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) .expect_err("bad capella block cannot be decoded"); } + + // BeaconBlockDeneb + { + let good_block = BeaconBlock::Deneb(BeaconBlockDeneb { + slot: deneb_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have a Deneb block with an epoch lower than the fork epoch. + let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = capella_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good deneb block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad deneb block cannot be decoded"); + } } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index dce1be742..146dff895 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,19 +1,29 @@ use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use merkle_proof::{MerkleTree, MerkleTreeError}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; +use tree_hash::{TreeHash, BYTES_PER_CHUNK}; use tree_hash_derive::TreeHash; +pub type KzgCommitments = + VariableList::MaxBlobCommitmentsPerBlock>; +pub type KzgCommitmentOpts = + FixedVector, ::MaxBlobsPerBlock>; + +/// Index of the `blob_kzg_commitments` leaf in the `BeaconBlockBody` tree post-deneb. +pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; + /// The body of a `BeaconChain` block, containing operations. /// /// This *superstruct* abstracts over the hard-fork.
#[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -51,7 +61,7 @@ pub struct BeaconBlockBody = FullPay pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded @@ -62,9 +72,14 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] #[serde(flatten)] pub execution_payload: Payload::Capella, - #[superstruct(only(Capella))] + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + #[serde(flatten)] + pub execution_payload: Payload::Deneb, + #[superstruct(only(Capella, Deneb))] pub bls_to_execution_changes: VariableList, + #[superstruct(only(Deneb))] + pub blob_kzg_commitments: KzgCommitments, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] @@ -85,6 +100,80 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), Self::Merge(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), + } + } + + /// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments` + /// at `index`. + pub fn kzg_commitment_merkle_proof( + &self, + index: usize, + ) -> Result, Error> { + match self { + Self::Base(_) | Self::Altair(_) | Self::Merge(_) | Self::Capella(_) => { + Err(Error::IncorrectStateVariant) + } + Self::Deneb(body) => { + // We compute the branches by generating 2 merkle trees: + // 1. Merkle tree for the `blob_kzg_commitments` List object + // 2. Merkle tree for the `BeaconBlockBody` container + // We then merge the branches for both the trees all the way up to the root. + + // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) + // + // Branches for `blob_kzg_commitments` without length mix-in + let depth = T::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2(); + let leaves: Vec<_> = body + .blob_kzg_commitments + .iter() + .map(|commitment| commitment.tree_hash_root()) + .collect(); + let tree = MerkleTree::create(&leaves, depth as usize); + let (_, mut proof) = tree + .generate_proof(index, depth as usize) + .map_err(Error::MerkleTreeError)?; + + // Add the branch corresponding to the length mix-in. + let length = body.blob_kzg_commitments.len(); + let usize_len = std::mem::size_of::(); + let mut length_bytes = [0; BYTES_PER_CHUNK]; + length_bytes + .get_mut(0..usize_len) + .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? 
+ .copy_from_slice(&length.to_le_bytes()); + let length_root = Hash256::from_slice(length_bytes.as_slice()); + proof.push(length_root); + + // Part 2 + // Branches for `BeaconBlockBody` container + let leaves = [ + body.randao_reveal.tree_hash_root(), + body.eth1_data.tree_hash_root(), + body.graffiti.tree_hash_root(), + body.proposer_slashings.tree_hash_root(), + body.attester_slashings.tree_hash_root(), + body.attestations.tree_hash_root(), + body.deposits.tree_hash_root(), + body.voluntary_exits.tree_hash_root(), + body.sync_aggregate.tree_hash_root(), + body.execution_payload.tree_hash_root(), + body.bls_to_execution_changes.tree_hash_root(), + body.blob_kzg_commitments.tree_hash_root(), + ]; + let beacon_block_body_depth = leaves.len().next_power_of_two().ilog2() as usize; + let tree = MerkleTree::create(&leaves, beacon_block_body_depth); + let (_, mut proof_body) = tree + .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) + .map_err(Error::MerkleTreeError)?; + // Join the proofs for the subtree and the main tree + proof.append(&mut proof_body); + + debug_assert_eq!(proof.len(), T::kzg_proof_inclusion_proof_depth()); + Ok(proof.into()) + } } } } @@ -97,6 +186,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, BeaconBlockBodyRef::Capella { .. } => ForkName::Capella, + BeaconBlockBodyRef::Deneb { .. } => ForkName::Deneb, } } } @@ -321,6 +411,50 @@ impl From>> } } +impl From>> + for ( + BeaconBlockBodyDeneb>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyDeneb>) -> Self { + let BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadDeneb { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + } = body; + + ( + BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadDeneb { + execution_payload_header: From::from(&execution_payload), + }, + bls_to_execution_changes, + blob_kzg_commitments, + }, + Some(execution_payload), + ) + } +} + // We can clone a full block into a blinded block, without cloning the payload. 
impl BeaconBlockBodyBase> { pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase> { @@ -402,6 +536,42 @@ impl BeaconBlockBodyCapella> { } } +impl BeaconBlockBodyDeneb> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyDeneb> { + let BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadDeneb { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + } = self; + + BeaconBlockBodyDeneb { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayloadDeneb { + execution_payload_header: execution_payload.into(), + }, + bls_to_execution_changes: bls_to_execution_changes.clone(), + blob_kzg_commitments: blob_kzg_commitments.clone(), + } + } +} + impl From>> for ( BeaconBlockBody>, @@ -416,6 +586,14 @@ impl From>> } } +/// Util method helpful for logging. +pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { + let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); + let commitments_joined = commitment_strings.join(", "); + let surrounded_commitments = format!("[{}]", commitments_joined); + surrounded_commitments +} + #[cfg(test)] mod tests { mod base { diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index f2ef0a3dc..b38235931 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -60,6 +60,16 @@ impl BeaconBlockHeader { signature, } } + + pub fn empty() -> Self { + Self { + body_root: Default::default(), + parent_root: Default::default(), + proposer_index: Default::default(), + slot: Default::default(), + state_root: Default::default(), + } + } } #[cfg(test)] diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 6a205e307..e2e25f24b 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -9,7 +9,7 @@ use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; @@ -183,7 +183,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. 
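// Sketch (not part of the patch): `format_kzg_commitments` above is equivalent
// to the terser one-liner below; both rely on `KzgCommitment` implementing
// `Display`. The output looks like "[0x8f…, 0xa3…]".
fn format_kzg_commitments_terse(commitments: &[KzgCommitment]) -> String {
    format!(
        "[{}]",
        commitments
            .iter()
            .map(|c| c.to_string())
            .collect::<Vec<_>>()
            .join(", ")
    )
}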
#[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes( derive( Derivative, @@ -263,9 +263,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub current_epoch_participation: VariableList, // Finality @@ -280,13 +280,13 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub next_sync_committee: Arc>, // Execution @@ -300,16 +300,21 @@ where partial_getter(rename = "latest_execution_payload_header_capella") )] pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct( + only(Deneb), + partial_getter(rename = "latest_execution_payload_header_deneb") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderDeneb, // Capella - #[superstruct(only(Capella), partial_getter(copy))] + #[superstruct(only(Capella, Deneb), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_index: u64, - #[superstruct(only(Capella), partial_getter(copy))] + #[superstruct(only(Capella, Deneb), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub historical_summaries: VariableList, // Caching (not in the spec) @@ -424,12 +429,7 @@ impl BeaconState { /// dictated by `self.slot()`. pub fn fork_name(&self, spec: &ChainSpec) -> Result { let fork_at_slot = spec.fork_name_at_epoch(self.current_epoch()); - let object_fork = match self { - BeaconState::Base { .. } => ForkName::Base, - BeaconState::Altair { .. } => ForkName::Altair, - BeaconState::Merge { .. } => ForkName::Merge, - BeaconState::Capella { .. } => ForkName::Capella, - }; + let object_fork = self.fork_name_unchecked(); if fork_at_slot == object_fork { Ok(object_fork) @@ -441,6 +441,19 @@ impl BeaconState { } } + /// Returns the name of the fork pertaining to `self`. + /// + /// Does not check if `self` is consistent with the fork dictated by `self.slot()`. + pub fn fork_name_unchecked(&self) -> ForkName { + match self { + BeaconState::Base { .. } => ForkName::Base, + BeaconState::Altair { .. } => ForkName::Altair, + BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Capella { .. } => ForkName::Capella, + BeaconState::Deneb { .. } => ForkName::Deneb, + } + } + /// Specialised deserialisation method that uses the `ChainSpec` as context. #[allow(clippy::arithmetic_side_effects)] pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { @@ -613,6 +626,25 @@ impl BeaconState { cache.get_all_beacon_committees() } + /// Returns the block root which decided the proposer shuffling for the epoch passed in parameter. This root + /// can be used to key this proposer shuffling. 
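// Worked example (not part of the patch): the proposer shuffling for epoch N is
// decided at the last slot of epoch N - 1, i.e. `epoch.start_slot() - 1`. With
// 32 slots per epoch, epoch 10 has decision slot 10 * 32 - 1 = 319. If the
// state has not advanced past slot 319, the caller's `block_root` (the latest
// block applied to the state) is the decision root; otherwise the root is read
// from the state's block roots, as the function below implements.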
+ /// + /// ## Notes + /// + /// The `block_root` must be equal to the latest block applied to `self`. + pub fn proposer_shuffling_decision_root_at_epoch( + &self, + epoch: Epoch, + block_root: Hash256, + ) -> Result { + let decision_slot = self.proposer_shuffling_decision_slot(epoch); + if self.slot() <= decision_slot { + Ok(block_root) + } else { + self.get_block_root(decision_slot).map(|root| *root) + } + } + /// Returns the block root which decided the proposer shuffling for the current epoch. This root /// can be used to key this proposer shuffling. /// @@ -621,7 +653,7 @@ impl BeaconState { /// The `block_root` covers the one-off scenario where the genesis block decides its own /// shuffling. It should be set to the latest block applied to `self` or the genesis block root. pub fn proposer_shuffling_decision_root(&self, block_root: Hash256) -> Result { - let decision_slot = self.proposer_shuffling_decision_slot(); + let decision_slot = self.proposer_shuffling_decision_slot(self.current_epoch()); if self.slot() == decision_slot { Ok(block_root) } else { @@ -630,11 +662,9 @@ impl BeaconState { } /// Returns the slot at which the proposer shuffling was decided. The block root at this slot - /// can be used to key the proposer shuffling for the current epoch. - fn proposer_shuffling_decision_slot(&self) -> Slot { - self.current_epoch() - .start_slot(T::slots_per_epoch()) - .saturating_sub(1_u64) + /// can be used to key the proposer shuffling for the given epoch. + fn proposer_shuffling_decision_slot(&self, epoch: Epoch) -> Slot { + epoch.start_slot(T::slots_per_epoch()).saturating_sub(1_u64) } /// Returns the block root which decided the attester shuffling for the given `relative_epoch`. @@ -728,6 +758,9 @@ impl BeaconState { BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRef::Capella( &state.latest_execution_payload_header, )), + BeaconState::Deneb(state) => Ok(ExecutionPayloadHeaderRef::Deneb( + &state.latest_execution_payload_header, + )), } } @@ -742,6 +775,9 @@ impl BeaconState { BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRefMut::Capella( &mut state.latest_execution_payload_header, )), + BeaconState::Deneb(state) => Ok(ExecutionPayloadHeaderRefMut::Deneb( + &mut state.latest_execution_payload_header, + )), } } @@ -1188,6 +1224,11 @@ impl BeaconState { &mut state.balances, &mut state.progressive_balances_cache, ), + BeaconState::Deneb(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), } } @@ -1298,6 +1339,24 @@ impl BeaconState { )) } + /// Return the activation churn limit for the current epoch (number of validators who can enter per epoch). + /// + /// Uses the epoch cache, and will error if it isn't initialized. + /// + /// Spec v1.4.0 + pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { + Ok(match self { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) => self.get_churn_limit(spec)?, + BeaconState::Deneb(_) => std::cmp::min( + spec.max_per_epoch_activation_churn_limit, + self.get_churn_limit(spec)?, + ), + }) + } + /// Returns the `slot`, `index`, `committee_position` and `committee_len` for which a validator must produce an /// attestation. 
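// Worked example (sketch; mainnet config assumed) for `get_activation_churn_limit`
// above, with MIN_PER_EPOCH_CHURN_LIMIT = 4, CHURN_LIMIT_QUOTIENT = 65_536 and
// MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT = 8 (EIP-7514). Note that Deneb caps only
// the activation side of the churn; exits still use `get_churn_limit`.
fn activation_churn_limit_deneb(active_validators: u64) -> u64 {
    let churn = std::cmp::max(4, active_validators / 65_536); // get_churn_limit
    std::cmp::min(8, churn)
}
// e.g.   500_000 active validators -> min(8, max(4,  7)) = 7
//      1_000_000 active validators -> min(8, max(4, 15)) = 8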
/// @@ -1385,6 +1444,7 @@ impl BeaconState { BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Deneb(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { match self { @@ -1392,6 +1452,7 @@ impl BeaconState { BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Deneb(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -1703,6 +1764,7 @@ impl BeaconState { BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), + BeaconState::Deneb(inner) => BeaconState::Deneb(inner.clone()), }; if config.committee_caches { *res.committee_caches_mut() = self.committee_caches().clone(); @@ -1880,6 +1942,7 @@ impl CompareFields for BeaconState { (BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y), (BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y), (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y), + (BeaconState::Deneb(x), BeaconState::Deneb(y)) => x.compare_fields(y), _ => panic!("compare_fields: mismatched state variants",), } } diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 64bf686f3..8d29bc221 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -4,7 +4,7 @@ use super::BeaconState; use crate::*; use core::num::NonZeroUsize; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{four_byte_option_impl, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::ops::Range; diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index b657d62ae..cb96fba69 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -1,6 +1,6 @@ use super::{BeaconStateError, ChainSpec, Epoch, Validator}; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; /// Map from exit epoch to the number of validators with that exit epoch. 
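// Note on the recurring `serde_derive` -> `serde` import change throughout this
// patch: the derive macros are re-exported by `serde` itself when its `derive`
// feature is enabled, so the separate `serde_derive` dependency can be dropped
// with no behavioural change. Hypothetical Cargo.toml line and usage (sketch):
//
//     serde = { version = "1", features = ["derive"] }
//
//     use serde::{Deserialize, Serialize};
//
//     #[derive(Serialize, Deserialize)]
//     struct Example { epoch: u64 }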
diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs index 9f5c223d5..6c0682480 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -2,7 +2,7 @@ use crate::beacon_state::balance::Balance; use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec}; use arbitrary::Arbitrary; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use strum::{Display, EnumString, EnumVariantNames}; /// This cache keeps track of the accumulated target attestation balance for the current & previous @@ -179,6 +179,9 @@ impl ProgressiveBalancesMode { pub fn is_progressive_balances_enabled(state: &BeaconState) -> bool { match state { BeaconState::Base(_) => false, - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => true, + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => true, } } diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/beacon_state/pubkey_cache.rs index 590ea30f9..c56c9077e 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -1,5 +1,5 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; type ValidatorIndex = usize; diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs new file mode 100644 index 000000000..c249d8b4d --- /dev/null +++ b/consensus/types/src/blob_sidecar.rs @@ -0,0 +1,281 @@ +use crate::test_utils::TestRandom; +use crate::{ + beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, + EthSpec, Hash256, SignedBeaconBlockHeader, Slot, +}; +use crate::{KzgProofs, SignedBeaconBlock}; +use bls::Signature; +use derivative::Derivative; +use kzg::{ + Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, + FIELD_ELEMENTS_PER_BLOB, +}; +use merkle_proof::{merkle_root_from_branch, verify_merkle_proof, MerkleTreeError}; +use rand::Rng; +use safe_arith::{ArithError, SafeArith}; +use serde::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use std::fmt::Debug; +use std::hash::Hash; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +/// Container of the data that identifies an individual blob. 
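// Orientation for `verify_blob_sidecar_inclusion_proof` further below (sketch;
// mainnet numbers assumed): the 17-branch `kzg_commitment_inclusion_proof` is
// consumed in two parts. Branches [0, 13) prove the commitment inside the
// `blob_kzg_commitments` list (depth 12, plus 1 for the length mix-in), and
// branches [13, 17) prove that list's root inside the `BeaconBlockBody`, whose
// 12 fields pad to 16 leaves and where `blob_kzg_commitments` sits at field
// index 11 (`BLOB_KZG_COMMITMENTS_INDEX`).
const COMMITMENTS_SUBTREE_PROOF_LEN: usize = 12 + 1; // list depth + length mix-in
const BODY_PROOF_LEN: usize = 17 - COMMITMENTS_SUBTREE_PROOF_LEN; // 4 container branches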
+#[derive( + Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, +)] +pub struct BlobIdentifier { + pub block_root: Hash256, + pub index: u64, +} + +impl BlobIdentifier { + pub fn get_all_blob_ids(block_root: Hash256) -> Vec { + let mut blob_ids = Vec::with_capacity(E::max_blobs_per_block()); + for i in 0..E::max_blobs_per_block() { + blob_ids.push(BlobIdentifier { + block_root, + index: i as u64, + }); + } + blob_ids + } +} + +impl PartialOrd for BlobIdentifier { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for BlobIdentifier { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.index.cmp(&other.index) + } +} + +#[derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary, +)] +#[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] +#[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))] +pub struct BlobSidecar { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] + pub blob: Blob, + pub kzg_commitment: KzgCommitment, + pub kzg_proof: KzgProof, + pub signed_block_header: SignedBeaconBlockHeader, + pub kzg_commitment_inclusion_proof: FixedVector, +} + +impl PartialOrd for BlobSidecar { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for BlobSidecar { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.index.cmp(&other.index) + } +} + +#[derive(Debug)] +pub enum BlobSidecarError { + PreDeneb, + MissingKzgCommitment, + BeaconState(BeaconStateError), + MerkleTree(MerkleTreeError), + ArithError(ArithError), +} + +impl From for BlobSidecarError { + fn from(e: BeaconStateError) -> Self { + BlobSidecarError::BeaconState(e) + } +} + +impl From for BlobSidecarError { + fn from(e: MerkleTreeError) -> Self { + BlobSidecarError::MerkleTree(e) + } +} + +impl From for BlobSidecarError { + fn from(e: ArithError) -> Self { + BlobSidecarError::ArithError(e) + } +} + +impl BlobSidecar { + pub fn new( + index: usize, + blob: Blob, + signed_block: &SignedBeaconBlock, + kzg_proof: KzgProof, + ) -> Result { + let expected_kzg_commitments = signed_block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_e| BlobSidecarError::PreDeneb)?; + let kzg_commitment = *expected_kzg_commitments + .get(index) + .ok_or(BlobSidecarError::MissingKzgCommitment)?; + let kzg_commitment_inclusion_proof = signed_block + .message() + .body() + .kzg_commitment_merkle_proof(index)?; + + Ok(Self { + index: index as u64, + blob, + kzg_commitment, + kzg_proof, + signed_block_header: signed_block.signed_block_header(), + kzg_commitment_inclusion_proof, + }) + } + + pub fn id(&self) -> BlobIdentifier { + BlobIdentifier { + block_root: self.block_root(), + index: self.index, + } + } + + pub fn slot(&self) -> Slot { + self.signed_block_header.message.slot + } + + pub fn block_root(&self) -> Hash256 { + self.signed_block_header.message.tree_hash_root() + } + + pub fn block_parent_root(&self) -> Hash256 { + self.signed_block_header.message.parent_root + } + + pub fn block_proposer_index(&self) -> u64 { + self.signed_block_header.message.proposer_index + } + + pub fn empty() -> Self { + Self { + index: 0, + blob: Blob::::default(), + kzg_commitment: KzgCommitment::empty_for_testing(), + kzg_proof: KzgProof::empty(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader::empty(), + signature: 
Signature::empty(), + }, + kzg_commitment_inclusion_proof: Default::default(), + } + } + + /// Verifies the kzg commitment inclusion merkle proof. + pub fn verify_blob_sidecar_inclusion_proof(&self) -> Result { + // Depth of the subtree rooted at `blob_kzg_commitments` in the `BeaconBlockBody` + // is equal to depth of the ssz List max size + 1 for the length mixin + let kzg_commitments_tree_depth = (T::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2() + .safe_add(1))? as usize; + // Compute the `tree_hash_root` of the `blob_kzg_commitments` subtree using the + // inclusion proof branches + let blob_kzg_commitments_root = merkle_root_from_branch( + self.kzg_commitment.tree_hash_root(), + self.kzg_commitment_inclusion_proof + .get(0..kzg_commitments_tree_depth) + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + kzg_commitments_tree_depth, + self.index as usize, + ); + // The remaining inclusion proof branches are for the top level `BeaconBlockBody` tree + Ok(verify_merkle_proof( + blob_kzg_commitments_root, + self.kzg_commitment_inclusion_proof + .get(kzg_commitments_tree_depth..T::kzg_proof_inclusion_proof_depth()) + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + T::kzg_proof_inclusion_proof_depth().safe_sub(kzg_commitments_tree_depth)?, + BLOB_KZG_COMMITMENTS_INDEX, + self.signed_block_header.message.body_root, + )) + } + + pub fn random_valid(rng: &mut R, kzg: &Kzg) -> Result { + let mut blob_bytes = vec![0u8; BYTES_PER_BLOB]; + rng.fill_bytes(&mut blob_bytes); + // Ensure that the blob is canonical by ensuring that + // each field element contained in the blob is < BLS_MODULUS + for i in 0..FIELD_ELEMENTS_PER_BLOB { + let Some(byte) = blob_bytes.get_mut( + i.checked_mul(BYTES_PER_FIELD_ELEMENT) + .ok_or("overflow".to_string())?, + ) else { + return Err(format!("blob byte index out of bounds: {:?}", i)); + }; + *byte = 0; + } + + let blob = Blob::::new(blob_bytes) + .map_err(|e| format!("error constructing random blob: {:?}", e))?; + let kzg_blob = KzgBlob::from_bytes(&blob).unwrap(); + + let commitment = kzg + .blob_to_kzg_commitment(&kzg_blob) + .map_err(|e| format!("error computing kzg commitment: {:?}", e))?; + + let proof = kzg + .compute_blob_kzg_proof(&kzg_blob, commitment) + .map_err(|e| format!("error computing kzg proof: {:?}", e))?; + + Ok(Self { + blob, + kzg_commitment: commitment, + kzg_proof: proof, + ..Self::empty() + }) + } + + #[allow(clippy::arithmetic_side_effects)] + pub fn max_size() -> usize { + // Fixed part + Self::empty().as_ssz_bytes().len() + } + + pub fn build_sidecars( + blobs: BlobsList, + block: &SignedBeaconBlock, + kzg_proofs: KzgProofs, + ) -> Result, BlobSidecarError> { + let mut blob_sidecars = vec![]; + for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { + let blob_sidecar = BlobSidecar::new(i, blob, block, *kzg_proof)?; + blob_sidecars.push(Arc::new(blob_sidecar)); + } + Ok(VariableList::from(blob_sidecars)) + } +} + +pub type BlobSidecarList = VariableList>, ::MaxBlobsPerBlock>; +pub type FixedBlobSidecarList = + FixedVector>>, ::MaxBlobsPerBlock>; +pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index 3ed9ee925..baa65f517 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::PublicKeyBytes; -use serde_derive::{Deserialize, Serialize}; +use 
serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 8723c2afe..f43585000 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,76 +1,97 @@ +use crate::beacon_block_body::KzgCommitments; use crate::{ - AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, ForkName, + ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; -use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use serde_with::{serde_as, DeserializeAs, SerializeAs}; -use std::marker::PhantomData; +use serde::{Deserialize, Deserializer, Serialize}; +use superstruct::superstruct; use tree_hash_derive::TreeHash; -#[serde_as] +#[superstruct( + variants(Merge, Capella, Deneb), + variant_attributes( + derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone), + serde(bound = "E: EthSpec", deny_unknown_fields) + ), + map_ref_into(ExecutionPayloadHeaderRef), + map_ref_mut_into(ExecutionPayloadHeaderRefMut) +)] #[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] -#[serde(bound = "E: EthSpec, Payload: ExecPayload")] -pub struct BuilderBid> { - #[serde_as(as = "BlindedPayloadAsHeader")] - pub header: Payload, +#[serde(bound = "E: EthSpec", deny_unknown_fields, untagged)] +#[tree_hash(enum_behaviour = "transparent")] +pub struct BuilderBid { + #[superstruct(only(Merge), partial_getter(rename = "header_merge"))] + pub header: ExecutionPayloadHeaderMerge, + #[superstruct(only(Capella), partial_getter(rename = "header_capella"))] + pub header: ExecutionPayloadHeaderCapella, + #[superstruct(only(Deneb), partial_getter(rename = "header_deneb"))] + pub header: ExecutionPayloadHeaderDeneb, + #[superstruct(only(Deneb))] + pub blob_kzg_commitments: KzgCommitments, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, - #[serde(skip)] - #[tree_hash(skip_hashing)] - _phantom_data: PhantomData, } -impl> SignedRoot for BuilderBid {} - -/// Validator registration, for use in interacting with servers implementing the builder API. 
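// Access sketch (names from the new superstruct above; not part of the patch):
// callers no longer thread a `Payload: ExecPayload` type parameter through
// `BuilderBid`. Common fields get plain getters and fork-specific fields get
// partial getters, e.g.:
//
//     let value: Uint256 = *bid.value();        // present on every variant
//     let header = bid.header();                // ExecutionPayloadHeaderRef<'_, E>
//     let kzgs = bid.blob_kzg_commitments();    // Ok(..) on Deneb, Err(..) otherwise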
-#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] -#[serde(bound = "E: EthSpec, Payload: ExecPayload")] -pub struct SignedBuilderBid> { - pub message: BuilderBid, - pub signature: Signature, +impl BuilderBid { + pub fn header(&self) -> ExecutionPayloadHeaderRef<'_, E> { + self.to_ref().header() + } } -impl> ForkVersionDeserialize - for BuilderBid -{ - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { - let convert_err = |_| { - serde::de::Error::custom( - "BuilderBid failed to deserialize: unable to convert payload header to payload", - ) - }; +impl<'a, E: EthSpec> BuilderBidRef<'a, E> { + pub fn header(&self) -> ExecutionPayloadHeaderRef<'a, E> { + map_builder_bid_ref_into_execution_payload_header_ref!(&'a _, self, |bid, cons| cons( + &bid.header + )) + } +} - #[derive(Deserialize)] - struct Helper { - header: serde_json::Value, - #[serde(with = "serde_utils::quoted_u256")] - value: Uint256, - pubkey: PublicKeyBytes, - } - let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; - let payload_header = - ExecutionPayloadHeader::deserialize_by_fork::<'de, D>(helper.header, fork_name)?; - - Ok(Self { - header: Payload::try_from(payload_header).map_err(convert_err)?, - value: helper.value, - pubkey: helper.pubkey, - _phantom_data: Default::default(), +impl<'a, E: EthSpec> BuilderBidRefMut<'a, E> { + pub fn header_mut(self) -> ExecutionPayloadHeaderRefMut<'a, E> { + map_builder_bid_ref_mut_into_execution_payload_header_ref_mut!(&'a _, self, |bid, cons| { + cons(&mut bid.header) }) } } -impl> ForkVersionDeserialize - for SignedBuilderBid -{ - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( +impl SignedRoot for BuilderBid {} + +/// Validator registration, for use in interacting with servers implementing the builder API. 
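// A minimal sketch (not part of the patch) of the fork-versioned dispatch that
// `BuilderBid::deserialize_by_fork` below implements: the JSON is untagged, so
// the fork must be supplied out of band and selects the superstruct variant.
fn builder_bid_from_json<E: EthSpec>(
    value: serde_json::Value,
    fork_name: ForkName,
) -> Result<BuilderBid<E>, serde_json::Error> {
    use serde::de::Error;
    match fork_name {
        ForkName::Merge => serde_json::from_value(value).map(BuilderBid::Merge),
        ForkName::Capella => serde_json::from_value(value).map(BuilderBid::Capella),
        ForkName::Deneb => serde_json::from_value(value).map(BuilderBid::Deneb),
        ForkName::Base | ForkName::Altair => {
            Err(Error::custom("no builder bids before the merge"))
        }
    }
}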
+#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[serde(bound = "E: EthSpec")] +pub struct SignedBuilderBid { + pub message: BuilderBid, + pub signature: Signature, +} + +impl ForkVersionDeserialize for BuilderBid { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = + |e| serde::de::Error::custom(format!("BuilderBid failed to deserialize: {:?}", e)); + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "BuilderBid failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} + +impl ForkVersionDeserialize for SignedBuilderBid { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: serde_json::value::Value, fork_name: ForkName, ) -> Result { @@ -88,34 +109,10 @@ impl> ForkVersionDeserialize } } -struct BlindedPayloadAsHeader(PhantomData); - -impl> SerializeAs for BlindedPayloadAsHeader { - fn serialize_as(source: &Payload, serializer: S) -> Result - where - S: Serializer, - { - source.to_execution_payload_header().serialize(serializer) - } -} - -impl<'de, E: EthSpec, Payload: AbstractExecPayload> DeserializeAs<'de, Payload> - for BlindedPayloadAsHeader -{ - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let payload_header = ExecutionPayloadHeader::deserialize(deserializer)?; - Payload::try_from(payload_header) - .map_err(|_| serde::de::Error::custom("unable to convert payload header to payload")) - } -} - -impl> SignedBuilderBid { +impl SignedBuilderBid { pub fn verify_signature(&self, spec: &ChainSpec) -> bool { self.message - .pubkey + .pubkey() .decompress() .map(|pubkey| { let domain = spec.get_builder_domain(); diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index a13d3116d..b2120fb04 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,9 +1,10 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; use int_to_bytes::int_to_bytes4; +use serde::Deserialize; use serde::{Deserializer, Serialize, Serializer}; -use serde_derive::Deserialize; use serde_utils::quoted_u64::MaybeQuoted; +use ssz::Encode; use std::fs::File; use std::path::Path; use std::time::Duration; @@ -50,6 +51,7 @@ pub struct ChainSpec { pub max_committees_per_slot: usize, pub target_committee_size: usize, pub min_per_epoch_churn_limit: u64, + pub max_per_epoch_activation_churn_limit: u64, pub churn_limit_quotient: u64, pub shuffle_round_count: u8, pub min_genesis_active_validator_count: u64, @@ -160,27 +162,51 @@ pub struct ChainSpec { pub capella_fork_epoch: Option, pub max_validators_per_withdrawals_sweep: u64, + /* + * Deneb hard fork params + */ + pub deneb_fork_version: [u8; 4], + pub deneb_fork_epoch: Option, + /* * Networking */ pub boot_nodes: Vec, pub network_id: u8, - pub attestation_propagation_slot_range: u64, - pub maximum_gossip_clock_disparity_millis: u64, pub target_aggregators_per_committee: u64, - pub attestation_subnet_count: u64, - pub subnets_per_node: u8, - pub epochs_per_subnet_subscription: u64, pub gossip_max_size: u64, + pub max_request_blocks: u64, + pub epochs_per_subnet_subscription: u64, pub 
min_epochs_for_block_requests: u64, pub max_chunk_size: u64, pub ttfb_timeout: u64, pub resp_timeout: u64, + pub attestation_propagation_slot_range: u64, + pub maximum_gossip_clock_disparity_millis: u64, pub message_domain_invalid_snappy: [u8; 4], pub message_domain_valid_snappy: [u8; 4], + pub subnets_per_node: u8, + pub attestation_subnet_count: u64, pub attestation_subnet_extra_bits: u8, pub attestation_subnet_prefix_bits: u8, + /* + * Networking Deneb + */ + pub max_request_blocks_deneb: u64, + pub max_request_blob_sidecars: u64, + pub min_epochs_for_blob_sidecars_requests: u64, + pub blob_sidecar_subnet_count: u64, + + /* + * Networking Derived + * + * When adding fields here, make sure any values are derived again during `apply_to_chain_spec`. + */ + pub max_blocks_by_root_request: usize, + pub max_blocks_by_root_request_deneb: usize, + pub max_blobs_by_root_request: usize, + /* * Application params */ @@ -254,13 +280,16 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.capella_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, - _ => match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + match self.deneb_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Deneb, + _ => match self.capella_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, }, }, } @@ -273,6 +302,7 @@ impl ChainSpec { ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, ForkName::Capella => self.capella_fork_version, + ForkName::Deneb => self.deneb_fork_version, } } @@ -283,6 +313,7 @@ impl ChainSpec { ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => self.bellatrix_fork_epoch, ForkName::Capella => self.capella_fork_epoch, + ForkName::Deneb => self.deneb_fork_epoch, } } @@ -293,6 +324,7 @@ impl ChainSpec { BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, BeaconState::Capella(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Deneb(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -306,6 +338,7 @@ impl ChainSpec { BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, BeaconState::Capella(_) => self.proportional_slashing_multiplier_bellatrix, + BeaconState::Deneb(_) => self.proportional_slashing_multiplier_bellatrix, } } @@ -319,6 +352,7 @@ impl ChainSpec { BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, BeaconState::Capella(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Deneb(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -472,6 +506,25 @@ impl ChainSpec { Duration::from_secs(self.resp_timeout) } + pub fn max_blocks_by_root_request(&self, fork_name: ForkName) -> usize { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + 
self.max_blocks_by_root_request + } + ForkName::Deneb => self.max_blocks_by_root_request_deneb, + } + } + + pub fn max_request_blocks(&self, fork_name: ForkName) -> usize { + let max_request_blocks = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + self.max_request_blocks + } + ForkName::Deneb => self.max_request_blocks_deneb, + }; + max_request_blocks as usize + } + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. pub fn mainnet() -> Self { Self { @@ -493,6 +546,7 @@ impl ChainSpec { max_committees_per_slot: 64, target_committee_size: 128, min_per_epoch_churn_limit: 4, + max_per_epoch_activation_churn_limit: 8, churn_limit_quotient: 65_536, shuffle_round_count: 90, min_genesis_active_validator_count: 16_384, @@ -621,17 +675,23 @@ impl ChainSpec { capella_fork_epoch: Some(Epoch::new(194048)), max_validators_per_withdrawals_sweep: 16384, + /* + * Deneb hard fork params + */ + deneb_fork_version: [0x04, 0x00, 0x00, 0x00], + deneb_fork_epoch: None, + /* * Network specific */ boot_nodes: vec![], network_id: 1, // mainnet network id - attestation_propagation_slot_range: 32, + attestation_propagation_slot_range: default_attestation_propagation_slot_range(), attestation_subnet_count: 64, subnets_per_node: 2, - maximum_gossip_clock_disparity_millis: 500, + maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - epochs_per_subnet_subscription: 256, + epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), max_chunk_size: default_max_chunk_size(), @@ -641,6 +701,23 @@ impl ChainSpec { message_domain_valid_snappy: default_message_domain_valid_snappy(), attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), + max_request_blocks: default_max_request_blocks(), + + /* + * Networking Deneb Specific + */ + max_request_blocks_deneb: default_max_request_blocks_deneb(), + max_request_blob_sidecars: default_max_request_blob_sidecars(), + min_epochs_for_blob_sidecars_requests: default_min_epochs_for_blob_sidecars_requests(), + blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + + /* + * Derived Deneb Specific + */ + max_blocks_by_root_request: default_max_blocks_by_root_request(), + max_blocks_by_root_request_deneb: default_max_blocks_by_root_request_deneb(), + max_blobs_by_root_request: default_max_blobs_by_root_request(), + /* * Application specific */ @@ -662,6 +739,8 @@ impl ChainSpec { config_name: None, max_committees_per_slot: 4, target_committee_size: 4, + min_per_epoch_churn_limit: 2, + max_per_epoch_activation_churn_limit: 4, churn_limit_quotient: 32, shuffle_round_count: 10, min_genesis_active_validator_count: 64, @@ -693,6 +772,9 @@ impl ChainSpec { capella_fork_version: [0x03, 0x00, 0x00, 0x01], capella_fork_epoch: None, max_validators_per_withdrawals_sweep: 16, + // Deneb + deneb_fork_version: [0x04, 0x00, 0x00, 0x01], + deneb_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -723,6 +805,7 @@ impl ChainSpec { max_committees_per_slot: 64, target_committee_size: 128, min_per_epoch_churn_limit: 4, + max_per_epoch_activation_churn_limit: 8, churn_limit_quotient: 4_096, shuffle_round_count: 90, min_genesis_active_validator_count: 4_096, @@ -853,17 +936,23 @@ impl 
ChainSpec { capella_fork_epoch: Some(Epoch::new(648704)), max_validators_per_withdrawals_sweep: 8192, + /* + * Deneb hard fork params + */ + deneb_fork_version: [0x04, 0x00, 0x00, 0x64], + deneb_fork_epoch: None, + /* * Network specific */ boot_nodes: vec![], network_id: 100, // Gnosis Chain network id - attestation_propagation_slot_range: 32, + attestation_propagation_slot_range: default_attestation_propagation_slot_range(), attestation_subnet_count: 64, subnets_per_node: 4, // Make this larger than usual to avoid network damage - maximum_gossip_clock_disparity_millis: 500, + maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - epochs_per_subnet_subscription: 256, + epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), max_chunk_size: default_max_chunk_size(), @@ -873,6 +962,22 @@ impl ChainSpec { message_domain_valid_snappy: default_message_domain_valid_snappy(), attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), + max_request_blocks: default_max_request_blocks(), + + /* + * Networking Deneb Specific + */ + max_request_blocks_deneb: default_max_request_blocks_deneb(), + max_request_blob_sidecars: default_max_request_blob_sidecars(), + min_epochs_for_blob_sidecars_requests: default_min_epochs_for_blob_sidecars_requests(), + blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + + /* + * Derived Deneb Specific + */ + max_blocks_by_root_request: default_max_blocks_by_root_request(), + max_blocks_by_root_request_deneb: default_max_blocks_by_root_request_deneb(), + max_blobs_by_root_request: default_max_blobs_by_root_request(), /* * Application specific @@ -950,6 +1055,14 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub capella_fork_epoch: Option>, + #[serde(default = "default_deneb_fork_version")] + #[serde(with = "serde_utils::bytes_4_hex")] + deneb_fork_version: [u8; 4], + #[serde(default)] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub deneb_fork_epoch: Option>, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -972,6 +1085,9 @@ pub struct Config { ejection_balance: u64, #[serde(with = "serde_utils::quoted_u64")] min_per_epoch_churn_limit: u64, + #[serde(default = "default_max_per_epoch_activation_churn_limit")] + #[serde(with = "serde_utils::quoted_u64")] + max_per_epoch_activation_churn_limit: u64, #[serde(with = "serde_utils::quoted_u64")] churn_limit_quotient: u64, @@ -987,6 +1103,12 @@ pub struct Config { #[serde(default = "default_gossip_max_size")] #[serde(with = "serde_utils::quoted_u64")] gossip_max_size: u64, + #[serde(default = "default_max_request_blocks")] + #[serde(with = "serde_utils::quoted_u64")] + max_request_blocks: u64, + #[serde(default = "default_epochs_per_subnet_subscription")] + #[serde(with = "serde_utils::quoted_u64")] + epochs_per_subnet_subscription: u64, #[serde(default = "default_min_epochs_for_block_requests")] #[serde(with = "serde_utils::quoted_u64")] min_epochs_for_block_requests: u64, @@ -999,6 +1121,12 @@ pub struct Config { #[serde(default = "default_resp_timeout")] #[serde(with = "serde_utils::quoted_u64")] resp_timeout: u64, + #[serde(default = 
"default_attestation_propagation_slot_range")] + #[serde(with = "serde_utils::quoted_u64")] + attestation_propagation_slot_range: u64, + #[serde(default = "default_maximum_gossip_clock_disparity_millis")] + #[serde(with = "serde_utils::quoted_u64")] + maximum_gossip_clock_disparity_millis: u64, #[serde(default = "default_message_domain_invalid_snappy")] #[serde(with = "serde_utils::bytes_4_hex")] message_domain_invalid_snappy: [u8; 4], @@ -1011,6 +1139,18 @@ pub struct Config { #[serde(default = "default_attestation_subnet_prefix_bits")] #[serde(with = "serde_utils::quoted_u8")] attestation_subnet_prefix_bits: u8, + #[serde(default = "default_max_request_blocks_deneb")] + #[serde(with = "serde_utils::quoted_u64")] + max_request_blocks_deneb: u64, + #[serde(default = "default_max_request_blob_sidecars")] + #[serde(with = "serde_utils::quoted_u64")] + max_request_blob_sidecars: u64, + #[serde(default = "default_min_epochs_for_blob_sidecars_requests")] + #[serde(with = "serde_utils::quoted_u64")] + min_epochs_for_blob_sidecars_requests: u64, + #[serde(default = "default_blob_sidecar_subnet_count")] + #[serde(with = "serde_utils::quoted_u64")] + blob_sidecar_subnet_count: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1023,6 +1163,11 @@ fn default_capella_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } +fn default_deneb_fork_version() -> [u8; 4] { + // This value shouldn't be used. + [0xff, 0xff, 0xff, 0xff] +} + /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). /// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -1051,6 +1196,10 @@ fn default_subnets_per_node() -> u8 { 2u8 } +const fn default_max_per_epoch_activation_churn_limit() -> u64 { + 8 +} + const fn default_gossip_max_size() -> u64 { 10485760 } @@ -1087,6 +1236,70 @@ const fn default_attestation_subnet_prefix_bits() -> u8 { 6 } +const fn default_max_request_blocks() -> u64 { + 1024 +} + +const fn default_max_request_blocks_deneb() -> u64 { + 128 +} + +const fn default_max_request_blob_sidecars() -> u64 { + 768 +} + +const fn default_min_epochs_for_blob_sidecars_requests() -> u64 { + 4096 +} + +const fn default_blob_sidecar_subnet_count() -> u64 { + 6 +} + +const fn default_epochs_per_subnet_subscription() -> u64 { + 256 +} + +const fn default_attestation_propagation_slot_range() -> u64 { + 32 +} + +const fn default_maximum_gossip_clock_disparity_millis() -> u64 { + 500 +} + +fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { + let max_request_blocks = max_request_blocks as usize; + RuntimeVariableList::::from_vec( + vec![Hash256::zero(); max_request_blocks], + max_request_blocks, + ) + .as_ssz_bytes() + .len() +} + +fn max_blobs_by_root_request_common(max_request_blob_sidecars: u64) -> usize { + let max_request_blob_sidecars = max_request_blob_sidecars as usize; + RuntimeVariableList::::from_vec( + vec![Hash256::zero(); max_request_blob_sidecars], + max_request_blob_sidecars, + ) + .as_ssz_bytes() + .len() +} + +fn default_max_blocks_by_root_request() -> usize { + max_blocks_by_root_request_common(default_max_request_blocks()) +} + +fn default_max_blocks_by_root_request_deneb() -> usize { + max_blocks_by_root_request_common(default_max_request_blocks_deneb()) +} + +fn default_max_blobs_by_root_request() -> usize { + max_blobs_by_root_request_common(default_max_request_blob_sidecars()) +} + impl Default for Config { fn default() -> Self { let 
chain_spec = MainnetEthSpec::default_spec(); @@ -1163,6 +1376,10 @@ impl Config { capella_fork_epoch: spec .capella_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + deneb_fork_version: spec.deneb_fork_version, + deneb_fork_epoch: spec + .deneb_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, seconds_per_eth1_block: spec.seconds_per_eth1_block, @@ -1176,6 +1393,7 @@ impl Config { ejection_balance: spec.ejection_balance, churn_limit_quotient: spec.churn_limit_quotient, min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, + max_per_epoch_activation_churn_limit: spec.max_per_epoch_activation_churn_limit, proposer_score_boost: spec.proposer_score_boost.map(|value| MaybeQuoted { value }), @@ -1184,14 +1402,22 @@ impl Config { deposit_contract_address: spec.deposit_contract_address, gossip_max_size: spec.gossip_max_size, + max_request_blocks: spec.max_request_blocks, + epochs_per_subnet_subscription: spec.epochs_per_subnet_subscription, min_epochs_for_block_requests: spec.min_epochs_for_block_requests, max_chunk_size: spec.max_chunk_size, ttfb_timeout: spec.ttfb_timeout, resp_timeout: spec.resp_timeout, + attestation_propagation_slot_range: spec.attestation_propagation_slot_range, + maximum_gossip_clock_disparity_millis: spec.maximum_gossip_clock_disparity_millis, message_domain_invalid_snappy: spec.message_domain_invalid_snappy, message_domain_valid_snappy: spec.message_domain_valid_snappy, attestation_subnet_extra_bits: spec.attestation_subnet_extra_bits, attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, + max_request_blocks_deneb: spec.max_request_blocks_deneb, + max_request_blob_sidecars: spec.max_request_blob_sidecars, + min_epochs_for_blob_sidecars_requests: spec.min_epochs_for_blob_sidecars_requests, + blob_sidecar_subnet_count: spec.blob_sidecar_subnet_count, } } @@ -1221,6 +1447,8 @@ impl Config { bellatrix_fork_version, capella_fork_epoch, capella_fork_version, + deneb_fork_epoch, + deneb_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1231,6 +1459,7 @@ impl Config { inactivity_score_recovery_rate, ejection_balance, min_per_epoch_churn_limit, + max_per_epoch_activation_churn_limit, churn_limit_quotient, proposer_score_boost, deposit_chain_id, @@ -1245,6 +1474,14 @@ impl Config { message_domain_valid_snappy, attestation_subnet_extra_bits, attestation_subnet_prefix_bits, + max_request_blocks, + epochs_per_subnet_subscription, + attestation_propagation_slot_range, + maximum_gossip_clock_disparity_millis, + max_request_blocks_deneb, + max_request_blob_sidecars, + min_epochs_for_blob_sidecars_requests, + blob_sidecar_subnet_count, } = self; if preset_base != T::spec_name().to_string().as_str() { @@ -1263,6 +1500,8 @@ impl Config { bellatrix_fork_version, capella_fork_epoch: capella_fork_epoch.map(|q| q.value), capella_fork_version, + deneb_fork_epoch: deneb_fork_epoch.map(|q| q.value), + deneb_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1273,6 +1512,7 @@ impl Config { inactivity_score_recovery_rate, ejection_balance, min_per_epoch_churn_limit, + max_per_epoch_activation_churn_limit, churn_limit_quotient, proposer_score_boost: proposer_score_boost.map(|q| q.value), deposit_chain_id, @@ -1291,6 +1531,22 @@ impl Config { message_domain_valid_snappy, attestation_subnet_extra_bits, attestation_subnet_prefix_bits, + max_request_blocks, + epochs_per_subnet_subscription, + attestation_propagation_slot_range, + 
maximum_gossip_clock_disparity_millis, + max_request_blocks_deneb, + max_request_blob_sidecars, + min_epochs_for_blob_sidecars_requests, + blob_sidecar_subnet_count, + + // We need to re-derive any values that might have changed in the config. + max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks), + max_blocks_by_root_request_deneb: max_blocks_by_root_request_common( + max_request_blocks_deneb, + ), + max_blobs_by_root_request: max_blobs_by_root_request_common(max_request_blob_sidecars), + ..chain_spec.clone() }) } @@ -1525,6 +1781,7 @@ mod yaml_tests { INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 MIN_PER_EPOCH_CHURN_LIMIT: 4 + MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 CHURN_LIMIT_QUOTIENT: 65536 PROPOSER_SCORE_BOOST: 40 DEPOSIT_CHAIN_ID: 1 diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index e84798f6f..044fc57f2 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::{Epoch, Hash256}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 01f86d348..b651d34af 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,9 +1,9 @@ use crate::{ consts::altair, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, - EthSpec, ForkName, + DenebPreset, EthSpec, ForkName, }; use maplit::hashmap; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use superstruct::superstruct; @@ -12,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. #[superstruct( - variants(Bellatrix, Capella), + variants(Capella, Deneb), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -27,9 +27,11 @@ pub struct ConfigAndPreset { pub altair_preset: AltairPreset, #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, - #[superstruct(only(Capella))] #[serde(flatten)] pub capella_preset: CapellaPreset, + #[superstruct(only(Deneb))] + #[serde(flatten)] + pub deneb_preset: DenebPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
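// Back-of-the-envelope check (sketch) for the derived `max_blocks_by_root_request`
// values re-computed above: a `RuntimeVariableList` of `Hash256` roots SSZ-encodes
// to exactly 32 bytes per element (fixed-size items, no offsets), so
//     max_request_blocks       = 1024  ->  1024 * 32 = 32_768 bytes
//     max_request_blocks_deneb =  128  ->   128 * 32 =  4_096 bytes
fn ssz_root_list_byte_len(max_roots: u64) -> usize {
    (max_roots as usize) * 32
}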
#[serde(flatten)] pub extra_fields: HashMap, @@ -41,14 +43,24 @@ impl ConfigAndPreset { let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); + let capella_preset = CapellaPreset::from_chain_spec::(spec); let extra_fields = get_extra_fields(spec); - if spec.capella_fork_epoch.is_some() + if spec.deneb_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Capella) + || fork_name == Some(ForkName::Deneb) { - let capella_preset = CapellaPreset::from_chain_spec::(spec); - + let deneb_preset = DenebPreset::from_chain_spec::(spec); + ConfigAndPreset::Deneb(ConfigAndPresetDeneb { + config, + base_preset, + altair_preset, + bellatrix_preset, + capella_preset, + deneb_preset, + extra_fields, + }) + } else { ConfigAndPreset::Capella(ConfigAndPresetCapella { config, base_preset, @@ -57,14 +69,6 @@ impl ConfigAndPreset { capella_preset, extra_fields, }) - } else { - ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { - config, - base_preset, - altair_preset, - bellatrix_preset, - extra_fields, - }) } } } @@ -132,8 +136,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetCapella = + let from: ConfigAndPresetDeneb = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(ConfigAndPreset::Capella(from), yamlconfig); + assert_eq!(ConfigAndPreset::Deneb(from), yamlconfig); } } diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 7e757f89b..aba98c92b 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -3,7 +3,7 @@ use super::{ SyncSelectionProof, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index bbc3bd9fb..c818c7d80 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::typenum::U33; use test_random_derive::TestRandom; diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index d75643f65..e074ffdfa 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::{PublicKeyBytes, SignatureBytes}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index 1096cfaa2..e5c666df8 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::PublicKeyBytes; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/deposit_tree_snapshot.rs 
b/consensus/types/src/deposit_tree_snapshot.rs index 12e81d002..d4dcdb2ed 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -1,7 +1,7 @@ use crate::*; use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use int_to_bytes::int_to_bytes32; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use test_utils::TestRandom; diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 409383c90..3ae7c39cf 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Epoch; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index d8f476b99..e2c4e511e 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -1,7 +1,7 @@ use super::Hash256; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 64bfb8da0..17baad9c4 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -1,11 +1,12 @@ use crate::*; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ - bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U16, - U16777216, U2, U2048, U256, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, + bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16, + U16777216, U2, U2048, U256, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, U8192, }; +use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -102,6 +103,14 @@ pub trait EthSpec: */ type MaxBlsToExecutionChanges: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxWithdrawalsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Deneb + */ + type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; + type MaxBlobCommitmentsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; + type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type KzgCommitmentInclusionProofDepth: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -120,6 +129,11 @@ pub trait EthSpec: /// Must be set to `SyncCommitteeSize / SyncCommitteeSubnetCount`. type SyncSubcommitteeSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// The total length of a blob in bytes. + /// + /// Must be set to `BytesPerFieldElement * FieldElementsPerBlob`. + type BytesPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; + fn default_spec() -> ChainSpec; fn spec_name() -> EthSpecId; @@ -239,6 +253,30 @@ pub trait EthSpec: fn max_withdrawals_per_payload() -> usize { Self::MaxWithdrawalsPerPayload::to_usize() } + + /// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification. 
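// Worked numbers (sketch; mainnet preset, assigned just below) for the new Deneb
// associated types: a blob is FieldElementsPerBlob * BytesPerFieldElement
// = 4096 * 32 = 131_072 bytes (hence `BytesPerBlob = U131072`), so a block can
// carry at most MaxBlobsPerBlock * BytesPerBlob = 6 * 131_072 = 786_432 bytes
// of blob data.
#[test]
fn deneb_blob_size_arithmetic() {
    assert_eq!(4096 * 32, 131_072);
    assert_eq!(6 * 131_072, 786_432);
}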
+ fn max_blobs_per_block() -> usize { + Self::MaxBlobsPerBlock::to_usize() + } + + /// Returns the `MAX_BLOB_COMMITMENTS_PER_BLOCK` constant for this specification. + fn max_blob_commitments_per_block() -> usize { + Self::MaxBlobCommitmentsPerBlock::to_usize() + } + + /// Returns the `FIELD_ELEMENTS_PER_BLOB` constant for this specification. + fn field_elements_per_blob() -> usize { + Self::FieldElementsPerBlob::to_usize() + } + + /// Returns the `BYTES_PER_BLOB` constant for this specification. + fn bytes_per_blob() -> usize { + Self::BytesPerBlob::to_usize() + } + /// Returns the `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` preset for this specification. + fn kzg_proof_inclusion_proof_depth() -> usize { + Self::KzgCommitmentInclusionProofDepth::to_usize() + } } /// Macro to inherit some type values from another EthSpec. @@ -278,6 +316,12 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; + type MaxBlobsPerBlock = U6; + type MaxBlobCommitmentsPerBlock = U4096; + type BytesPerFieldElement = U32; + type FieldElementsPerBlob = U4096; + type BytesPerBlob = U131072; + type KzgCommitmentInclusionProofDepth = U17; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -308,6 +352,10 @@ impl EthSpec for MinimalEthSpec { type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch type MaxWithdrawalsPerPayload = U4; + type FieldElementsPerBlob = U4096; + type BytesPerBlob = U131072; + type MaxBlobCommitmentsPerBlock = U16; + type KzgCommitmentInclusionProofDepth = U9; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -328,7 +376,9 @@ impl EthSpec for MinimalEthSpec { GasLimitDenominator, MinGasLimit, MaxExtraDataBytes, - MaxBlsToExecutionChanges + MaxBlsToExecutionChanges, + MaxBlobsPerBlock, + BytesPerFieldElement }); fn default_spec() -> ChainSpec { @@ -374,6 +424,12 @@ impl EthSpec for GnosisEthSpec { type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U8; + type MaxBlobsPerBlock = U6; + type MaxBlobCommitmentsPerBlock = U4096; + type FieldElementsPerBlob = U4096; + type BytesPerFieldElement = U32; + type BytesPerBlob = U131072; + type KzgCommitmentInclusionProofDepth = U17; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 363a35a86..b2401f0c0 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::Hash256; use derivative::Derivative; use rand::RngCore; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; @@ -20,7 +20,7 @@ use std::fmt; )] #[derivative(Debug = "transparent")] #[serde(transparent)] -pub struct ExecutionBlockHash(Hash256); +pub struct ExecutionBlockHash(pub Hash256); impl ExecutionBlockHash { pub fn zero() -> Self { diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index b19988ff7..945222a92 100644 --- a/consensus/types/src/execution_block_header.rs +++ 
b/consensus/types/src/execution_block_header.rs @@ -24,9 +24,12 @@ use metastruct::metastruct; /// /// Credit to Reth for the type definition. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[metastruct(mappings(map_execution_block_header_fields_except_withdrawals(exclude( - withdrawals_root -))))] +#[metastruct(mappings(map_execution_block_header_fields_base(exclude( + withdrawals_root, + blob_gas_used, + excess_blob_gas, + parent_beacon_block_root +)),))] pub struct ExecutionBlockHeader { pub parent_hash: Hash256, pub ommers_hash: Hash256, @@ -45,6 +48,9 @@ pub struct ExecutionBlockHeader { pub nonce: Hash64, pub base_fee_per_gas: Uint256, pub withdrawals_root: Option, + pub blob_gas_used: Option, + pub excess_blob_gas: Option, + pub parent_beacon_block_root: Option, } impl ExecutionBlockHeader { @@ -53,6 +59,9 @@ impl ExecutionBlockHeader { rlp_empty_list_root: Hash256, rlp_transactions_root: Hash256, rlp_withdrawals_root: Option, + rlp_blob_gas_used: Option, + rlp_excess_blob_gas: Option, + rlp_parent_beacon_block_root: Option, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. @@ -74,6 +83,9 @@ impl ExecutionBlockHeader { nonce: Hash64::zero(), base_fee_per_gas: payload.base_fee_per_gas(), withdrawals_root: rlp_withdrawals_root, + blob_gas_used: rlp_blob_gas_used, + excess_blob_gas: rlp_excess_blob_gas, + parent_beacon_block_root: rlp_parent_beacon_block_root, } } } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 690138da6..1dc5951b2 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -15,7 +15,7 @@ pub type Transactions = VariableList< pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive( Default, @@ -81,8 +81,14 @@ pub struct ExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub withdrawals: Withdrawals, + #[superstruct(only(Deneb), partial_getter(copy))] + #[serde(with = "serde_utils::quoted_u64")] + pub blob_gas_used: u64, + #[superstruct(only(Deneb), partial_getter(copy))] + #[serde(with = "serde_utils::quoted_u64")] + pub excess_blob_gas: u64, } impl<'a, T: EthSpec> ExecutionPayloadRef<'a, T> { @@ -103,6 +109,7 @@ impl ExecutionPayload { ))), ForkName::Merge => ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge), ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), + ForkName::Deneb => ExecutionPayloadDeneb::from_ssz_bytes(bytes).map(Self::Deneb), } } @@ -129,6 +136,19 @@ impl ExecutionPayload { // Max size of variable length `withdrawals` field + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) } + + #[allow(clippy::arithmetic_side_effects)] + /// Returns the maximum size of an execution payload. 
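+    /// (Deneb variant: the payload's fixed-length SSZ footprint plus worst-case
+    /// `extra_data`, `transactions`, and `withdrawals` sizes, mirroring the Capella
+    /// calculation above.)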
+ pub fn max_execution_payload_deneb_size() -> usize { + // Fixed part + ExecutionPayloadDeneb::::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) + } } impl ForkVersionDeserialize for ExecutionPayload { @@ -143,6 +163,7 @@ impl ForkVersionDeserialize for ExecutionPayload { Ok(match fork_name { ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayload failed to deserialize: unsupported fork '{}'", @@ -158,6 +179,7 @@ impl ExecutionPayload { match self { ExecutionPayload::Merge(_) => ForkName::Merge, ExecutionPayload::Capella(_) => ForkName::Capella, + ExecutionPayload::Deneb(_) => ForkName::Deneb, } } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 77bea03db..e0859c0a1 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash; use BeaconStateError; #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive( Default, @@ -77,9 +77,17 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] #[superstruct(getter(copy))] pub withdrawals_root: Hash256, + #[superstruct(only(Deneb))] + #[serde(with = "serde_utils::quoted_u64")] + #[superstruct(getter(copy))] + pub blob_gas_used: u64, + #[superstruct(only(Deneb))] + #[serde(with = "serde_utils::quoted_u64")] + #[superstruct(getter(copy))] + pub excess_blob_gas: u64, } impl ExecutionPayloadHeader { @@ -96,6 +104,7 @@ impl ExecutionPayloadHeader { ForkName::Capella => { ExecutionPayloadHeaderCapella::from_ssz_bytes(bytes).map(Self::Capella) } + ForkName::Deneb => ExecutionPayloadHeaderDeneb::from_ssz_bytes(bytes).map(Self::Deneb), } } } @@ -131,6 +140,30 @@ impl ExecutionPayloadHeaderMerge { } } +impl ExecutionPayloadHeaderCapella { + pub fn upgrade_to_deneb(&self) -> ExecutionPayloadHeaderDeneb { + ExecutionPayloadHeaderDeneb { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + block_hash: self.block_hash, + transactions_root: self.transactions_root, + withdrawals_root: self.withdrawals_root, + blob_gas_used: 0, + excess_blob_gas: 
0, + } + } +} + impl<'a, T: EthSpec> From<&'a ExecutionPayloadMerge> for ExecutionPayloadHeaderMerge { fn from(payload: &'a ExecutionPayloadMerge) -> Self { Self { @@ -173,6 +206,30 @@ impl<'a, T: EthSpec> From<&'a ExecutionPayloadCapella> for ExecutionPayloadHe } } +impl<'a, T: EthSpec> From<&'a ExecutionPayloadDeneb> for ExecutionPayloadHeaderDeneb { + fn from(payload: &'a ExecutionPayloadDeneb) -> Self { + Self { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} + // These impls are required to work around an inelegance in `to_execution_payload_header`. // They only clone headers so they should be relatively cheap. impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge { @@ -187,6 +244,12 @@ impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderCapella { } } +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderDeneb { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + impl<'a, T: EthSpec> From> for ExecutionPayloadHeader { fn from(payload: ExecutionPayloadRef<'a, T>) -> Self { map_execution_payload_ref_into_execution_payload_header!( @@ -217,6 +280,15 @@ impl TryFrom> for ExecutionPayloadHeaderCa } } } +impl TryFrom> for ExecutionPayloadHeaderDeneb { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Deneb(execution_payload_header) => Ok(execution_payload_header), + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} impl ForkVersionDeserialize for ExecutionPayloadHeader { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( @@ -233,6 +305,7 @@ impl ForkVersionDeserialize for ExecutionPayloadHeader { Ok(match fork_name { ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 4650881f7..b23113f43 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Epoch; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 90d1fbc68..999289271 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -9,6 +9,7 @@ pub struct ForkContext { current_fork: RwLock, fork_to_digest: HashMap, digest_to_fork: HashMap<[u8; 4], ForkName>, + pub spec: ChainSpec, } impl 
ForkContext { @@ -54,6 +55,13 @@ impl ForkContext { )); } + if spec.deneb_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Deneb, + ChainSpec::compute_fork_digest(spec.deneb_fork_version, genesis_validators_root), + )); + } + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest @@ -66,6 +74,7 @@ impl ForkContext { current_fork: RwLock::new(spec.fork_name_at_slot::(current_slot)), fork_to_digest, digest_to_fork, + spec: spec.clone(), } } diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index bf9c48cd7..52ce57a2a 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Hash256, SignedRoot}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 85144a613..6523b2a67 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -1,17 +1,20 @@ use crate::{ChainSpec, Epoch}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::{self, Display, Formatter}; use std::str::FromStr; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Decode, Encode, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(try_from = "String")] #[serde(into = "String")] +#[ssz(enum_behaviour = "tag")] pub enum ForkName { Base, Altair, Merge, Capella, + Deneb, } impl ForkName { @@ -21,6 +24,7 @@ impl ForkName { ForkName::Altair, ForkName::Merge, ForkName::Capella, + ForkName::Deneb, ] } @@ -38,24 +42,35 @@ impl ForkName { spec.altair_fork_epoch = None; spec.bellatrix_fork_epoch = None; spec.capella_fork_epoch = None; + spec.deneb_fork_epoch = None; spec } ForkName::Altair => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = None; spec.capella_fork_epoch = None; + spec.deneb_fork_epoch = None; spec } ForkName::Merge => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = None; + spec.deneb_fork_epoch = None; spec } ForkName::Capella => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = None; + spec + } + ForkName::Deneb => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); spec } } @@ -70,6 +85,7 @@ impl ForkName { ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), ForkName::Capella => Some(ForkName::Merge), + ForkName::Deneb => Some(ForkName::Capella), } } @@ -81,7 +97,8 @@ impl ForkName { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), ForkName::Merge => Some(ForkName::Capella), - ForkName::Capella => None, + ForkName::Capella => Some(ForkName::Deneb), + ForkName::Deneb => None, } } } @@ -127,6 +144,10 @@ macro_rules! 
map_fork_name_with { let (value, extra_data) = $body; ($t::Capella(value), extra_data) } + ForkName::Deneb => { + let (value, extra_data) = $body; + ($t::Deneb(value), extra_data) + } } }; } @@ -140,6 +161,7 @@ impl FromStr for ForkName { "altair" => ForkName::Altair, "bellatrix" | "merge" => ForkName::Merge, "capella" => ForkName::Capella, + "deneb" => ForkName::Deneb, _ => return Err(format!("unknown fork name: {}", fork_name)), }) } @@ -152,6 +174,7 @@ impl Display for ForkName { ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), ForkName::Capella => "capella".fmt(f), + ForkName::Deneb => "deneb".fmt(f), } } } diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index 2d97dc121..195c083e2 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -4,47 +4,6 @@ use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; use std::sync::Arc; -// Deserialize is only implemented for types that implement ForkVersionDeserialize -#[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ExecutionOptimisticFinalizedForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub execution_optimistic: Option, - pub finalized: Option, - pub data: T, -} - -impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticFinalizedForkVersionedResponse -where - F: ForkVersionDeserialize, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - struct Helper { - version: Option, - execution_optimistic: Option, - finalized: Option, - data: serde_json::Value, - } - - let helper = Helper::deserialize(deserializer)?; - let data = match helper.version { - Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, - None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, - }; - - Ok(ExecutionOptimisticFinalizedForkVersionedResponse { - version: helper.version, - execution_optimistic: helper.execution_optimistic, - finalized: helper.finalized, - data, - }) - } -} - pub trait ForkVersionDeserialize: Sized + DeserializeOwned { fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: Value, @@ -52,17 +11,41 @@ pub trait ForkVersionDeserialize: Sized + DeserializeOwned { ) -> Result; } -// Deserialize is only implemented for types that implement ForkVersionDeserialize +/// Deserialize is only implemented for types that implement ForkVersionDeserialize. +/// +/// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than +/// version. If you *do* care about adding other fields you can mix in any type that implements +/// `Deserialize`. #[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ForkVersionedResponse { +pub struct ForkVersionedResponse { #[serde(skip_serializing_if = "Option::is_none")] pub version: Option, + #[serde(flatten)] + pub metadata: M, pub data: T, } -impl<'de, F> serde::Deserialize<'de> for ForkVersionedResponse +/// Metadata type similar to unit (i.e. `()`) but deserializes from a map (`serde_json::Value`). +/// +/// Unfortunately the braces are semantically significant, i.e. `struct EmptyMetadata;` does not +/// work. +#[derive(Debug, PartialEq, Clone, Default, Deserialize, Serialize)] +pub struct EmptyMetadata {} + +/// Fork versioned response with extra information about finalization & optimistic execution. 
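+///
+/// As an illustrative (non-normative) example, a JSON body shaped like
+/// `{"version": "capella", "execution_optimistic": false, "finalized": true, "data": {...}}`
+/// deserializes with the two extra fields captured by `ExecutionOptimisticFinalizedMetadata`.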
+pub type ExecutionOptimisticFinalizedForkVersionedResponse = + ForkVersionedResponse; + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct ExecutionOptimisticFinalizedMetadata { + pub execution_optimistic: Option, + pub finalized: Option, +} + +impl<'de, F, M> serde::Deserialize<'de> for ForkVersionedResponse where F: ForkVersionDeserialize, + M: DeserializeOwned, { fn deserialize(deserializer: D) -> Result where @@ -71,6 +54,8 @@ where #[derive(Deserialize)] struct Helper { version: Option, + #[serde(flatten)] + metadata: serde_json::Value, data: serde_json::Value, } @@ -79,9 +64,11 @@ where Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, }; + let metadata = serde_json::from_value(helper.metadata).map_err(serde::de::Error::custom)?; Ok(ForkVersionedResponse { version: helper.version, + metadata, data, }) } @@ -98,6 +85,22 @@ impl ForkVersionDeserialize for Arc { } } +impl ForkVersionedResponse { + /// Apply a function to the inner `data`, potentially changing its type. + pub fn map_data(self, f: impl FnOnce(T) -> U) -> ForkVersionedResponse { + let ForkVersionedResponse { + version, + metadata, + data, + } = self; + ForkVersionedResponse { + version, + metadata, + data: f(data), + } + } +} + #[cfg(test)] mod fork_version_response_tests { use crate::{ @@ -112,6 +115,7 @@ mod fork_version_response_tests { let response_json = serde_json::to_string(&json!(ForkVersionedResponse::> { version: Some(ForkName::Merge), + metadata: Default::default(), data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), })) .unwrap(); @@ -129,6 +133,7 @@ mod fork_version_response_tests { let response_json = serde_json::to_string(&json!(ForkVersionedResponse::> { version: Some(ForkName::Capella), + metadata: Default::default(), data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), })) .unwrap(); diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index e75b64cae..e3e037fd6 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::FixedVector; use test_random_derive::TestRandom; diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index 84d87b85f..dcc387d3d 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -4,7 +4,7 @@ use crate::{BeaconState, EthSpec, Hash256}; use cached_tree_hash::Error; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use compare_fields_derive::CompareFields; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use test_random_derive::TestRandom; diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index c59cbef30..c2d48d724 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, EthSpec, VariableList}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use 
std::hash::{Hash, Hasher}; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 85ce35176..b07b497a2 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -98,6 +98,11 @@ pub mod slot_data; #[cfg(feature = "sqlite")] pub mod sqlite; +pub mod blob_sidecar; +pub mod light_client_header; +pub mod non_zero_usize; +pub mod runtime_var_list; + use ethereum_types::{H160, H256}; pub use crate::aggregate_and_proof::AggregateAndProof; @@ -106,22 +111,21 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, - BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockDeneb, + BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, - BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBodyDeneb, BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; +pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; -pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetBellatrix, ConfigAndPresetCapella, -}; +pub use crate::config_and_preset::{ConfigAndPreset, ConfigAndPresetCapella, ConfigAndPresetDeneb}; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; @@ -133,12 +137,12 @@ pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::ExecutionBlockHeader; pub use crate::execution_payload::{ - ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, ExecutionPayloadRef, - Transaction, Transactions, Withdrawals, + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, + ExecutionPayloadRef, Transaction, Transactions, Withdrawals, }; pub use crate::execution_payload_header::{ - ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; @@ -148,26 +152,32 @@ pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedRe pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; +pub use crate::light_client_bootstrap::LightClientBootstrap; pub use crate::light_client_finality_update::LightClientFinalityUpdate; +pub use crate::light_client_header::LightClientHeader; pub use 
crate::light_client_optimistic_update::LightClientOptimisticUpdate; +pub use crate::light_client_update::{Error as LightClientError, LightClientUpdate}; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; pub use crate::payload::{ - AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadMerge, - BlindedPayloadRef, BlockType, ExecPayload, FullPayload, FullPayloadCapella, FullPayloadMerge, - FullPayloadRef, OwnedExecPayload, + AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadDeneb, + BlindedPayloadMerge, BlindedPayloadRef, BlockType, ExecPayload, FullPayload, + FullPayloadCapella, FullPayloadDeneb, FullPayloadMerge, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; +pub use crate::runtime_var_list::RuntimeVariableList; pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockHash, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockHash, SignedBeaconBlockMerge, + SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; @@ -198,6 +208,8 @@ pub type Uint256 = ethereum_types::U256; pub type Address = H160; pub type ForkVersion = [u8; 4]; pub type BLSFieldElement = Uint256; +pub type Blob = FixedVector::BytesPerBlob>; +pub type KzgProofs = VariableList::MaxBlobCommitmentsPerBlock>; pub type VersionedHash = Hash256; pub type Hash64 = ethereum_types::H64; @@ -205,5 +217,8 @@ pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; + +pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; + pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 1a5eed220..616aced48 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,10 +1,13 @@ -use super::{BeaconBlockHeader, BeaconState, EthSpec, FixedVector, Hash256, SyncCommittee}; -use crate::{light_client_update::*, test_utils::TestRandom}; -use serde_derive::{Deserialize, Serialize}; +use super::{BeaconState, EthSpec, FixedVector, Hash256, SyncCommittee}; +use crate::{ + light_client_update::*, test_utils::TestRandom, ForkName, ForkVersionDeserialize, + LightClientHeader, +}; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::Value; use ssz_derive::{Decode, Encode}; use std::sync::Arc; use test_random_derive::TestRandom; 
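// A minimal usage sketch, assuming only the `Blob` alias and `bytes_per_blob` accessor
// added to `lib.rs`/`eth_spec.rs` above (`MainnetEthSpec` names the mainnet preset):
//
//     let blob = Blob::<MainnetEthSpec>::new(vec![0u8; 131_072]).unwrap();
//     assert_eq!(blob.len(), MainnetEthSpec::bytes_per_blob()); // 4096 elements * 32 bytes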
-use tree_hash::TreeHash; /// A LightClientBootstrap is the initializer we send over to lightclient nodes /// that are trying to generate their basic storage when booting up. @@ -22,8 +25,8 @@ use tree_hash::TreeHash; #[serde(bound = "T: EthSpec")] #[arbitrary(bound = "T: EthSpec")] pub struct LightClientBootstrap { - /// Requested beacon block header. - pub header: BeaconBlockHeader, + /// The requested beacon block header. + pub header: LightClientHeader, /// The `SyncCommittee` used in the requested period. pub current_sync_committee: Arc>, /// Merkle proof for sync committee @@ -33,17 +36,37 @@ pub struct LightClientBootstrap { impl LightClientBootstrap { pub fn from_beacon_state(beacon_state: &mut BeaconState) -> Result { let mut header = beacon_state.latest_block_header().clone(); - header.state_root = beacon_state.tree_hash_root(); + header.state_root = beacon_state.update_tree_hash_cache()?; let current_sync_committee_branch = beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?; Ok(LightClientBootstrap { - header, + header: header.into(), current_sync_committee: beacon_state.current_sync_committee()?.clone(), current_sync_committee_branch: FixedVector::new(current_sync_committee_branch)?, }) } } +impl ForkVersionDeserialize for LightClientBootstrap { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Altair | ForkName::Merge => { + Ok(serde_json::from_value::>(value) + .map_err(serde::de::Error::custom))? + } + ForkName::Base | ForkName::Capella | ForkName::Deneb => { + Err(serde::de::Error::custom(format!( + "LightClientBootstrap failed to deserialize: unsupported fork '{}'", + fork_name + ))) + } + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 08069c930..87601b815 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -1,9 +1,12 @@ use super::{ - BeaconBlockHeader, EthSpec, FixedVector, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, - Slot, SyncAggregate, + EthSpec, FixedVector, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, SyncAggregate, }; -use crate::{light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec}; -use serde_derive::{Deserialize, Serialize}; +use crate::{ + light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, ForkName, + ForkVersionDeserialize, LightClientHeader, +}; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::Value; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -25,9 +28,9 @@ use tree_hash::TreeHash; #[arbitrary(bound = "T: EthSpec")] pub struct LightClientFinalityUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. - pub attested_header: BeaconBlockHeader, + pub attested_header: LightClientHeader, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). - pub finalized_header: BeaconBlockHeader, + pub finalized_header: LightClientHeader, /// Merkle proof attesting finalized header. 
pub finality_branch: FixedVector, /// current sync aggreggate @@ -68,8 +71,8 @@ impl LightClientFinalityUpdate { let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { - attested_header, - finalized_header, + attested_header: attested_header.into(), + finalized_header: finalized_header.into(), finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), @@ -77,6 +80,26 @@ impl LightClientFinalityUpdate { } } +impl ForkVersionDeserialize for LightClientFinalityUpdate { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Altair | ForkName::Merge => Ok(serde_json::from_value::< + LightClientFinalityUpdate, + >(value) + .map_err(serde::de::Error::custom))?, + ForkName::Base | ForkName::Capella | ForkName::Deneb => { + Err(serde::de::Error::custom(format!( + "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", + fork_name + ))) + } + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs new file mode 100644 index 000000000..8fe31f7af --- /dev/null +++ b/consensus/types/src/light_client_header.rs @@ -0,0 +1,26 @@ +use crate::test_utils::TestRandom; +use crate::BeaconBlockHeader; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; + +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] +pub struct LightClientHeader { + pub beacon: BeaconBlockHeader, +} + +impl From for LightClientHeader { + fn from(beacon: BeaconBlockHeader) -> Self { + LightClientHeader { beacon } + } +} diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 7a39bd9ac..d883d735f 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -1,8 +1,10 @@ -use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate}; +use super::{EthSpec, ForkName, ForkVersionDeserialize, Slot, SyncAggregate}; +use crate::light_client_header::LightClientHeader; use crate::{ light_client_update::Error, test_utils::TestRandom, BeaconState, ChainSpec, SignedBeaconBlock, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::Value; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -24,7 +26,7 @@ use tree_hash::TreeHash; #[arbitrary(bound = "T: EthSpec")] pub struct LightClientOptimisticUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. 
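/// (With this change the header is wrapped, so it serializes as `{"beacon": {...}}`,
/// matching the `LightClientHeader` struct introduced in `light_client_header.rs`.)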
- pub attested_header: BeaconBlockHeader, + pub attested_header: LightClientHeader, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated singature @@ -53,13 +55,33 @@ impl LightClientOptimisticUpdate { let mut attested_header = attested_state.latest_block_header().clone(); attested_header.state_root = attested_state.tree_hash_root(); Ok(Self { - attested_header, + attested_header: attested_header.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), }) } } +impl ForkVersionDeserialize for LightClientOptimisticUpdate { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Altair | ForkName::Merge => Ok(serde_json::from_value::< + LightClientOptimisticUpdate, + >(value) + .map_err(serde::de::Error::custom))?, + ForkName::Base | ForkName::Capella | ForkName::Deneb => { + Err(serde::de::Error::custom(format!( + "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", + fork_name + ))) + } + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index ca35f9680..718cd7553 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,7 +1,11 @@ use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; -use crate::{beacon_state, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; +use crate::{ + beacon_state, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec, ForkName, + ForkVersionDeserialize, LightClientHeader, +}; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::Value; use ssz_derive::{Decode, Encode}; use ssz_types::typenum::{U5, U6}; use std::sync::Arc; @@ -67,13 +71,13 @@ impl From for Error { #[arbitrary(bound = "T: EthSpec")] pub struct LightClientUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. - pub attested_header: BeaconBlockHeader, + pub attested_header: LightClientHeader, /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, /// Merkle proof for next sync committee pub next_sync_committee_branch: FixedVector, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). - pub finalized_header: BeaconBlockHeader, + pub finalized_header: LightClientHeader, /// Merkle proof attesting finalized header. 
pub finality_branch: FixedVector, /// current sync aggreggate @@ -128,10 +132,10 @@ impl LightClientUpdate { attested_state.compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?; let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { - attested_header, + attested_header: attested_header.into(), next_sync_committee: attested_state.next_sync_committee()?.clone(), next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, - finalized_header, + finalized_header: finalized_header.into(), finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), @@ -139,6 +143,26 @@ impl LightClientUpdate { } } +impl ForkVersionDeserialize for LightClientUpdate { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Altair | ForkName::Merge => { + Ok(serde_json::from_value::>(value) + .map_err(serde::de::Error::custom))? + } + ForkName::Base | ForkName::Capella | ForkName::Deneb => { + Err(serde::de::Error::custom(format!( + "LightClientUpdate failed to deserialize: unsupported fork '{}'", + fork_name + ))) + } + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/non_zero_usize.rs b/consensus/types/src/non_zero_usize.rs new file mode 100644 index 000000000..d61000c9a --- /dev/null +++ b/consensus/types/src/non_zero_usize.rs @@ -0,0 +1,8 @@ +use std::num::NonZeroUsize; + +pub const fn new_non_zero_usize(x: usize) -> NonZeroUsize { + match NonZeroUsize::new(x) { + Some(n) => n, + None => panic!("Expected a non zero usize."), + } +} diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index 4f170a60b..e94e56f0c 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -1,6 +1,6 @@ use crate::{consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom, Hash256}; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use test_random_derive::TestRandom; use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 2795c7f10..2f7975161 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -39,6 +39,7 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields fn withdrawals_root(&self) -> Result; + fn blob_gas_used(&self) -> Result; /// Is this a default payload with 0x0 roots for transactions and withdrawals? 
fn is_default_with_zero_roots(&self) -> bool; @@ -81,8 +82,13 @@ pub trait AbstractExecPayload: + TryFrom> + TryInto + TryInto + + TryInto { - type Ref<'a>: ExecPayload + Copy + From<&'a Self::Merge> + From<&'a Self::Capella>; + type Ref<'a>: ExecPayload + + Copy + + From<&'a Self::Merge> + + From<&'a Self::Capella> + + From<&'a Self::Deneb>; type Merge: OwnedExecPayload + Into @@ -92,12 +98,14 @@ pub trait AbstractExecPayload: + Into + for<'a> From>> + TryFrom>; - - fn default_at_fork(fork_name: ForkName) -> Result; + type Deneb: OwnedExecPayload + + Into + + for<'a> From>> + + TryFrom>; } #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -136,6 +144,8 @@ pub struct FullPayload { pub execution_payload: ExecutionPayloadMerge, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload: ExecutionPayloadCapella, + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload: ExecutionPayloadDeneb, } impl From> for ExecutionPayload { @@ -239,6 +249,16 @@ impl ExecPayload for FullPayload { FullPayload::Capella(ref inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } + FullPayload::Deneb(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + } + } + + fn blob_gas_used(&self) -> Result { + match self { + FullPayload::Merge(_) | FullPayload::Capella(_) => Err(Error::IncorrectStateVariant), + FullPayload::Deneb(ref inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -261,6 +281,15 @@ impl FullPayload { cons(inner.execution_payload) }) } + + pub fn default_at_fork(fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Capella => Ok(FullPayloadCapella::default().into()), + ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), + } + } } impl<'a, T: EthSpec> FullPayloadRef<'a, T> { @@ -345,6 +374,18 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { FullPayloadRef::Capella(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } + FullPayloadRef::Deneb(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + } + } + + fn blob_gas_used(&self) -> Result { + match self { + FullPayloadRef::Merge(_) | FullPayloadRef::Capella(_) => { + Err(Error::IncorrectStateVariant) + } + FullPayloadRef::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -365,14 +406,7 @@ impl AbstractExecPayload for FullPayload { type Ref<'a> = FullPayloadRef<'a, T>; type Merge = FullPayloadMerge; type Capella = FullPayloadCapella; - - fn default_at_fork(fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), - ForkName::Merge => Ok(FullPayloadMerge::default().into()), - ForkName::Capella => Ok(FullPayloadCapella::default().into()), - } - } + type Deneb = FullPayloadDeneb; } impl From> for FullPayload { @@ -391,7 +425,7 @@ impl TryFrom> for FullPayload { } #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -429,6 +463,8 @@ pub struct BlindedPayload { pub execution_payload_header: ExecutionPayloadHeaderMerge, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct(only(Deneb), 
partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload_header: ExecutionPayloadHeaderDeneb, } impl<'a, T: EthSpec> From> for BlindedPayload { @@ -510,6 +546,16 @@ impl ExecPayload for BlindedPayload { BlindedPayload::Capella(ref inner) => { Ok(inner.execution_payload_header.withdrawals_root) } + BlindedPayload::Deneb(ref inner) => Ok(inner.execution_payload_header.withdrawals_root), + } + } + + fn blob_gas_used(&self) -> Result { + match self { + BlindedPayload::Merge(_) | BlindedPayload::Capella(_) => { + Err(Error::IncorrectStateVariant) + } + BlindedPayload::Deneb(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), } } @@ -597,6 +643,16 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { BlindedPayloadRef::Capella(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } + BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.withdrawals_root), + } + } + + fn blob_gas_used(&self) -> Result { + match self { + BlindedPayloadRef::Merge(_) | BlindedPayloadRef::Capella(_) => { + Err(Error::IncorrectStateVariant) + } + BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), } } @@ -625,7 +681,8 @@ macro_rules! impl_exec_payload_common { $block_type_variant:ident, // Blinded | Full $is_default_with_empty_roots:block, $f:block, - $g:block) => { + $g:block, + $h:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant @@ -683,6 +740,11 @@ macro_rules! impl_exec_payload_common { let g = $g; g(self) } + + fn blob_gas_used(&self) -> Result { + let h = $h; + h(self) + } } impl From<$wrapped_type> for $wrapper_type { @@ -720,6 +782,14 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.withdrawals_root() }; c + }, + { + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.blob_gas_used() + }; + c } ); @@ -799,6 +869,14 @@ macro_rules! 
impl_exec_payload_for_fork { wrapper_ref_type.withdrawals_root() }; c + }, + { + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + |payload: &$wrapper_type_full| { + let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); + wrapper_ref_type.blob_gas_used() + }; + c } ); @@ -860,19 +938,19 @@ impl_exec_payload_for_fork!( ExecutionPayloadCapella, Capella ); +impl_exec_payload_for_fork!( + BlindedPayloadDeneb, + FullPayloadDeneb, + ExecutionPayloadHeaderDeneb, + ExecutionPayloadDeneb, + Deneb +); impl AbstractExecPayload for BlindedPayload { type Ref<'a> = BlindedPayloadRef<'a, T>; type Merge = BlindedPayloadMerge; type Capella = BlindedPayloadCapella; - - fn default_at_fork(fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), - ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), - ForkName::Capella => Ok(BlindedPayloadCapella::default().into()), - } - } + type Deneb = BlindedPayloadDeneb; } impl From> for BlindedPayload { @@ -899,6 +977,11 @@ impl From> for BlindedPayload { execution_payload_header, }) } + ExecutionPayloadHeader::Deneb(execution_payload_header) => { + Self::Deneb(BlindedPayloadDeneb { + execution_payload_header, + }) + } } } } @@ -912,6 +995,16 @@ impl From> for ExecutionPayloadHeader { BlindedPayload::Capella(blinded_payload) => { ExecutionPayloadHeader::Capella(blinded_payload.execution_payload_header) } + BlindedPayload::Deneb(blinded_payload) => { + ExecutionPayloadHeader::Deneb(blinded_payload.execution_payload_header) + } } } } + +/// The block production flow version to be used. +pub enum BlockProductionVersion { + V3, + BlindedV2, + FullV2, +} diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 88db0ec4d..d25a6987c 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{AttestationData, BitList, EthSpec}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index e65dd8f60..63a372ea1 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -1,5 +1,5 @@ use crate::{ChainSpec, Epoch, EthSpec, Unsigned}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; /// Value-level representation of an Ethereum consensus "preset". 
/// @@ -205,6 +205,27 @@ impl CapellaPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct DenebPreset { + #[serde(with = "serde_utils::quoted_u64")] + pub max_blobs_per_block: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_blob_commitments_per_block: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub field_elements_per_blob: u64, +} + +impl DenebPreset { + pub fn from_chain_spec(_spec: &ChainSpec) -> Self { + Self { + max_blobs_per_block: T::max_blobs_per_block() as u64, + max_blob_commitments_per_block: T::max_blob_commitments_per_block() as u64, + field_elements_per_blob: T::field_elements_per_blob() as u64, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -243,6 +264,9 @@ mod test { let capella: CapellaPreset = preset_from_file(&preset_name, "capella.yaml"); assert_eq!(capella, CapellaPreset::from_chain_spec::(&spec)); + + let deneb: DenebPreset = preset_from_file(&preset_name, "deneb.yaml"); + assert_eq!(deneb, DenebPreset::from_chain_spec::(&spec)); } #[test] diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index 1ac2464a4..ee55d62c2 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::SignedBeaconBlockHeader; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs new file mode 100644 index 000000000..84ad5d074 --- /dev/null +++ b/consensus/types/src/runtime_var_list.rs @@ -0,0 +1,137 @@ +use ssz::{Decode, Encode}; +use ssz_derive::Encode; + +#[derive(Debug, Clone, PartialEq, Encode)] +#[ssz(struct_behaviour = "transparent")] +pub struct RuntimeVariableList { + vec: Vec, + #[ssz(skip_serializing, skip_deserializing)] + max_len: usize, +} + +impl RuntimeVariableList { + pub fn new(vec: Vec, max_len: usize) -> Result { + if vec.len() <= max_len { + Ok(Self { vec, max_len }) + } else { + Err(ssz_types::Error::OutOfBounds { + i: vec.len(), + len: max_len, + }) + } + } + + pub fn from_vec(mut vec: Vec, max_len: usize) -> Self { + vec.truncate(max_len); + + Self { vec, max_len } + } + + pub fn to_vec(&self) -> Vec { + self.vec.clone() + } + + pub fn as_slice(&self) -> &[T] { + self.vec.as_slice() + } + + pub fn len(&self) -> usize { + self.vec.len() + } + + pub fn is_empty(&self) -> bool { + self.vec.is_empty() + } + + pub fn from_ssz_bytes(bytes: &[u8], max_len: usize) -> Result { + let vec = if bytes.is_empty() { + vec![] + } else if ::is_ssz_fixed_len() { + let num_items = bytes + .len() + .checked_div(::ssz_fixed_len()) + .ok_or(ssz::DecodeError::ZeroLengthItem)?; + + if num_items > max_len { + return Err(ssz::DecodeError::BytesInvalid(format!( + "VariableList of {} items exceeds maximum of {}", + num_items, max_len + ))); + } + + bytes + .chunks(::ssz_fixed_len()) + .try_fold(Vec::with_capacity(num_items), |mut vec, chunk| { + vec.push(::from_ssz_bytes(chunk)?); + Ok(vec) + }) + .map(Into::into)? + } else { + ssz::decode_list_of_variable_length_items(bytes, Some(max_len))? 
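            // (`Some(max_len)` caps the number of decoded items, mirroring the explicit
            // `num_items > max_len` check in the fixed-length branch above.)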
+ }; + Ok(Self { vec, max_len }) + } +} + +#[cfg(test)] +mod test { + use ssz_types::{typenum::U4, VariableList}; + + use super::*; + + #[test] + fn new() { + let vec = vec![42; 5]; + let runtime_var_list: Result, _> = + RuntimeVariableList::new(vec, 4); + assert!(runtime_var_list.is_err()); + + let vec = vec![42; 3]; + let runtime_var_list: Result, _> = + RuntimeVariableList::new(vec, 4); + assert!(runtime_var_list.is_ok()); + + let vec = vec![42; 4]; + let runtime_var_list: Result, _> = + RuntimeVariableList::new(vec, 4); + assert!(runtime_var_list.is_ok()); + } + + #[test] + fn length() { + let vec = vec![42; 3]; + let runtime_var_list: RuntimeVariableList = + RuntimeVariableList::new(vec.clone(), 4).unwrap(); + let var_list: VariableList = VariableList::from(vec.clone()); + assert_eq!(&runtime_var_list.as_slice()[0..3], &vec[..]); + assert_eq!(runtime_var_list.as_slice(), &vec![42, 42, 42][..]); + assert_eq!(runtime_var_list.len(), var_list.len()); + + let vec = vec![]; + let runtime_var_list: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); + assert_eq!(runtime_var_list.as_slice(), &[] as &[u64]); + assert!(runtime_var_list.is_empty()); + } + + #[test] + fn encode() { + let runtime_var_list: RuntimeVariableList = + RuntimeVariableList::new(vec![0; 2], 2).unwrap(); + + assert_eq!(runtime_var_list.as_ssz_bytes(), vec![0, 0, 0, 0]); + assert_eq!( as Encode>::ssz_fixed_len(), 4); + } + + #[test] + fn round_trip() { + let item = RuntimeVariableList::::new(vec![42; 8], 8).unwrap(); + let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); + assert_eq!(RuntimeVariableList::from_ssz_bytes(encoded, 8), Ok(item)); + + let item = RuntimeVariableList::::new(vec![0; 8], 8).unwrap(); + let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); + assert_eq!(RuntimeVariableList::from_ssz_bytes(encoded, 8), Ok(item)); + } +} diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/shuffling_id.rs index 120d744a5..a5bdc8667 100644 --- a/consensus/types/src/shuffling_id.rs +++ b/consensus/types/src/shuffling_id.rs @@ -1,5 +1,5 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::hash::Hash; diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index 6d86c0563..10010073e 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -3,7 +3,7 @@ use super::{ SelectionProof, Signature, SignedRoot, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 5f623cf07..37304de1f 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,7 +1,8 @@ +use crate::beacon_block_body::format_kzg_commitments; use crate::*; use bls::Signature; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::fmt; use superstruct::superstruct; @@ -37,7 +38,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. 
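///
/// Decoding requires fork context: `from_ssz_bytes(bytes, &spec)` picks the variant from
/// the slot, while the `from_ssz_bytes_for_fork` accessor added below forces a specific
/// `ForkName` (both shown in this file).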
#[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -76,6 +77,8 @@ pub struct SignedBeaconBlock = FullP pub message: BeaconBlockMerge, #[superstruct(only(Capella), partial_getter(rename = "message_capella"))] pub message: BeaconBlockCapella, + #[superstruct(only(Deneb), partial_getter(rename = "message_deneb"))] + pub message: BeaconBlockDeneb, pub signature: Signature, } @@ -90,11 +93,27 @@ impl> SignedBeaconBlock self.message().fork_name(spec) } + /// Returns the name of the fork pertaining to `self` + /// Does not check that the fork is consistent with the slot. + pub fn fork_name_unchecked(&self) -> ForkName { + self.message().fork_name_unchecked() + } + /// SSZ decode with fork variant determined by slot. pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { Self::from_ssz_bytes_with(bytes, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) } + /// SSZ decode with explicit fork variant. + pub fn from_ssz_bytes_for_fork( + bytes: &[u8], + fork_name: ForkName, + ) -> Result { + Self::from_ssz_bytes_with(bytes, |bytes| { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + }) + } + /// SSZ decode which attempts to decode all variants (slow). pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { Self::from_ssz_bytes_with(bytes, BeaconBlock::any_from_ssz_bytes) @@ -136,6 +155,9 @@ impl> SignedBeaconBlock BeaconBlock::Capella(message) => { SignedBeaconBlock::Capella(SignedBeaconBlockCapella { message, signature }) } + BeaconBlock::Deneb(message) => { + SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb { message, signature }) + } } } @@ -186,7 +208,7 @@ impl> SignedBeaconBlock } let domain = spec.get_domain( - self.slot().epoch(E::slots_per_epoch()), + self.epoch(), Domain::BeaconProposer, fork, genesis_validators_root, @@ -218,6 +240,11 @@ impl> SignedBeaconBlock self.message().slot() } + /// Convenience accessor for the block's epoch. + pub fn epoch(&self) -> Epoch { + self.message().slot().epoch(E::slots_per_epoch()) + } + /// Convenience accessor for the block's parent root. pub fn parent_root(&self) -> Hash256 { self.message().parent_root() @@ -232,6 +259,23 @@ impl> SignedBeaconBlock pub fn canonical_root(&self) -> Hash256 { self.message().tree_hash_root() } + + pub fn num_expected_blobs(&self) -> usize { + self.message() + .body() + .blob_kzg_commitments() + .map(|c| c.len()) + .unwrap_or(0) + } + + /// Used for displaying commitments in logs. + pub fn commitments_formatted(&self) -> String { + let Ok(commitments) = self.message().body().blob_kzg_commitments() else { + return "[]".to_string(); + }; + + format_kzg_commitments(commitments.as_ref()) + } } // We can convert pre-Bellatrix blocks without payloads into blocks with payloads. @@ -368,6 +412,62 @@ impl SignedBeaconBlockCapella> { } } +impl SignedBeaconBlockDeneb> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayloadDeneb, + ) -> SignedBeaconBlockDeneb> { + let SignedBeaconBlockDeneb { + message: + BeaconBlockDeneb { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadDeneb { .. 
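+                            // The blinded variant holds only the payload header; the
+                            // `..` pattern discards it, and the rebuild below swaps in
+                            // the supplied full `execution_payload`.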
}, + bls_to_execution_changes, + blob_kzg_commitments, + }, + }, + signature, + } = self; + SignedBeaconBlockDeneb { + message: BeaconBlockDeneb { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadDeneb { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + }, + }, + signature, + } + } +} + impl SignedBeaconBlock> { pub fn try_into_full_block( self, @@ -382,10 +482,14 @@ impl SignedBeaconBlock> { (SignedBeaconBlock::Capella(block), Some(ExecutionPayload::Capella(payload))) => { SignedBeaconBlock::Capella(block.into_full_block(payload)) } + (SignedBeaconBlock::Deneb(block), Some(ExecutionPayload::Deneb(payload))) => { + SignedBeaconBlock::Deneb(block.into_full_block(payload)) + } // avoid wildcard matching forks so that compiler will // direct us here when a new fork has been added (SignedBeaconBlock::Merge(_), _) => return None, (SignedBeaconBlock::Capella(_), _) => return None, + (SignedBeaconBlock::Deneb(_), _) => return None, }; Some(full_block) } @@ -440,6 +544,120 @@ impl> ForkVersionDeserialize } } +/// This module can be used to encode and decode a `SignedBeaconBlock` the same way it +/// would be done if we had tagged the superstruct enum with +/// `#[ssz(enum_behaviour = "union")]` +/// This should _only_ be used *some* cases when storing these objects in the database +/// and _NEVER_ for encoding / decoding blocks sent over the network! +pub mod ssz_tagged_signed_beacon_block { + use super::*; + pub mod encode { + use super::*; + #[allow(unused_imports)] + use ssz::*; + + pub fn is_ssz_fixed_len() -> bool { + false + } + + pub fn ssz_fixed_len() -> usize { + BYTES_PER_LENGTH_OFFSET + } + + pub fn ssz_bytes_len>( + block: &SignedBeaconBlock, + ) -> usize { + block + .ssz_bytes_len() + .checked_add(1) + .expect("encoded length must be less than usize::max") + } + + pub fn ssz_append>( + block: &SignedBeaconBlock, + buf: &mut Vec, + ) { + let fork_name = block.fork_name_unchecked(); + fork_name.ssz_append(buf); + block.ssz_append(buf); + } + + pub fn as_ssz_bytes>( + block: &SignedBeaconBlock, + ) -> Vec { + let mut buf = vec![]; + ssz_append(block, &mut buf); + + buf + } + } + + pub mod decode { + use super::*; + #[allow(unused_imports)] + use ssz::*; + + pub fn is_ssz_fixed_len() -> bool { + false + } + + pub fn ssz_fixed_len() -> usize { + BYTES_PER_LENGTH_OFFSET + } + + pub fn from_ssz_bytes>( + bytes: &[u8], + ) -> Result, DecodeError> { + let fork_byte = bytes + .first() + .copied() + .ok_or(DecodeError::OutOfBoundsByte { i: 0 })?; + let body = bytes + .get(1..) + .ok_or(DecodeError::OutOfBoundsByte { i: 1 })?; + + match ForkName::from_ssz_bytes(&[fork_byte])? 
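+            // Note: `encode::ssz_append` above writes a single SSZ-encoded `ForkName`
+            // byte followed by the untagged SSZ of the block, so decoding peels off
+            // that tag byte and dispatches to the matching fork variant below.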
{ + ForkName::Base => Ok(SignedBeaconBlock::Base( + SignedBeaconBlockBase::from_ssz_bytes(body)?, + )), + ForkName::Altair => Ok(SignedBeaconBlock::Altair( + SignedBeaconBlockAltair::from_ssz_bytes(body)?, + )), + ForkName::Merge => Ok(SignedBeaconBlock::Merge( + SignedBeaconBlockMerge::from_ssz_bytes(body)?, + )), + ForkName::Capella => Ok(SignedBeaconBlock::Capella( + SignedBeaconBlockCapella::from_ssz_bytes(body)?, + )), + ForkName::Deneb => Ok(SignedBeaconBlock::Deneb( + SignedBeaconBlockDeneb::from_ssz_bytes(body)?, + )), + } + } + } +} + +pub mod ssz_tagged_signed_beacon_block_arc { + use super::*; + pub mod encode { + pub use super::ssz_tagged_signed_beacon_block::encode::*; + } + + pub mod decode { + pub use super::ssz_tagged_signed_beacon_block::decode::{is_ssz_fixed_len, ssz_fixed_len}; + use super::*; + #[allow(unused_imports)] + use ssz::*; + use std::sync::Arc; + + pub fn from_ssz_bytes>( + bytes: &[u8], + ) -> Result>, DecodeError> { + ssz_tagged_signed_beacon_block::decode::from_ssz_bytes(bytes).map(Arc::new) + } + } +} + #[cfg(test)] mod test { use super::*; @@ -481,4 +699,38 @@ mod test { assert_eq!(reconstructed, block); } } + + #[test] + fn test_ssz_tagged_signed_beacon_block() { + type E = MainnetEthSpec; + + let spec = &E::default_spec(); + let sig = Signature::empty(); + let blocks = vec![ + SignedBeaconBlock::::from_block( + BeaconBlock::Base(BeaconBlockBase::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block( + BeaconBlock::Altair(BeaconBlockAltair::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block( + BeaconBlock::Merge(BeaconBlockMerge::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block( + BeaconBlock::Capella(BeaconBlockCapella::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block(BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)), sig), + ]; + + for block in blocks { + let encoded = ssz_tagged_signed_beacon_block::encode::as_ssz_bytes(&block); + let decoded = ssz_tagged_signed_beacon_block::decode::from_ssz_bytes::(&encoded) + .expect("should decode"); + assert_eq!(decoded, block); + } + } } diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index c265eded1..3d4269a2c 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -2,7 +2,7 @@ use crate::{ test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, SignedRoot, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index 2b17095ae..2a4ecdf43 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::Signature; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs index 4cb358843..6cb45ac8e 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -3,7 +3,7 @@ use 
super::{ SignedRoot, SyncCommitteeContribution, SyncSelectionProof, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 3392826a6..30eda1179 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, VoluntaryExit}; use bls::Signature; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/signing_data.rs index b80d4a40d..f30d5fdfc 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/signing_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Hash256; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index e9f1e192b..ec659d1db 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -15,7 +15,7 @@ use crate::{ChainSpec, SignedRoot}; use rand::RngCore; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; use std::hash::Hash; @@ -76,8 +76,8 @@ impl Slot { } impl Epoch { - pub const fn new(slot: u64) -> Epoch { - Epoch(slot) + pub const fn new(epoch: u64) -> Epoch { + Epoch(epoch) } pub fn max_value() -> Epoch { diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 415d6a140..2752e3109 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,7 +1,7 @@ //! Identifies each shard by an integer identifier. 
use crate::{AttestationData, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; use swap_or_not_shuffle::compute_shuffled_index; diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 300c86fc0..bb00c4aa2 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -3,7 +3,7 @@ use crate::test_utils::TestRandom; use crate::{AggregateSignature, BitVector, EthSpec, SyncCommitteeContribution}; use derivative::Derivative; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index b10106812..3da130bb0 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{SignedRoot, Slot}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -25,11 +25,11 @@ pub struct SyncAggregatorSelectionData { pub subcommittee_index: u64, } +impl SignedRoot for SyncAggregatorSelectionData {} + #[cfg(test)] mod tests { use super::*; ssz_and_tree_hash_tests!(SyncAggregatorSelectionData); } - -impl SignedRoot for SyncAggregatorSelectionData {} diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 43ba23f12..b42a000bb 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -1,9 +1,8 @@ use crate::test_utils::TestRandom; -use crate::typenum::Unsigned; use crate::{EthSpec, FixedVector, SyncSubnetId}; use bls::PublicKeyBytes; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use test_random_derive::TestRandom; @@ -46,14 +45,11 @@ pub struct SyncCommittee { impl SyncCommittee { /// Create a temporary sync committee that should *never* be included in a legitimate consensus object. - pub fn temporary() -> Result { - Ok(Self { - pubkeys: FixedVector::new(vec![ - PublicKeyBytes::empty(); - T::SyncCommitteeSize::to_usize() - ])?, + pub fn temporary() -> Self { + Self { + pubkeys: FixedVector::from_elem(PublicKeyBytes::empty()), aggregate_pubkey: PublicKeyBytes::empty(), - }) + } } /// Return the pubkeys in this `SyncCommittee` for the given `subcommittee_index`. 
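A note on the `SyncCommittee::temporary` change above: `FixedVector::from_elem` fills every `SyncCommitteeSize` slot with an empty pubkey directly, so the constructor can no longer fail and both the `Result` wrapper and the `Unsigned` import go away. A minimal call-site sketch, assuming a hypothetical caller that previously propagated the error:

    // Before: an error path that could never actually trigger.
    // let committee = SyncCommittee::<E>::temporary()?;
    // After: the call is infallible.
    let committee = SyncCommittee::<E>::temporary();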
diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index 425f8f116..b8ee5c2e3 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -2,7 +2,7 @@ use super::{AggregateSignature, EthSpec, SignedRoot}; use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, BitVector, Hash256, Slot, SyncCommitteeMessage}; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index d0301cdf6..d7d309cd5 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::{ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, Slot}; use crate::slot_data::SlotData; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_duty.rs index e3ffe62bf..1058b9d3b 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_duty.rs @@ -1,7 +1,7 @@ use crate::{EthSpec, SyncCommittee, SyncSubnetId}; use bls::PublicKeyBytes; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashSet; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 5af756ae0..560548292 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -2,7 +2,7 @@ use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::EthSpec; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_types::typenum::Unsigned; use std::collections::HashSet; use std::fmt::{self, Display}; diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 51b79d8d5..f31df2ce1 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -10,6 +10,8 @@ mod address; mod aggregate_signature; mod bitfield; mod hash256; +mod kzg_commitment; +mod kzg_proof; mod public_key; mod public_key_bytes; mod secret_key; diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index 5cb4e7d52..3992421e3 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -4,8 +4,21 @@ use smallvec::smallvec; impl TestRandom for BitList { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + let initial_len = std::cmp::max(1, (N::to_usize() + 7) / 8); + let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); + + let non_zero_bytes = raw_bytes + .iter() + .enumerate() + .rev() + .find_map(|(i, byte)| (*byte > 0).then_some(i + 1)) + .unwrap_or(0); + + if non_zero_bytes < initial_len { + raw_bytes.truncate(non_zero_bytes); + } + 
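+        // Trim trailing zero bytes before decoding: the highest set bit acts as the
+        // length marker for a variable-length bitfield, so only bytes up to the last
+        // non-zero byte carry information.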
Self::from_bytes(raw_bytes).expect("we generate a valid BitList") } } diff --git a/consensus/types/src/test_utils/test_random/kzg_commitment.rs b/consensus/types/src/test_utils/test_random/kzg_commitment.rs new file mode 100644 index 000000000..a4030f2b6 --- /dev/null +++ b/consensus/types/src/test_utils/test_random/kzg_commitment.rs @@ -0,0 +1,7 @@ +use super::*; + +impl TestRandom for KzgCommitment { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { + KzgCommitment(<[u8; 48] as TestRandom>::random_for_test(rng)) + } +} diff --git a/consensus/types/src/test_utils/test_random/kzg_proof.rs b/consensus/types/src/test_utils/test_random/kzg_proof.rs new file mode 100644 index 000000000..d6d8ed2d0 --- /dev/null +++ b/consensus/types/src/test_utils/test_random/kzg_proof.rs @@ -0,0 +1,10 @@ +use super::*; +use kzg::{KzgProof, BYTES_PER_COMMITMENT}; + +impl TestRandom for KzgProof { + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut bytes = [0; BYTES_PER_COMMITMENT]; + rng.fill_bytes(&mut bytes); + Self(bytes) + } +} diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 6860397fb..8fbd9009e 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -2,7 +2,7 @@ use crate::{ test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs index de7f26cc6..174014df8 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator_registration_data.rs @@ -21,3 +21,17 @@ pub struct ValidatorRegistrationData { } impl SignedRoot for ValidatorRegistrationData {} + +impl SignedValidatorRegistrationData { + pub fn verify_signature(&self, spec: &ChainSpec) -> bool { + self.message + .pubkey + .decompress() + .map(|pubkey| { + let domain = spec.get_builder_domain(); + let message = self.message.signing_root(domain); + self.signature.verify(&pubkey, message) + }) + .unwrap_or(false) + } +} diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 02686fef9..a24f7376a 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -1,9 +1,9 @@ use crate::{ - test_utils::TestRandom, ChainSpec, Domain, Epoch, Fork, Hash256, SecretKey, SignedRoot, + test_utils::TestRandom, ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, SignedVoluntaryExit, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -37,16 +37,20 @@ impl VoluntaryExit { pub fn sign( self, secret_key: &SecretKey, - fork: &Fork, genesis_validators_root: Hash256, spec: &ChainSpec, ) -> SignedVoluntaryExit { - let domain = spec.get_domain( - self.epoch, - Domain::VoluntaryExit, - fork, - genesis_validators_root, - ); + let fork_name = spec.fork_name_at_epoch(self.epoch); + let fork_version = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + spec.fork_version_for_name(fork_name) + } + // EIP-7044 + ForkName::Deneb => spec.fork_version_for_name(ForkName::Capella), + }; + let domain = + spec.compute_domain(Domain::VoluntaryExit, 
fork_version, genesis_validators_root); + let message = self.signing_root(domain); SignedVoluntaryExit { message: self, diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index eed7c7e27..3e6115655 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 4340fb3e8..1216fc2a9 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -10,7 +10,6 @@ tree_hash = { workspace = true } milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.5.1", optional = true } rand = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" ethereum_serde_utils = { workspace = true } hex = { workspace = true } ethereum_hashing = { workspace = true } diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index bd28abff9..e831a175c 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -99,9 +99,8 @@ pub fn verify_signature_sets<'a>( // Aggregate all the public keys. // Public keys have already been checked for subgroup and infinity - let agg_pk = match blst_core::AggregatePublicKey::aggregate(&signing_keys, false) { - Ok(agg_pk) => agg_pk, - Err(_) => return false, + let Ok(agg_pk) = blst_core::AggregatePublicKey::aggregate(&signing_keys, false) else { + return false; }; pks.push(agg_pk.to_public_key()); } diff --git a/crypto/bls/src/zeroize_hash.rs b/crypto/bls/src/zeroize_hash.rs index 41136f97a..e346f456d 100644 --- a/crypto/bls/src/zeroize_hash.rs +++ b/crypto/bls/src/zeroize_hash.rs @@ -1,5 +1,5 @@ use super::SECRET_KEY_BYTES_LEN; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use zeroize::Zeroize; /// Provides a wrapper around a `[u8; SECRET_KEY_BYTES_LEN]` that implements `Zeroize` on `Drop`. 
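A note on the `VoluntaryExit::sign` change above: under EIP-7044 the exit signing domain is pinned instead of tracking the current fork, which is what makes a signed exit valid in perpetuity. For an exit whose epoch falls in Deneb, the new logic reduces to the sketch below (a minimal illustration reusing names from the diff, not additional API):

    // EIP-7044: a Deneb-epoch exit still signs over the Capella fork version.
    let fork_version = spec.fork_version_for_name(ForkName::Capella);
    let domain = spec.compute_domain(Domain::VoluntaryExit, fork_version, genesis_validators_root);
    let message = exit.signing_root(domain);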
diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml new file mode 100644 index 000000000..7b70166f9 --- /dev/null +++ b/crypto/kzg/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "kzg" +version = "0.1.0" +authors = ["Pawan Dhananjay "] +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +arbitrary = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +tree_hash = { workspace = true } +derivative = { workspace = true } +serde = { workspace = true } +ethereum_serde_utils = { workspace = true } +hex = { workspace = true } +ethereum_hashing = { workspace = true } +c-kzg = { git = "https://github.com/ethereum/c-kzg-4844", rev = "748283cced543c486145d5f3f38684becdfe3e1b"} \ No newline at end of file diff --git a/crypto/kzg/src/kzg_commitment.rs b/crypto/kzg/src/kzg_commitment.rs new file mode 100644 index 000000000..cfab09f63 --- /dev/null +++ b/crypto/kzg/src/kzg_commitment.rs @@ -0,0 +1,144 @@ +use c_kzg::BYTES_PER_COMMITMENT; +use derivative::Derivative; +use ethereum_hashing::hash_fixed; +use serde::de::{Deserialize, Deserializer}; +use serde::ser::{Serialize, Serializer}; +use ssz_derive::{Decode, Encode}; +use std::fmt; +use std::fmt::{Debug, Display, Formatter}; +use std::str::FromStr; +use tree_hash::{Hash256, PackedEncoding, TreeHash}; + +pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; + +#[derive(Derivative, Clone, Copy, Encode, Decode)] +#[derivative(PartialEq, Eq, Hash)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgCommitment(pub [u8; c_kzg::BYTES_PER_COMMITMENT]); + +impl KzgCommitment { + pub fn calculate_versioned_hash(&self) -> Hash256 { + let mut versioned_hash = hash_fixed(&self.0); + versioned_hash[0] = VERSIONED_HASH_VERSION_KZG; + Hash256::from_slice(versioned_hash.as_slice()) + } + + pub fn empty_for_testing() -> Self { + KzgCommitment([0; c_kzg::BYTES_PER_COMMITMENT]) + } +} + +impl From for c_kzg::Bytes48 { + fn from(value: KzgCommitment) -> Self { + value.0.into() + } +} + +impl Display for KzgCommitment { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "0x")?; + for i in &self.0[0..2] { + write!(f, "{:02x}", i)?; + } + write!(f, "…")?; + for i in &self.0[BYTES_PER_COMMITMENT - 2..BYTES_PER_COMMITMENT] { + write!(f, "{:02x}", i)?; + } + Ok(()) + } +} + +impl TreeHash for KzgCommitment { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; BYTES_PER_COMMITMENT] as TreeHash>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; BYTES_PER_COMMITMENT] as TreeHash>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl Serialize for KzgCommitment { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&format!("{:?}", self)) + } +} + +impl<'de> Deserialize<'de> for KzgCommitment { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let string = String::deserialize(deserializer)?; + Self::from_str(&string).map_err(serde::de::Error::custom) + } +} + +impl FromStr for KzgCommitment { + type Err = String; + + fn from_str(s: &str) -> Result { + if let Some(stripped) = s.strip_prefix("0x") { + let bytes = hex::decode(stripped).map_err(|e| e.to_string())?; + if bytes.len() == BYTES_PER_COMMITMENT { + let mut kzg_commitment_bytes = [0; 
BYTES_PER_COMMITMENT]; + kzg_commitment_bytes[..].copy_from_slice(&bytes); + Ok(Self(kzg_commitment_bytes)) + } else { + Err(format!( + "InvalidByteLength: got {}, expected {}", + bytes.len(), + BYTES_PER_COMMITMENT + )) + } + } else { + Err("must start with 0x".to_string()) + } + } +} + +impl Debug for KzgCommitment { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(self.0)) + } +} + +impl arbitrary::Arbitrary<'_> for KzgCommitment { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + let mut bytes = [0u8; BYTES_PER_COMMITMENT]; + u.fill_buffer(&mut bytes)?; + Ok(KzgCommitment(bytes)) + } +} + +#[test] +fn kzg_commitment_display() { + let display_commitment_str = "0x53fa…adac"; + let display_commitment = KzgCommitment::from_str( + "0x53fa09af35d1d1a9e76f65e16112a9064ce30d1e4e2df98583f0f5dc2e7dd13a4f421a9c89f518fafd952df76f23adac", + ) + .unwrap() + .to_string(); + + assert_eq!(display_commitment, display_commitment_str); +} + +#[test] +fn kzg_commitment_debug() { + let debug_commitment_str = + "0x53fa09af35d1d1a9e76f65e16112a9064ce30d1e4e2df98583f0f5dc2e7dd13a4f421a9c89f518fafd952df76f23adac"; + let debug_commitment = KzgCommitment::from_str(debug_commitment_str).unwrap(); + + assert_eq!(format!("{debug_commitment:?}"), debug_commitment_str); +} diff --git a/crypto/kzg/src/kzg_proof.rs b/crypto/kzg/src/kzg_proof.rs new file mode 100644 index 000000000..c9a138a31 --- /dev/null +++ b/crypto/kzg/src/kzg_proof.rs @@ -0,0 +1,119 @@ +use c_kzg::BYTES_PER_PROOF; +use serde::de::{Deserialize, Deserializer}; +use serde::ser::{Serialize, Serializer}; +use ssz_derive::{Decode, Encode}; +use std::fmt; +use std::fmt::Debug; +use std::str::FromStr; +use tree_hash::{PackedEncoding, TreeHash}; + +#[derive(PartialEq, Hash, Clone, Copy, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgProof(pub [u8; BYTES_PER_PROOF]); + +impl From for c_kzg::Bytes48 { + fn from(value: KzgProof) -> Self { + value.0.into() + } +} + +impl KzgProof { + /// Creates a valid proof using `G1_POINT_AT_INFINITY`. 
+ pub fn empty() -> Self { + let mut bytes = [0; BYTES_PER_PROOF]; + bytes[0] = 0xc0; + Self(bytes) + } +} + +impl fmt::Display for KzgProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(self.0)) + } +} + +impl From<[u8; BYTES_PER_PROOF]> for KzgProof { + fn from(bytes: [u8; BYTES_PER_PROOF]) -> Self { + Self(bytes) + } +} + +impl Into<[u8; BYTES_PER_PROOF]> for KzgProof { + fn into(self) -> [u8; BYTES_PER_PROOF] { + self.0 + } +} + +impl TreeHash for KzgProof { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; BYTES_PER_PROOF]>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; BYTES_PER_PROOF]>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl Serialize for KzgProof { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl<'de> Deserialize<'de> for KzgProof { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let string = String::deserialize(deserializer)?; + Self::from_str(&string).map_err(serde::de::Error::custom) + } +} + +impl FromStr for KzgProof { + type Err = String; + + fn from_str(s: &str) -> Result { + if let Some(stripped) = s.strip_prefix("0x") { + let bytes = hex::decode(stripped).map_err(|e| e.to_string())?; + if bytes.len() == BYTES_PER_PROOF { + let mut kzg_proof_bytes = [0; BYTES_PER_PROOF]; + kzg_proof_bytes[..].copy_from_slice(&bytes); + Ok(Self(kzg_proof_bytes)) + } else { + Err(format!( + "InvalidByteLength: got {}, expected {}", + bytes.len(), + BYTES_PER_PROOF + )) + } + } else { + Err("must start with 0x".to_string()) + } + } +} + +impl Debug for KzgProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(self.0)) + } +} + +impl arbitrary::Arbitrary<'_> for KzgProof { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + let mut bytes = [0u8; BYTES_PER_PROOF]; + u.fill_buffer(&mut bytes)?; + Ok(KzgProof(bytes)) + } +} diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs new file mode 100644 index 000000000..0e096ba55 --- /dev/null +++ b/crypto/kzg/src/lib.rs @@ -0,0 +1,144 @@ +mod kzg_commitment; +mod kzg_proof; +mod trusted_setup; + +use std::fmt::Debug; + +pub use crate::{ + kzg_commitment::{KzgCommitment, VERSIONED_HASH_VERSION_KZG}, + kzg_proof::KzgProof, + trusted_setup::TrustedSetup, +}; +pub use c_kzg::{ + Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, + BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, +}; +#[derive(Debug)] +pub enum Error { + /// An error from the underlying kzg library. + Kzg(c_kzg::Error), + /// The kzg verification failed + KzgVerificationFailed, +} + +impl From for Error { + fn from(value: c_kzg::Error) -> Self { + Error::Kzg(value) + } +} + +/// A wrapper over a kzg library that holds the trusted setup parameters. +#[derive(Debug)] +pub struct Kzg { + trusted_setup: KzgSettings, +} + +impl Kzg { + /// Load the kzg trusted setup parameters from a vec of G1 and G2 points. 
+ pub fn new_from_trusted_setup(trusted_setup: TrustedSetup) -> Result { + Ok(Self { + trusted_setup: KzgSettings::load_trusted_setup( + &trusted_setup.g1_points(), + &trusted_setup.g2_points(), + )?, + }) + } + + /// Compute the kzg proof given a blob and its kzg commitment. + pub fn compute_blob_kzg_proof( + &self, + blob: &Blob, + kzg_commitment: KzgCommitment, + ) -> Result { + c_kzg::KzgProof::compute_blob_kzg_proof(blob, &kzg_commitment.into(), &self.trusted_setup) + .map(|proof| KzgProof(proof.to_bytes().into_inner())) + .map_err(Into::into) + } + + /// Verify a kzg proof given the blob, kzg commitment and kzg proof. + pub fn verify_blob_kzg_proof( + &self, + blob: &Blob, + kzg_commitment: KzgCommitment, + kzg_proof: KzgProof, + ) -> Result<(), Error> { + if !c_kzg::KzgProof::verify_blob_kzg_proof( + blob, + &kzg_commitment.into(), + &kzg_proof.into(), + &self.trusted_setup, + )? { + Err(Error::KzgVerificationFailed) + } else { + Ok(()) + } + } + + /// Verify a batch of blob commitment proof triplets. + /// + /// Note: This method is slightly faster than calling `Self::verify_blob_kzg_proof` in a loop sequentially. + /// TODO(pawan): test performance against a parallelized rayon impl. + pub fn verify_blob_kzg_proof_batch( + &self, + blobs: &[Blob], + kzg_commitments: &[KzgCommitment], + kzg_proofs: &[KzgProof], + ) -> Result<(), Error> { + let commitments_bytes = kzg_commitments + .iter() + .map(|comm| Bytes48::from(*comm)) + .collect::>(); + + let proofs_bytes = kzg_proofs + .iter() + .map(|proof| Bytes48::from(*proof)) + .collect::>(); + + if !c_kzg::KzgProof::verify_blob_kzg_proof_batch( + blobs, + &commitments_bytes, + &proofs_bytes, + &self.trusted_setup, + )? { + Err(Error::KzgVerificationFailed) + } else { + Ok(()) + } + } + + /// Converts a blob to a kzg commitment. + pub fn blob_to_kzg_commitment(&self, blob: &Blob) -> Result { + c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, &self.trusted_setup) + .map(|commitment| KzgCommitment(commitment.to_bytes().into_inner())) + .map_err(Into::into) + } + + /// Computes the kzg proof for a given `blob` and an evaluation point `z` + pub fn compute_kzg_proof( + &self, + blob: &Blob, + z: &Bytes32, + ) -> Result<(KzgProof, Bytes32), Error> { + c_kzg::KzgProof::compute_kzg_proof(blob, z, &self.trusted_setup) + .map(|(proof, y)| (KzgProof(proof.to_bytes().into_inner()), y)) + .map_err(Into::into) + } + + /// Verifies a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` + pub fn verify_kzg_proof( + &self, + kzg_commitment: KzgCommitment, + z: &Bytes32, + y: &Bytes32, + kzg_proof: KzgProof, + ) -> Result { + c_kzg::KzgProof::verify_kzg_proof( + &kzg_commitment.into(), + z, + y, + &kzg_proof.into(), + &self.trusted_setup, + ) + .map_err(Into::into) + } +} diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs new file mode 100644 index 000000000..55a00eed1 --- /dev/null +++ b/crypto/kzg/src/trusted_setup.rs @@ -0,0 +1,142 @@ +use c_kzg::{BYTES_PER_G1_POINT, BYTES_PER_G2_POINT}; +use serde::{ + de::{self, Deserializer, Visitor}, + Deserialize, Serialize, +}; + +/// Wrapper over a BLS G1 point's byte representation. +#[derive(Debug, Clone, PartialEq)] +struct G1Point([u8; BYTES_PER_G1_POINT]); + +/// Wrapper over a BLS G2 point's byte representation. +#[derive(Debug, Clone, PartialEq)] +struct G2Point([u8; BYTES_PER_G2_POINT]); + +/// Contains the trusted setup parameters that are required to instantiate a +/// `c_kzg::KzgSettings` object. 
+///
+/// The serialize/deserialize implementations are written according to
+/// the format specified in the ethereum consensus specs trusted setup files.
+///
+/// See https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct TrustedSetup {
+    #[serde(rename = "g1_lagrange")]
+    g1_points: Vec<G1Point>,
+    #[serde(rename = "g2_monomial")]
+    g2_points: Vec<G2Point>,
+}
+
+impl TrustedSetup {
+    pub fn g1_points(&self) -> Vec<[u8; BYTES_PER_G1_POINT]> {
+        self.g1_points.iter().map(|p| p.0).collect()
+    }
+
+    pub fn g2_points(&self) -> Vec<[u8; BYTES_PER_G2_POINT]> {
+        self.g2_points.iter().map(|p| p.0).collect()
+    }
+
+    pub fn g1_len(&self) -> usize {
+        self.g1_points.len()
+    }
+}
+
+impl Serialize for G1Point {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        let point = hex::encode(self.0);
+        serializer.serialize_str(&point)
+    }
+}
+
+impl Serialize for G2Point {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        let point = hex::encode(self.0);
+        serializer.serialize_str(&point)
+    }
+}
+
+impl<'de> Deserialize<'de> for G1Point {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct G1PointVisitor;
+
+        impl<'de> Visitor<'de> for G1PointVisitor {
+            type Value = G1Point;
+            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
+                formatter.write_str("A 48 byte hex encoded string")
+            }
+
+            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                let point = hex::decode(strip_prefix(v))
+                    .map_err(|e| de::Error::custom(format!("Failed to decode G1 point: {}", e)))?;
+                if point.len() != BYTES_PER_G1_POINT {
+                    return Err(de::Error::custom(format!(
+                        "G1 point has invalid length. Expected {} got {}",
+                        BYTES_PER_G1_POINT,
+                        point.len()
+                    )));
+                }
+                let mut res = [0; BYTES_PER_G1_POINT];
+                res.copy_from_slice(&point);
+                Ok(G1Point(res))
+            }
+        }
+
+        deserializer.deserialize_str(G1PointVisitor)
+    }
+}
+
+impl<'de> Deserialize<'de> for G2Point {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct G2PointVisitor;
+
+        impl<'de> Visitor<'de> for G2PointVisitor {
+            type Value = G2Point;
+            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
+                formatter.write_str("A 96 byte hex encoded string")
+            }
+
+            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                let point = hex::decode(strip_prefix(v))
+                    .map_err(|e| de::Error::custom(format!("Failed to decode G2 point: {}", e)))?;
+                if point.len() != BYTES_PER_G2_POINT {
+                    return Err(de::Error::custom(format!(
+                        "G2 point has invalid length. Expected {} got {}",
+                        BYTES_PER_G2_POINT,
+                        point.len()
+                    )));
+                }
+                let mut res = [0; BYTES_PER_G2_POINT];
+                res.copy_from_slice(&point);
+                Ok(G2Point(res))
+            }
+        }
+
+        deserializer.deserialize_str(G2PointVisitor)
+    }
+}
+
+fn strip_prefix(s: &str) -> &str {
+    if let Some(stripped) = s.strip_prefix("0x") {
+        stripped
+    } else {
+        s
+    }
+}
diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml
index 1570c171c..07045dd95 100644
--- a/database_manager/Cargo.toml
+++ b/database_manager/Cargo.toml
@@ -9,6 +9,7 @@ beacon_node = { workspace = true }
 clap = { workspace = true }
 clap_utils = { workspace = true }
 environment = { workspace = true }
+hex = { workspace = true }
 logging = { workspace = true }
 sloggers = { workspace = true }
 store = { workspace = true }
diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs
index ce0b094b7..95af4d638 100644
--- a/database_manager/src/lib.rs
+++ b/database_manager/src/lib.rs
@@ -5,17 +5,18 @@ use beacon_chain::{
 use beacon_node::{get_data_dir, get_slots_per_restore_point, ClientConfig};
 use clap::{App, Arg, ArgMatches};
 use environment::{Environment, RuntimeContext};
-use slog::{info, Logger};
+use slog::{info, warn, Logger};
 use std::fs;
 use std::io::Write;
 use std::path::PathBuf;
+use store::metadata::STATE_UPPER_LIMIT_NO_RETAIN;
 use store::{
     errors::Error,
     metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION},
     DBColumn, HotColdDB, KeyValueStore, LevelDB,
 };
 use strum::{EnumString, EnumVariantNames, VariantNames};
-use types::EthSpec;
+use types::{BeaconState, EthSpec, Slot};
 
 pub const CMD: &str = "database_manager";
 
@@ -60,6 +61,24 @@ pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> {
             .default_value("sizes")
             .possible_values(InspectTarget::VARIANTS),
         )
+        .arg(
+            Arg::with_name("skip")
+                .long("skip")
+                .value_name("N")
+                .help("Skip over the first N keys"),
+        )
+        .arg(
+            Arg::with_name("limit")
+                .long("limit")
+                .value_name("N")
+                .help("Output at most N keys"),
+        )
+        .arg(
+            Arg::with_name("freezer")
+                .long("freezer")
+                .help("Inspect the freezer DB rather than the hot DB")
+                .takes_value(false),
+        )
         .arg(
             Arg::with_name("output-dir")
                 .long("output-dir")
@@ -70,11 +89,35 @@ pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> {
 }
 
 pub fn prune_payloads_app<'a, 'b>() -> App<'a, 'b> {
-    App::new("prune_payloads")
+    App::new("prune-payloads")
+        .alias("prune_payloads")
         .setting(clap::AppSettings::ColoredHelp)
         .about("Prune finalized execution payloads")
 }
 
+pub fn prune_blobs_app<'a, 'b>() -> App<'a, 'b> {
+    App::new("prune-blobs")
+        .alias("prune_blobs")
+        .setting(clap::AppSettings::ColoredHelp)
+        .about("Prune blobs older than data availability boundary")
+}
+
+pub fn prune_states_app<'a, 'b>() -> App<'a, 'b> {
+    App::new("prune-states")
+        .alias("prune_states")
+        .arg(
+            Arg::with_name("confirm")
+                .long("confirm")
+                .help(
+                    "Commit to pruning states irreversibly. Without this flag the command will \
+                    just check that the database is capable of being pruned.",
+                )
+                .takes_value(false),
+        )
+        .setting(clap::AppSettings::ColoredHelp)
+        .about("Prune all beacon states from the freezer database")
+}
+
 pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
     App::new(CMD)
         .visible_aliases(&["db"])
@@ -98,10 +141,30 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .help("Data directory for the freezer database.")
                 .takes_value(true),
         )
+        .arg(
+            Arg::with_name("blob-prune-margin-epochs")
+                .long("blob-prune-margin-epochs")
+                .value_name("EPOCHS")
+                .help(
+                    "The margin for blob pruning in epochs.
The oldest blobs are pruned \ + up until data_availability_boundary - blob_prune_margin_epochs.", + ) + .takes_value(true) + .default_value("0"), + ) + .arg( + Arg::with_name("blobs-dir") + .long("blobs-dir") + .value_name("DIR") + .help("Data directory for the blobs database.") + .takes_value(true), + ) .subcommand(migrate_cli_app()) .subcommand(version_cli_app()) .subcommand(inspect_cli_app()) .subcommand(prune_payloads_app()) + .subcommand(prune_blobs_app()) + .subcommand(prune_states_app()) } fn parse_client_config( @@ -116,10 +179,20 @@ fn parse_client_config( client_config.freezer_db_path = Some(freezer_dir); } + if let Some(blobs_db_dir) = clap_utils::parse_optional(cli_args, "blobs-dir")? { + client_config.blobs_db_path = Some(blobs_db_dir); + } + let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; client_config.store.slots_per_restore_point = sprp; client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; + if let Some(blob_prune_margin_epochs) = + clap_utils::parse_optional(cli_args, "blob-prune-margin-epochs")? + { + client_config.store.blob_prune_margin_epochs = blob_prune_margin_epochs; + } + Ok(client_config) } @@ -131,11 +204,13 @@ pub fn display_db_version( let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); let mut version = CURRENT_SCHEMA_VERSION; HotColdDB::, LevelDB>::open( &hot_path, &cold_path, + &blobs_path, |_, from, _| { version = from; Ok(()) @@ -158,7 +233,7 @@ pub fn display_db_version( Ok(()) } -#[derive(Debug, EnumString, EnumVariantNames)] +#[derive(Debug, PartialEq, Eq, EnumString, EnumVariantNames)] pub enum InspectTarget { #[strum(serialize = "sizes")] ValueSizes, @@ -166,11 +241,16 @@ pub enum InspectTarget { ValueTotal, #[strum(serialize = "values")] Values, + #[strum(serialize = "gaps")] + Gaps, } pub struct InspectConfig { column: DBColumn, target: InspectTarget, + skip: Option, + limit: Option, + freezer: bool, /// Configures where the inspect output should be stored. 
output_dir: PathBuf, } @@ -178,11 +258,18 @@ pub struct InspectConfig { fn parse_inspect_config(cli_args: &ArgMatches) -> Result { let column = clap_utils::parse_required(cli_args, "column")?; let target = clap_utils::parse_required(cli_args, "output")?; + let skip = clap_utils::parse_optional(cli_args, "skip")?; + let limit = clap_utils::parse_optional(cli_args, "limit")?; + let freezer = cli_args.is_present("freezer"); + let output_dir: PathBuf = clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new); Ok(InspectConfig { column, target, + skip, + limit, + freezer, output_dir, }) } @@ -196,10 +283,12 @@ pub fn inspect_db( let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); let db = HotColdDB::, LevelDB>::open( &hot_path, &cold_path, + &blobs_path, |_, _, _| Ok(()), client_config.store, spec, @@ -208,6 +297,20 @@ pub fn inspect_db( .map_err(|e| format!("{:?}", e))?; let mut total = 0; + let mut num_keys = 0; + + let sub_db = if inspect_config.freezer { + &db.cold_db + } else { + &db.hot_db + }; + + let skip = inspect_config.skip.unwrap_or(0); + let limit = inspect_config.limit.unwrap_or(usize::MAX); + + let mut prev_key = 0; + let mut found_gaps = false; + let base_path = &inspect_config.output_dir; if let InspectTarget::Values = inspect_config.target { @@ -215,20 +318,41 @@ pub fn inspect_db( .map_err(|e| format!("Unable to create import directory: {:?}", e))?; } - for res in db.hot_db.iter_column(inspect_config.column) { + for res in sub_db + .iter_column::>(inspect_config.column) + .skip(skip) + .take(limit) + { let (key, value) = res.map_err(|e| format!("{:?}", e))?; match inspect_config.target { InspectTarget::ValueSizes => { - println!("{:?}: {} bytes", key, value.len()); - total += value.len(); + println!("{}: {} bytes", hex::encode(&key), value.len()); } - InspectTarget::ValueTotal => { - total += value.len(); + InspectTarget::Gaps => { + // Convert last 8 bytes of key to u64. + let numeric_key = u64::from_be_bytes( + key[key.len() - 8..] 
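+                        // (Keys in these columns end in a big-endian u64 such as a
+                        // slot, so consecutive entries should differ by exactly one
+                        // when the column has no gaps.)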
+ .try_into() + .expect("key is at least 8 bytes"), + ); + + if numeric_key > prev_key + 1 { + println!( + "gap between keys {} and {} (offset: {})", + prev_key, numeric_key, num_keys, + ); + found_gaps = true; + } + prev_key = numeric_key; } + InspectTarget::ValueTotal => (), InspectTarget::Values => { - let file_path = - base_path.join(format!("{}_{}.ssz", inspect_config.column.as_str(), key)); + let file_path = base_path.join(format!( + "{}_{}.ssz", + inspect_config.column.as_str(), + hex::encode(&key) + )); let write_result = fs::OpenOptions::new() .create(true) @@ -244,18 +368,19 @@ pub fn inspect_db( } else { println!("Successfully saved values to file: {:?}", file_path); } - - total += value.len(); } } + total += value.len(); + num_keys += 1; } - match inspect_config.target { - InspectTarget::ValueSizes | InspectTarget::ValueTotal | InspectTarget::Values => { - println!("Total: {} bytes", total); - } + if inspect_config.target == InspectTarget::Gaps && !found_gaps { + println!("No gaps found!"); } + println!("Num keys: {}", num_keys); + println!("Total: {} bytes", total); + Ok(()) } @@ -278,12 +403,14 @@ pub fn migrate_db( let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); let mut from = CURRENT_SCHEMA_VERSION; let to = migrate_config.to; let db = HotColdDB::, LevelDB>::open( &hot_path, &cold_path, + &blobs_path, |_, db_initial_version, _| { from = db_initial_version; Ok(()) @@ -318,10 +445,12 @@ pub fn prune_payloads( let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); let db = HotColdDB::, LevelDB>::open( &hot_path, &cold_path, + &blobs_path, |_, _, _| Ok(()), client_config.store, spec.clone(), @@ -334,6 +463,111 @@ pub fn prune_payloads( db.try_prune_execution_payloads(force) } +pub fn prune_blobs( + client_config: ClientConfig, + runtime_context: &RuntimeContext, + log: Logger, +) -> Result<(), Error> { + let spec = &runtime_context.eth2_config.spec; + let hot_path = client_config.get_db_path(); + let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); + + let db = HotColdDB::, LevelDB>::open( + &hot_path, + &cold_path, + &blobs_path, + |_, _, _| Ok(()), + client_config.store, + spec.clone(), + log, + )?; + + // If we're triggering a prune manually then ignore the check on `epochs_per_blob_prune` that + // bails out early by passing true to the force parameter. 
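+    // Per the `--blob-prune-margin-epochs` help text above, this removes blobs up
+    // to `data_availability_boundary - blob_prune_margin_epochs`.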
+ db.try_prune_most_blobs(true) +} + +pub struct PruneStatesConfig { + confirm: bool, +} + +fn parse_prune_states_config(cli_args: &ArgMatches) -> Result { + let confirm = cli_args.is_present("confirm"); + Ok(PruneStatesConfig { confirm }) +} + +pub fn prune_states( + client_config: ClientConfig, + prune_config: PruneStatesConfig, + mut genesis_state: BeaconState, + runtime_context: &RuntimeContext, + log: Logger, +) -> Result<(), String> { + let spec = &runtime_context.eth2_config.spec; + let hot_path = client_config.get_db_path(); + let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); + + let db = HotColdDB::, LevelDB>::open( + &hot_path, + &cold_path, + &blobs_path, + |_, _, _| Ok(()), + client_config.store, + spec.clone(), + log.clone(), + ) + .map_err(|e| format!("Unable to open database: {e:?}"))?; + + // Load the genesis state from the database to ensure we're deleting states for the + // correct network, and that we don't end up storing the wrong genesis state. + let genesis_from_db = db + .load_cold_state_by_slot(Slot::new(0)) + .map_err(|e| format!("Error reading genesis state: {e:?}"))? + .ok_or("Error: genesis state missing from database. Check schema version.")?; + + if genesis_from_db.genesis_validators_root() != genesis_state.genesis_validators_root() { + return Err(format!( + "Error: Wrong network. Genesis state in DB does not match {} genesis.", + spec.config_name.as_deref().unwrap_or("") + )); + } + + // Check that the user has confirmed they want to proceed. + if !prune_config.confirm { + match db.get_anchor_info() { + Some(anchor_info) if anchor_info.state_upper_limit == STATE_UPPER_LIMIT_NO_RETAIN => { + info!(log, "States have already been pruned"); + return Ok(()); + } + _ => { + info!(log, "Ready to prune states"); + } + } + warn!( + log, + "Pruning states is irreversible"; + ); + warn!( + log, + "Re-run this command with --confirm to commit to state deletion" + ); + info!(log, "Nothing has been pruned on this run"); + return Err("Error: confirmation flag required".into()); + } + + // Delete all historic state data and *re-store* the genesis state. + let genesis_state_root = genesis_state + .update_tree_hash_cache() + .map_err(|e| format!("Error computing genesis state root: {e:?}"))?; + db.prune_historic_states(genesis_state_root, &genesis_state) + .map_err(|e| format!("Failed to prune due to error: {e:?}"))?; + + info!(log, "Historic states pruned successfully"); + Ok(()) +} + /// Run the database manager, returning an error string if the operation did not succeed. 
pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result<(), String> { let client_config = parse_client_config(cli_args, &env)?; @@ -353,9 +587,34 @@ pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result let inspect_config = parse_inspect_config(cli_args)?; inspect_db(inspect_config, client_config, &context, log) } - ("prune_payloads", Some(_)) => { + ("prune-payloads", Some(_)) => { prune_payloads(client_config, &context, log).map_err(format_err) } + ("prune-blobs", Some(_)) => prune_blobs(client_config, &context, log).map_err(format_err), + ("prune-states", Some(cli_args)) => { + let executor = env.core_context().executor; + let network_config = context + .eth2_network_config + .clone() + .ok_or("Missing network config")?; + + let genesis_state = executor + .block_on_dangerous( + network_config.genesis_state::( + client_config.genesis_state_url.as_deref(), + client_config.genesis_state_url_timeout, + &log, + ), + "get_genesis_state", + ) + .ok_or("Shutting down")? + .map_err(|e| format!("Error getting genesis state: {e}"))? + .ok_or("Genesis state missing")?; + + let prune_config = parse_prune_states_config(cli_args)?; + + prune_states(client_config, prune_config, genesis_state, &context, log) + } _ => Err("Unknown subcommand, for help `lighthouse database_manager --help`".into()), } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 854f718c5..1bba8242c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.5.0" +version = "4.6.0" authors = ["Paul Hauner "] edition = { workspace = true } @@ -43,6 +43,8 @@ beacon_chain = { workspace = true } store = { workspace = true } malloc_utils = { workspace = true } rayon = { workspace = true } +execution_layer = { workspace = true } +hex = { workspace = true } [package.metadata.cargo-udeps.ignore] normal = ["malloc_utils"] diff --git a/lcli/Dockerfile b/lcli/Dockerfile index cd7f45ec7..e39e2a483 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.69.0-bullseye AS builder +FROM rust:1.73.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG PORTABLE diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 6c0e8dcec..5c9603585 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -5,8 +5,8 @@ use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ - EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, - ForkName, + EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderMerge, ForkName, }; pub fn run(matches: &ArgMatches) -> Result<(), String> { @@ -40,6 +40,14 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { prev_randao: eth1_block_hash.into_root(), ..ExecutionPayloadHeaderCapella::default() }), + ForkName::Deneb => ExecutionPayloadHeader::Deneb(ExecutionPayloadHeaderDeneb { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderDeneb::default() + }), }; let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 0584cd654..1d41bedc8 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -4,16 +4,16 @@ use lighthouse_network::{ libp2p::identity::secp256k1, NetworkConfig, NETWORK_KEY_FILENAME, }; -use std::fs::File; use std::io::Write; use std::path::PathBuf; use std::{fs, net::Ipv4Addr}; +use std::{fs::File, num::NonZeroU16}; use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; - let udp_port: u16 = clap_utils::parse_required(matches, "udp-port")?; - let tcp_port: u16 = clap_utils::parse_required(matches, "tcp-port")?; + let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?; + let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?; let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; let genesis_fork_version: [u8; 4] = clap_utils::parse_ssz_required(matches, "genesis-fork-version")?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 38fec2ebb..17fafe6ec 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -11,6 +11,7 @@ mod indexed_attestations; mod insecure_validators; mod interop_genesis; mod mnemonic_validators; +mod mock_el; mod new_testnet; mod parse_ssz; mod replace_state_pubkeys; @@ -432,7 +433,7 @@ fn main() { .takes_value(true) .default_value("bellatrix") .help("The fork for which the execution payload header should be created.") - .possible_values(&["merge", "bellatrix", "capella"]) + .possible_values(&["merge", "bellatrix", "capella", "deneb"]) ) ) .subcommand( @@ -608,6 +609,15 @@ fn main() { "The epoch at which to enable the Capella hard fork", ), ) + .arg( + Arg::with_name("deneb-fork-epoch") + .long("deneb-fork-epoch") + .value_name("EPOCH") + .takes_value(true) + .help( + "The epoch at which to enable the deneb hard fork", + ), + ) .arg( Arg::with_name("ttd") .long("ttd") @@ -882,6 +892,61 @@ fn main() { .help("Number of repeat runs, useful for benchmarking."), ) ) + .subcommand( + SubCommand::with_name("mock-el") + .about("Creates a mock execution layer server. This is NOT SAFE and should only \ + be used for testing and development on testnets. Do not use in production. Do not \ + use on mainnet. 
It cannot perform validator duties.") + .arg( + Arg::with_name("jwt-output-path") + .long("jwt-output-path") + .value_name("PATH") + .takes_value(true) + .required(true) + .help("Path to write the JWT secret."), + ) + .arg( + Arg::with_name("listen-address") + .long("listen-address") + .value_name("IP_ADDRESS") + .takes_value(true) + .help("The server will listen on this address.") + .default_value("127.0.0.1") + ) + .arg( + Arg::with_name("listen-port") + .long("listen-port") + .value_name("PORT") + .takes_value(true) + .help("The server will listen on this port.") + .default_value("8551") + ) + .arg( + Arg::with_name("all-payloads-valid") + .long("all-payloads-valid") + .takes_value(true) + .help("Controls the response to newPayload and forkchoiceUpdated. \ + Set to 'true' to return VALID. Set to 'false' to return SYNCING.") + .default_value("false") + .hidden(true) + ) + .arg( + Arg::with_name("shanghai-time") + .long("shanghai-time") + .value_name("UNIX_TIMESTAMP") + .takes_value(true) + .help("The payload timestamp that enables Shanghai. Defaults to the mainnet value.") + .default_value("1681338479") + ) + .arg( + Arg::with_name("cancun-time") + .long("cancun-time") + .value_name("UNIX_TIMESTAMP") + .takes_value(true) + .help("The payload timestamp that enables Cancun. No default is provided \ + until Cancun is triggered on mainnet.") + ) + ) .get_matches(); let result = matches @@ -1023,6 +1088,8 @@ fn run( state_root::run::(env, network_config, matches) .map_err(|e| format!("Failed to run state-root command: {}", e)) } + ("mock-el", Some(matches)) => mock_el::run::(env, matches) + .map_err(|e| format!("Failed to run mock-el command: {}", e)), (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), } } diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs new file mode 100644 index 000000000..094e23c3b --- /dev/null +++ b/lcli/src/mock_el.rs @@ -0,0 +1,62 @@ +use clap::ArgMatches; +use clap_utils::{parse_optional, parse_required}; +use environment::Environment; +use execution_layer::{ + auth::JwtKey, + test_utils::{ + Config, MockExecutionConfig, MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, + }, +}; +use std::net::Ipv4Addr; +use std::path::PathBuf; +use types::*; + +pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { + let jwt_path: PathBuf = parse_required(matches, "jwt-output-path")?; + let listen_addr: Ipv4Addr = parse_required(matches, "listen-address")?; + let listen_port: u16 = parse_required(matches, "listen-port")?; + let all_payloads_valid: bool = parse_required(matches, "all-payloads-valid")?; + let shanghai_time = parse_required(matches, "shanghai-time")?; + let cancun_time = parse_optional(matches, "cancun-time")?; + + let handle = env.core_context().executor.handle().unwrap(); + let spec = &T::default_spec(); + let jwt_key = JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(); + std::fs::write(jwt_path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); + + let config = MockExecutionConfig { + server_config: Config { + listen_addr, + listen_port, + }, + jwt_key, + terminal_difficulty: spec.terminal_total_difficulty, + terminal_block: DEFAULT_TERMINAL_BLOCK, + terminal_block_hash: spec.terminal_block_hash, + shanghai_time: Some(shanghai_time), + cancun_time, + }; + let kzg = None; + let server: MockServer = MockServer::new_with_config(&handle, config, kzg); + + if all_payloads_valid { + eprintln!( + "Using --all-payloads-valid=true can be dangerous. \ + Never use this flag when operating validators." 
+ ); + // Indicate that all payloads are valid. + server.all_payloads_valid(); + } + + eprintln!( + "This tool is for TESTING PURPOSES ONLY. Do not use in production or on mainnet. \ + It cannot perform validator duties. It may cause nodes to follow an invalid chain." + ); + eprintln!("Server listening on {}:{}", listen_addr, listen_port); + + let shutdown_reason = env.block_until_shutdown_requested()?; + + eprintln!("Shutting down: {:?}", shutdown_reason); + + Ok(()) +} diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 973993f97..3a0c7a9f6 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,7 +1,7 @@ use account_utils::eth2_keystore::keypair_from_secret; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; -use eth2_network_config::{Eth2NetworkConfig, GenesisStateSource}; +use eth2_network_config::{Eth2NetworkConfig, GenesisStateSource, TRUSTED_SETUP_BYTES}; use eth2_wallet::bip39::Seed; use eth2_wallet::bip39::{Language, Mnemonic}; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; @@ -19,8 +19,8 @@ use types::ExecutionBlockHash; use types::{ test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRefMut, ForkName, Hash256, Keypair, - PublicKey, Validator, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRefMut, + ForkName, Hash256, Keypair, PublicKey, Validator, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -85,6 +85,10 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul spec.capella_fork_epoch = Some(fork_epoch); } + if let Some(fork_epoch) = parse_optional(matches, "deneb-fork-epoch")? { + spec.deneb_fork_epoch = Some(fork_epoch); + } + if let Some(ttd) = parse_optional(matches, "ttd")? 
{ spec.terminal_total_difficulty = ttd; } @@ -111,6 +115,10 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) .map(ExecutionPayloadHeader::Capella) } + ForkName::Deneb => { + ExecutionPayloadHeaderDeneb::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Deneb) + } } .map_err(|e| format!("SSZ decode failed: {:?}", e)) }) @@ -187,12 +195,23 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul None }; + let kzg_trusted_setup = if let Some(epoch) = spec.deneb_fork_epoch { + // Only load the trusted setup if the deneb fork epoch is set + if epoch != Epoch::max_value() { + Some(TRUSTED_SETUP_BYTES.to_vec()) + } else { + None + } + } else { + None + }; let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr: Some(vec![]), genesis_state_bytes: genesis_state_bytes.map(Into::into), genesis_state_source: GenesisStateSource::IncludedBytes, config: Config::from_chain_spec::(&spec), + kzg_trusted_setup, }; testnet.write_to_file(testnet_dir_path, overwrite_files) @@ -300,6 +319,9 @@ fn initialize_state_with_validators( ExecutionPayloadHeaderRefMut::Capella(_) => { return Err("Cannot start genesis from a capella state".to_string()) } + ExecutionPayloadHeaderRefMut::Deneb(_) => { + return Err("Cannot start genesis from a deneb state".to_string()) + } } } diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 5c306f4fd..453169cdc 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -78,6 +78,9 @@ pub fn run_parse_ssz( SignedBeaconBlockCapella::::from_ssz_bytes, format, )?, + "SignedBeaconBlockDeneb" => { + decode_and_print(&bytes, SignedBeaconBlockDeneb::::from_ssz_bytes, format)? + } "BeaconState" => decode_and_print::>( &bytes, |bytes| BeaconState::from_ssz_bytes(bytes, spec), @@ -95,6 +98,10 @@ pub fn run_parse_ssz( "BeaconStateCapella" => { decode_and_print(&bytes, BeaconStateCapella::::from_ssz_bytes, format)? } + "BeaconStateDeneb" => { + decode_and_print(&bytes, BeaconStateDeneb::::from_ssz_bytes, format)? 
+ } + "BlobSidecar" => decode_and_print(&bytes, BlobSidecar::::from_ssz_bytes, format)?, other => return Err(format!("Unknown type: {}", other)), }; diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 31fe9fe64..cdbacfe4d 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -109,6 +109,7 @@ pub fn run( } _ => return Err("must supply either --state-path or --beacon-url".into()), }; + let mut post_state = None; let initial_slot = state.slot(); let target_slot = initial_slot + slots; @@ -140,14 +141,15 @@ pub fn run( let duration = Instant::now().duration_since(start); info!("Run {}: {:?}", i, duration); + post_state = Some(state); } - if let Some(output_path) = output_path { + if let (Some(post_state), Some(output_path)) = (post_state, output_path) { let mut output_file = File::create(output_path) .map_err(|e| format!("Unable to create output file: {:?}", e))?; output_file - .write_all(&state.as_ssz_bytes()) + .write_all(&post_state.as_ssz_bytes()) .map_err(|e| format!("Unable to write to output file: {:?}", e))?; } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 48b4eb037..8517c66c3 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "lighthouse" -version = "4.5.0" +version = "4.6.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false -rust-version = "1.69.0" +rust-version = "1.73.0" [features] default = ["slasher-lmdb"] @@ -35,7 +35,6 @@ types = { workspace = true } bls = { workspace = true } ethereum_hashing = { workspace = true } clap = { workspace = true } -env_logger = { workspace = true } environment = { workspace = true } boot_node = { path = "../boot_node" } futures = { workspace = true } @@ -57,6 +56,8 @@ unused_port = { workspace = true } database_manager = { path = "../database_manager" } slasher = { workspace = true } validator_manager = { path = "../validator_manager" } +tracing-subscriber = { workspace = true } +logging = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index d2a181a1b..b57e1e9de 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -19,7 +19,6 @@ futures = { workspace = true } slog-json = "2.3.0" exit-future = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index fc7ab8d52..a1f6b26a9 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -13,7 +13,7 @@ use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; use logging::SSELoggingComponents; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; use std::fs::create_dir_all; @@ -254,12 +254,9 @@ impl EnvironmentBuilder { } // Disable file logging if no path is specified. - let path = match config.path { - Some(path) => path, - None => { - self.log = Some(stdout_logger); - return Ok(self); - } + let Some(path) = config.path else { + self.log = Some(stdout_logger); + return Ok(self); }; // Ensure directories are created becfore the logfile. 
diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index b98145163..dbb6819cd 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -67,6 +67,8 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index f98af9617..06eb06fc0 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -4,14 +4,13 @@ use beacon_node::ProductionBeaconNode; use clap::{App, Arg, ArgMatches}; use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config}; use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR}; -use env_logger::{Builder, Env}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; -use slog::{crit, info, warn}; +use slog::{crit, info}; use std::path::PathBuf; use std::process::exit; use task_executor::ShutdownReason; @@ -81,20 +80,11 @@ fn main() { cfg!(feature = "gnosis"), ).as_str() ) - .arg( - Arg::with_name("spec") - .short("s") - .long("spec") - .value_name("DEPRECATED") - .help("This flag is deprecated, it will be disallowed in a future release. This \ - value is now derived from the --network or --testnet-dir flags.") - .takes_value(true) - .global(true) - ) .arg( Arg::with_name("env_log") .short("l") - .help("Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", + .help( + "DEPRECATED Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", ) .takes_value(false), ) @@ -374,11 +364,6 @@ fn main() { } } - // Debugging output for libp2p and external crates. - if matches.is_present("env_log") { - Builder::from_env(Env::default()).init(); - } - let result = get_eth2_network_config(&matches).and_then(|eth2_network_config| { let eth_spec_id = eth2_network_config.eth_spec_id()?; @@ -520,7 +505,7 @@ fn run( }; let logger_config = LoggerConfig { - path: log_path, + path: log_path.clone(), debug_level: String::from(debug_level), logfile_debug_level: String::from(logfile_debug_level), log_format: log_format.map(String::from), @@ -543,22 +528,31 @@ fn run( let log = environment.core_context().log().clone(); + let mut tracing_log_path: Option = clap_utils::parse_optional(matches, "logfile")?; + + if tracing_log_path.is_none() { + tracing_log_path = Some( + parse_path_or_default(matches, "datadir")? + .join(DEFAULT_BEACON_NODE_DIR) + .join("logs"), + ) + } + + let path = tracing_log_path.clone().unwrap(); + + let turn_on_terminal_logs = matches.is_present("env_log"); + + logging::create_tracing_layer(path, turn_on_terminal_logs); + // Allow Prometheus to export the time at which the process was started. metrics::expose_process_start_time(&log); // Allow Prometheus access to the version and commit of the Lighthouse build. 
metrics::expose_lighthouse_version(); - if matches.is_present("spec") { - warn!( - log, - "The --spec flag is deprecated and will be removed in a future release" - ); - } - #[cfg(all(feature = "modern", target_arch = "x86_64"))] if !std::is_x86_feature_detected!("adx") { - warn!( + slog::warn!( log, "CPU seems incompatible with optimized Lighthouse build"; "advice" => "If you get a SIGILL, please try Lighthouse portable build" diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 63d79fceb..f82e3ec71 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -492,6 +492,8 @@ fn validator_import_launchpad() { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -614,6 +616,8 @@ fn validator_import_launchpad_no_password_then_add_password() { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -640,6 +644,8 @@ fn validator_import_launchpad_no_password_then_add_password() { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: dst_keystore_dir.join(KEYSTORE_NAME), @@ -742,6 +748,8 @@ fn validator_import_launchpad_password_file() { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index bc6b6284e..2a88770cd 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -18,13 +18,19 @@ use std::str::FromStr; use std::string::ToString; use std::time::Duration; use tempfile::TempDir; +use types::non_zero_usize::new_non_zero_usize; use types::{ Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, ProgressiveBalancesMode, }; -use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; +const DUMMY_ENR_TCP_PORT: u16 = 7777; +const DUMMY_ENR_UDP_PORT: u16 = 8888; +const DUMMY_ENR_QUIC_PORT: u16 = 9999; + +const _: () = + assert!(DUMMY_ENR_QUIC_PORT != 0 && DUMMY_ENR_TCP_PORT != 0 && DUMMY_ENR_UDP_PORT != 0); /// Returns the `lighthouse beacon_node` command. 
fn base_cmd() -> Command { @@ -86,6 +92,22 @@ fn staking_flag() { }); } +#[test] +fn allow_insecure_genesis_sync() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.allow_insecure_genesis_sync, false); + }); + + CommandLineTest::new() + .flag("allow-insecure-genesis-sync", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.allow_insecure_genesis_sync, true); + }); +} + #[test] fn wss_checkpoint_flag() { let state = Some(Checkpoint { @@ -238,60 +260,6 @@ fn paranoid_block_proposal_on() { .with_config(|config| assert!(config.chain.paranoid_block_proposal)); } -#[test] -fn count_unrealized_no_arg() { - CommandLineTest::new() - .flag("count-unrealized", None) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_false() { - CommandLineTest::new() - .flag("count-unrealized", Some("false")) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_true() { - CommandLineTest::new() - .flag("count-unrealized", Some("true")) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_full_no_arg() { - CommandLineTest::new() - .flag("count-unrealized-full", None) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_full_false() { - CommandLineTest::new() - .flag("count-unrealized-full", Some("false")) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_full_true() { - CommandLineTest::new() - .flag("count-unrealized-full", Some("true")) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. 
- .run_with_zero_port(); -} - #[test] fn reset_payload_statuses_default() { CommandLineTest::new() @@ -383,23 +351,6 @@ fn eth1_flag() { .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] -fn eth1_endpoints_flag() { - CommandLineTest::new() - .flag("eth1-endpoints", Some("http://localhost:9545")) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.eth1.endpoint.get_endpoint().full.to_string(), - "http://localhost:9545/" - ); - assert_eq!( - config.eth1.endpoint.get_endpoint().to_string(), - "http://localhost:9545/" - ); - assert!(config.sync_eth1_chain); - }); -} -#[test] fn eth1_blocks_per_log_query_flag() { CommandLineTest::new() .flag("eth1-blocks-per-log-query", Some("500")) @@ -522,49 +473,6 @@ fn merge_execution_endpoints_flag() { fn merge_execution_endpoint_flag() { run_merge_execution_endpoints_flag_test("execution-endpoint") } -fn run_execution_endpoints_overrides_eth1_endpoints_test(eth1_flag: &str, execution_flag: &str) { - use sensitive_url::SensitiveUrl; - - let eth1_endpoint = "http://bad.bad"; - let execution_endpoint = "http://good.good"; - - assert!(eth1_endpoint != execution_endpoint); - - let dir = TempDir::new().expect("Unable to create temporary directory"); - let jwt_path = dir.path().join("jwt-file"); - - CommandLineTest::new() - .flag(eth1_flag, Some(ð1_endpoint)) - .flag(execution_flag, Some(&execution_endpoint)) - .flag("execution-jwt", jwt_path.as_os_str().to_str()) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.execution_layer.as_ref().unwrap().execution_endpoints, - vec![SensitiveUrl::parse(execution_endpoint).unwrap()] - ); - - // The eth1 endpoint should have been set to the --execution-endpoint value in defiance - // of --eth1-endpoints. - assert_eq!( - config.eth1.endpoint, - Eth1Endpoint::Auth { - endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), - jwt_path: jwt_path.clone(), - jwt_id: None, - jwt_version: None, - } - ); - }); -} -#[test] -fn execution_endpoints_overrides_eth1_endpoints() { - run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoints", "execution-endpoints"); -} -#[test] -fn execution_endpoint_overrides_eth1_endpoint() { - run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoint", "execution-endpoint"); -} #[test] fn merge_jwt_secrets_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); @@ -689,70 +597,6 @@ fn builder_fallback_flags() { assert_eq!(config.chain.builder_fallback_disable_checks, true); }, ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - Some("builder-profit-threshold"), - Some("1000000000000000000000000"), - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .builder_profit_threshold, - 1000000000000000000000000 - ); - }, - ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - None, - None, - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .builder_profit_threshold, - 0 - ); - }, - ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - Some("always-prefer-builder-payload"), - None, - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .always_prefer_builder_payload, - true - ); - }, - ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - None, - None, - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .always_prefer_builder_payload, - false - ); - }, - ); } #[test] 
@@ -798,7 +642,7 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl let id = "bn-1"; let version = "Lighthouse-v2.1.3"; CommandLineTest::new() - .flag("execution-endpoint", Some(execution_endpoint.clone())) + .flag("execution-endpoint", Some(execution_endpoint)) .flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str()) .flag(jwt_id_flag, Some(id)) .flag(jwt_version_flag, Some(version)) @@ -1004,7 +848,7 @@ fn network_listen_address_flag_wrong_double_v6_value_config() { } #[test] fn network_port_flag_over_ipv4() { - let port = unused_tcp4_port().expect("Unable to find unused port."); + let port = 0; CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run() @@ -1015,13 +859,30 @@ fn network_port_flag_over_ipv4() { listen_addr.quic_port, listen_addr.tcp_port )), + // quic_port should be 0 if tcp_port is given as 0. + Some((port, 0, port)) + ); + }); + + let port = 9000; + CommandLineTest::new() + .flag("port", Some(port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.disc_port, + listen_addr.quic_port, + listen_addr.tcp_port + )), + // quic_port should be (tcp_port + 1) if tcp_port is given as non-zero. Some((port, port + 1, port)) ); }); } #[test] fn network_port_flag_over_ipv6() { - let port = unused_tcp6_port().expect("Unable to find unused port."); + let port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) @@ -1033,14 +894,92 @@ fn network_port_flag_over_ipv6() { listen_addr.quic_port, listen_addr.tcp_port )), + // quic_port should be 0 if tcp_port is given as 0. + Some((port, 0, port)) + ); + }); + + let port = 9000; + CommandLineTest::new() + .flag("listen-address", Some("::1")) + .flag("port", Some(port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.disc_port, + listen_addr.quic_port, + listen_addr.tcp_port + )), + // quic_port should be (tcp_port + 1) if tcp_port is given as non-zero. Some((port, port + 1, port)) ); }); } #[test] +fn network_port_flag_over_ipv4_and_ipv6() { + let port = 0; + let port6 = 0; + CommandLineTest::new() + .flag("listen-address", Some("127.0.0.1")) + .flag("listen-address", Some("::1")) + .flag("port", Some(port.to_string().as_str())) + .flag("port6", Some(port6.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.disc_port, + listen_addr.quic_port, + listen_addr.tcp_port + )), + // quic_port should be 0 if tcp_port is given as 0. + Some((port, 0, port)) + ); + assert_eq!( + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.disc_port, + listen_addr.quic_port, + listen_addr.tcp_port + )), + // quic_port should be 0 if tcp_port is given as 0. + Some((port6, 0, port6)) + ); + }); + + let port = 19000; + let port6 = 29000; + CommandLineTest::new() + .flag("listen-address", Some("127.0.0.1")) + .flag("listen-address", Some("::1")) + .flag("port", Some(port.to_string().as_str())) + .flag("port6", Some(port6.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.disc_port, + listen_addr.quic_port, + listen_addr.tcp_port + )), + // quic_port should be (tcp_port + 1) if tcp_port is given as non-zero. 
+ Some((port, port + 1, port)) + ); + assert_eq!( + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.disc_port, + listen_addr.quic_port, + listen_addr.tcp_port + )), + // quic_port should be (tcp_port + 1) if tcp_port is given as non-zero. + Some((port6, port6 + 1, port6)) + ); + }); +} +#[test] fn network_port_and_discovery_port_flags_over_ipv4() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; CommandLineTest::new() .flag("port", Some(tcp4_port.to_string().as_str())) .flag("discovery-port", Some(disc4_port.to_string().as_str())) @@ -1058,8 +997,8 @@ fn network_port_and_discovery_port_flags_over_ipv4() { } #[test] fn network_port_and_discovery_port_flags_over_ipv6() { - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = 0; + let disc6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(tcp6_port.to_string().as_str())) @@ -1078,10 +1017,10 @@ fn network_port_and_discovery_port_flags_over_ipv6() { } #[test] fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; + let tcp6_port = 0; + let disc6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("listen-address", Some("127.0.0.1")) @@ -1113,12 +1052,12 @@ fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { #[test] fn network_port_discovery_quic_port_flags_over_ipv4_and_ipv6() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); - let quic4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); - let quic6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; + let quic4_port = 0; + let tcp6_port = 0; + let disc6_port = 0; + let quic6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("listen-address", Some("127.0.0.1")) @@ -1194,7 +1133,7 @@ fn default_backfill_rate_limiting_flag() { } #[test] fn default_boot_nodes() { - let number_of_boot_nodes = 17; + let number_of_boot_nodes = 15; CommandLineTest::new() .run_with_zero_port() @@ -1264,57 +1203,91 @@ fn network_load_flag() { // Tests for ENR flags. 
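The ENR flag tests below now assert through `.map(|port| port.get())` because the network config stores these ports as `Option<NonZeroU16>`. A sketch of the corresponding flags, reusing the dummy port constants defined above (illustrative values only):

```bash
# Hypothetical invocation: ENR ports must be explicit non-zero values,
# mirroring the NonZeroU16 config fields exercised by the tests below.
lighthouse bn \
  --enr-address 192.167.1.1 \
  --enr-udp-port 8888 \
  --enr-tcp-port 7777 \
  --enr-quic-port 9999
```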
#[test] fn enr_udp_port_flag() { - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; + assert!(port != 0); CommandLineTest::new() .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_quic_port_flag() { - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_QUIC_PORT; CommandLineTest::new() .flag("enr-quic-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_quic4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_quic4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_tcp_port_flag() { - let port = unused_tcp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_TCP_PORT; CommandLineTest::new() .flag("enr-tcp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_tcp4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_tcp4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_udp6_port_flag() { - let port = unused_udp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-udp6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_quic6_port_flag() { - let port = unused_udp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_QUIC_PORT; CommandLineTest::new() .flag("enr-quic6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_quic6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_quic6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_tcp6_port_flag() { - let port = unused_tcp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_TCP_PORT; CommandLineTest::new() .flag("enr-tcp6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| { + assert_eq!( + config.network.enr_tcp6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_match_flag_over_ipv4() { let addr = "127.0.0.2".parse::().unwrap(); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + + // We use the ENR dummy values here because, due to the nature of the `--enr-match` flag, they eventually become the ENR ports (as well as the listening ports).
+ let udp4_port = DUMMY_ENR_UDP_PORT; + let tcp4_port = DUMMY_ENR_TCP_PORT; + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) @@ -1331,15 +1304,21 @@ fn enr_match_flag_over_ipv4() { Some((addr, udp4_port, tcp4_port)) ); assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(udp4_port) + ); }); } #[test] fn enr_match_flag_over_ipv6() { const ADDR: &str = "::1"; let addr = ADDR.parse::().unwrap(); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + + // We use the ENR dummy values here because, due to the nature of the `--enr-match` flag, they eventually become the ENR ports (as well as the listening ports). + let udp6_port = DUMMY_ENR_UDP_PORT; + let tcp6_port = DUMMY_ENR_TCP_PORT; + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(ADDR)) @@ -1356,19 +1335,27 @@ fn enr_match_flag_over_ipv6() { Some((addr, udp6_port, tcp6_port)) ); assert_eq!(config.network.enr_address, (None, Some(addr))); - assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(udp6_port) + ); }); } #[test] fn enr_match_flag_over_ipv4_and_ipv6() { const IPV6_ADDR: &str = "::1"; + + // We use the ENR dummy values here because, due to the nature of the `--enr-match` flag, they eventually become the ENR ports (as well as the listening ports). + let udp6_port = DUMMY_ENR_UDP_PORT; + let tcp6_port = DUMMY_ENR_TCP_PORT; let ipv6_addr = IPV6_ADDR.parse::().unwrap(); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + const IPV4_ADDR: &str = "127.0.0.1"; + // We use the ENR dummy values here because, due to the nature of the `--enr-match` flag, they eventually become the ENR ports (as well as the listening ports).
+ let udp4_port = DUMMY_ENR_UDP_PORT; + let tcp4_port = DUMMY_ENR_TCP_PORT; let ipv4_addr = IPV4_ADDR.parse::().unwrap(); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(IPV4_ADDR)) @@ -1399,41 +1386,53 @@ fn enr_match_flag_over_ipv4_and_ipv6() { config.network.enr_address, (Some(ipv4_addr), Some(ipv6_addr)) ); - assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); - assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(udp6_port) + ); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(udp4_port) + ); }); } #[test] fn enr_address_flag_with_ipv4() { let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] fn enr_address_flag_with_ipv6() { let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] fn enr_address_dns_flag() { let addr = Ipv4Addr::LOCALHOST; let ipv6addr = Ipv6Addr::LOCALHOST; - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("localhost")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -1443,7 +1442,10 @@ fn enr_address_dns_flag() { config.network.enr_address.0 == Some(addr) || config.network.enr_address.1 == Some(ipv6addr) ); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] @@ -1482,8 +1484,8 @@ fn http_address_ipv6_flag() { } #[test] fn http_port_flag() { - let port1 = unused_tcp4_port().expect("Unable to find unused port."); - let port2 = unused_tcp4_port().expect("Unable to find unused port."); + let port1 = 0; + let port2 = 0; CommandLineTest::new() .flag("http", None) .flag("http-port", Some(port1.to_string().as_str())) @@ -1639,8 +1641,8 @@ fn metrics_address_ipv6_flag() { } #[test] fn metrics_port_flag() { - let port1 = unused_tcp4_port().expect("Unable to find unused port."); - let port2 = unused_tcp4_port().expect("Unable to find unused port."); + let port1 = 0; + let port2 = 0; CommandLineTest::new() .flag("metrics", None) .flag("metrics-port", Some(port1.to_string().as_str())) @@ -1672,11 +1674,17 @@ fn metrics_allow_origin_all_flag() { // Tests for Validator Monitor flags. 
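The validator monitor tests below reflect the consolidation of the old `validator_monitor_*` fields into a single `config.validator_monitor` struct (`auto_register`, `validators`, `individual_tracking_threshold`). A hedged sketch of the flags these tests exercise; `<PUBKEY>` is a placeholder for a 0x-prefixed BLS public key, and the threshold flag name is inferred from the config field, so treat it as an assumption:

```bash
# Hypothetical invocation combining the validator monitor options.
lighthouse bn \
  --validator-monitor-auto \
  --validator-monitor-pubkeys <PUBKEY> \
  --validator-monitor-individual-tracking-threshold 42
```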
#[test] +fn validator_monitor_default_values() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.validator_monitor == <_>::default())); +} +#[test] fn validator_monitor_auto_flag() { CommandLineTest::new() .flag("validator-monitor-auto", None) .run_with_zero_port() - .with_config(|config| assert!(config.validator_monitor_auto)); + .with_config(|config| assert!(config.validator_monitor.auto_register)); } #[test] fn validator_monitor_pubkeys_flag() { @@ -1685,8 +1693,8 @@ fn validator_monitor_pubkeys_flag() { 0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.validator_monitor_pubkeys[0].to_string(), "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); - assert_eq!(config.validator_monitor_pubkeys[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); + assert_eq!(config.validator_monitor.validators[0].to_string(), "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); + assert_eq!(config.validator_monitor.validators[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); }); } #[test] @@ -1700,8 +1708,8 @@ fn validator_monitor_file_flag() { .flag("validator-monitor-file", dir.path().join("pubkeys.txt").as_os_str().to_str()) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.validator_monitor_pubkeys[0].to_string(), "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); - assert_eq!(config.validator_monitor_pubkeys[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); + assert_eq!(config.validator_monitor.validators[0].to_string(), "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); + assert_eq!(config.validator_monitor.validators[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); }); } #[test] @@ -1710,7 +1718,7 @@ fn validator_monitor_metrics_threshold_default() { .run_with_zero_port() .with_config(|config| { assert_eq!( - config.validator_monitor_individual_tracking_threshold, + config.validator_monitor.individual_tracking_threshold, // If this value changes make sure to update the help text for // the CLI command. 
64 @@ -1726,7 +1734,7 @@ fn validator_monitor_metrics_threshold_custom() { ) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.validator_monitor_individual_tracking_threshold, 42) + assert_eq!(config.validator_monitor.individual_tracking_threshold, 42) }); } @@ -1777,14 +1785,19 @@ fn block_cache_size_flag() { CommandLineTest::new() .flag("block-cache-size", Some("4")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.block_cache_size, 4_usize)); + .with_config(|config| assert_eq!(config.store.block_cache_size, new_non_zero_usize(4))); } #[test] fn historic_state_cache_size_flag() { CommandLineTest::new() .flag("historic-state-cache-size", Some("4")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.historic_state_cache_size, 4_usize)); + .with_config(|config| { + assert_eq!( + config.store.historic_state_cache_size, + new_non_zero_usize(4) + ) + }); } #[test] fn historic_state_cache_size_default() { @@ -1827,6 +1840,45 @@ fn prune_payloads_on_startup_false() { .with_config(|config| assert!(!config.store.prune_payloads)); } #[test] +fn prune_blobs_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.store.prune_blobs)); +} +#[test] +fn prune_blobs_on_startup_false() { + CommandLineTest::new() + .flag("prune-blobs", Some("false")) + .run_with_zero_port() + .with_config(|config| assert!(!config.store.prune_blobs)); +} +#[test] +fn epochs_per_blob_prune_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.store.epochs_per_blob_prune == 1)); +} +#[test] +fn epochs_per_blob_prune_on_startup_five() { + CommandLineTest::new() + .flag("epochs-per-blob-prune", Some("5")) + .run_with_zero_port() + .with_config(|config| assert!(config.store.epochs_per_blob_prune == 5)); +} +#[test] +fn blob_prune_margin_epochs_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.store.blob_prune_margin_epochs == 0)); +} +#[test] +fn blob_prune_margin_epochs_on_startup_ten() { + CommandLineTest::new() + .flag("blob-prune-margin-epochs", Some("10")) + .run_with_zero_port() + .with_config(|config| assert!(config.store.blob_prune_margin_epochs == 10)); +} +#[test] fn reconstruct_historic_states_flag() { CommandLineTest::new() .flag("reconstruct-historic-states", None) @@ -1970,7 +2022,10 @@ fn slasher_attestation_cache_size_flag() { .slasher .as_ref() .expect("Unable to parse Slasher config"); - assert_eq!(slasher_config.attestation_root_cache_size, 10000); + assert_eq!( + slasher_config.attestation_root_cache_size, + new_non_zero_usize(10000) + ); }); } #[test] @@ -2315,7 +2370,10 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enable_light_client_server, false)); + .with_config(|config| { + assert_eq!(config.network.enable_light_client_server, false); + assert_eq!(config.http_api.enable_light_client_server, false); + }); } #[test] @@ -2323,7 +2381,20 @@ fn light_client_server_enabled() { CommandLineTest::new() .flag("light-client-server", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enable_light_client_server, true)); + .with_config(|config| { + assert_eq!(config.network.enable_light_client_server, true); + }); +} + +#[test] +fn light_client_http_server_enabled() { + CommandLineTest::new() + .flag("http", None) + .flag("light-client-server", None) + 
.run_with_zero_port() + .with_config(|config| { + assert_eq!(config.http_api.enable_light_client_server, true); + }); } #[test] @@ -2333,7 +2404,7 @@ fn gui_flag() { .run_with_zero_port() .with_config(|config| { assert!(config.http_api.enabled); - assert!(config.validator_monitor_auto); + assert!(config.validator_monitor.auto_register); }); } @@ -2396,20 +2467,20 @@ fn progressive_balances_default() { .with_config(|config| { assert_eq!( config.chain.progressive_balances_mode, - ProgressiveBalancesMode::Checked + ProgressiveBalancesMode::Fast ) }); } #[test] -fn progressive_balances_fast() { +fn progressive_balances_checked() { CommandLineTest::new() - .flag("progressive-balances", Some("fast")) + .flag("progressive-balances", Some("checked")) .run_with_zero_port() .with_config(|config| { assert_eq!( config.chain.progressive_balances_mode, - ProgressiveBalancesMode::Fast + ProgressiveBalancesMode::Checked ) }); } @@ -2511,3 +2582,22 @@ fn genesis_state_url_value() { assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); }); } + +#[test] +fn disable_duplicate_warn_logs_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.disable_duplicate_warn_logs, false); + }); +} + +#[test] +fn disable_duplicate_warn_logs() { + CommandLineTest::new() + .flag("disable-duplicate-warn-logs", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.disable_duplicate_warn_logs, true); + }); +} diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 062b7e778..701aed07e 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -1,4 +1,4 @@ -use validator_client::Config; +use validator_client::{ApiTopic, Config}; use crate::exec::CommandLineTestExec; use bls::{Keypair, PublicKeyBytes}; @@ -101,12 +101,6 @@ fn beacon_nodes_flag() { }); } -#[test] -fn allow_unsynced_flag() { - // No-op, but doesn't crash. 
- CommandLineTest::new().flag("allow-unsynced", None).run(); -} - #[test] fn disable_auto_discover_flag() { CommandLineTest::new() @@ -428,23 +422,20 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } #[test] -fn block_delay_ms() { +fn produce_block_v3_flag() { CommandLineTest::new() - .flag("block-delay-ms", Some("2000")) + .flag("produce-block-v3", None) .run() - .with_config(|config| { - assert_eq!( - config.block_delay, - Some(std::time::Duration::from_millis(2000)) - ) - }); + .with_config(|config| assert!(config.produce_block_v3)); } + #[test] -fn no_block_delay_ms() { +fn no_produce_block_v3_flag() { CommandLineTest::new() .run() - .with_config(|config| assert_eq!(config.block_delay, None)); + .with_config(|config| assert!(!config.produce_block_v3)); } + #[test] fn no_gas_limit_flag() { CommandLineTest::new() @@ -473,6 +464,32 @@ fn builder_proposals_flag() { .with_config(|config| assert!(config.builder_proposals)); } #[test] +fn builder_boost_factor_flag() { + CommandLineTest::new() + .flag("builder-boost-factor", Some("150")) + .run() + .with_config(|config| assert_eq!(config.builder_boost_factor, Some(150))); +} +#[test] +fn no_builder_boost_factor_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert_eq!(config.builder_boost_factor, None)); +} +#[test] +fn prefer_builder_proposals_flag() { + CommandLineTest::new() + .flag("prefer-builder-proposals", None) + .run() + .with_config(|config| assert!(config.prefer_builder_proposals)); +} +#[test] +fn no_prefer_builder_proposals_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.prefer_builder_proposals)); +} +#[test] fn no_builder_registration_timestamp_override_flag() { CommandLineTest::new() .run() @@ -499,20 +516,78 @@ fn monitoring_endpoint() { assert_eq!(api_conf.update_period_secs, Some(30)); }); } -#[test] -fn disable_run_on_all_default() { - CommandLineTest::new().run().with_config(|config| { - assert!(!config.disable_run_on_all); - }); -} #[test] -fn disable_run_on_all() { +fn disable_run_on_all_flag() { CommandLineTest::new() .flag("disable-run-on-all", None) .run() .with_config(|config| { - assert!(config.disable_run_on_all); + assert_eq!(config.broadcast_topics, vec![]); + }); + // --broadcast flag takes precedence + CommandLineTest::new() + .flag("disable-run-on-all", None) + .flag("broadcast", Some("attestations")) + .run() + .with_config(|config| { + assert_eq!(config.broadcast_topics, vec![ApiTopic::Attestations]); + }); +} + +#[test] +fn no_broadcast_flag() { + CommandLineTest::new().run().with_config(|config| { + assert_eq!(config.broadcast_topics, vec![ApiTopic::Subscriptions]); + }); +} + +#[test] +fn broadcast_flag() { + // "none" variant + CommandLineTest::new() + .flag("broadcast", Some("none")) + .run() + .with_config(|config| { + assert_eq!(config.broadcast_topics, vec![]); + }); + // "none" with other values is ignored + CommandLineTest::new() + .flag("broadcast", Some("none,sync-committee")) + .run() + .with_config(|config| { + assert_eq!(config.broadcast_topics, vec![ApiTopic::SyncCommittee]); + }); + // Other valid variants + CommandLineTest::new() + .flag("broadcast", Some("blocks, subscriptions")) + .run() + .with_config(|config| { + assert_eq!( + config.broadcast_topics, + vec![ApiTopic::Blocks, ApiTopic::Subscriptions], + ); + }); + // Omitted "subscription" overrides default + CommandLineTest::new() + .flag("broadcast", Some("attestations")) + .run() + .with_config(|config| { + 
assert_eq!(config.broadcast_topics, vec![ApiTopic::Attestations]); + }); +} + +#[test] +#[should_panic(expected = "Unknown API topic")] +fn wrong_broadcast_flag() { + CommandLineTest::new() + .flag("broadcast", Some("foo, subscriptions")) + .run() + .with_config(|config| { + assert_eq!( + config.broadcast_topics, + vec![ApiTopic::Blocks, ApiTopic::Subscriptions], + ); }); } diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index e0a1e92d6..fab1cfebf 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -122,6 +122,8 @@ pub fn validator_create_defaults() { specify_voting_keystore_password: false, eth1_withdrawal_address: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, fee_recipient: None, gas_limit: None, bn_url: None, @@ -143,6 +145,8 @@ pub fn validator_create_misc_flags() { .flag("--specify-voting-keystore-password", None) .flag("--eth1-withdrawal-address", Some(EXAMPLE_ETH1_ADDRESS)) .flag("--builder-proposals", Some("true")) + .flag("--prefer-builder-proposals", Some("true")) + .flag("--builder-boost-factor", Some("150")) .flag("--suggested-fee-recipient", Some(EXAMPLE_ETH1_ADDRESS)) .flag("--gas-limit", Some("1337")) .flag("--beacon-node", Some("http://localhost:1001")) @@ -159,6 +163,8 @@ pub fn validator_create_misc_flags() { specify_voting_keystore_password: true, eth1_withdrawal_address: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), builder_proposals: Some(true), + builder_boost_factor: Some(150), + prefer_builder_proposals: Some(true), fee_recipient: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), gas_limit: Some(1337), bn_url: Some(SensitiveUrl::parse("http://localhost:1001").unwrap()), @@ -244,6 +250,8 @@ pub fn validator_move_defaults() { dest_vc_token_path: PathBuf::from("./2.json"), validators: Validators::All, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { @@ -280,6 +288,8 @@ pub fn validator_move_misc_flags_0() { PublicKeyBytes::from_str(EXAMPLE_PUBKEY_1).unwrap(), ]), builder_proposals: Some(true), + builder_boost_factor: None, + prefer_builder_proposals: None, fee_recipient: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), gas_limit: Some(1337), password_source: PasswordSource::Interactive { stdin_inputs: true }, @@ -297,6 +307,7 @@ pub fn validator_move_misc_flags_1() { .flag("--dest-vc-token", Some("./2.json")) .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) .flag("--builder-proposals", Some("false")) + .flag("--prefer-builder-proposals", Some("false")) .assert_success(|config| { let expected = MoveConfig { src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), @@ -307,6 +318,40 @@ pub fn validator_move_misc_flags_1() { PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap() ]), builder_proposals: Some(false), + builder_boost_factor: None, + prefer_builder_proposals: Some(false), + fee_recipient: None, + gas_limit: None, + password_source: PasswordSource::Interactive { + stdin_inputs: cfg!(windows) || false, + }, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_move_misc_flags_2() { + CommandLineTest::validators_move() + .flag("--src-vc-url", Some("http://localhost:1")) + .flag("--src-vc-token", Some("./1.json")) + .flag("--dest-vc-url", Some("http://localhost:2")) + .flag("--dest-vc-token", Some("./2.json")) + .flag("--validators", 
Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--builder-proposals", Some("false")) + .flag("--builder-boost-factor", Some("100")) + .assert_success(|config| { + let expected = MoveConfig { + src_vc_url: SensitiveUrl::parse("http://localhost:1").unwrap(), + src_vc_token_path: PathBuf::from("./1.json"), + dest_vc_url: SensitiveUrl::parse("http://localhost:2").unwrap(), + dest_vc_token_path: PathBuf::from("./2.json"), + validators: Validators::Specific(vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap() + ]), + builder_proposals: Some(false), + builder_boost_factor: Some(100), + prefer_builder_proposals: None, fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { @@ -333,6 +378,8 @@ pub fn validator_move_count() { dest_vc_token_path: PathBuf::from("./2.json"), validators: Validators::Count(42), builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { diff --git a/scripts/ci/check-success-job.sh b/scripts/ci/check-success-job.sh new file mode 100755 index 000000000..dfa5c0325 --- /dev/null +++ b/scripts/ci/check-success-job.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Check that $SUCCESS_JOB depends on all other jobs in the given $YAML + +set -euf -o pipefail + +YAML=$1 +SUCCESS_JOB=$2 + +yq '... comments="" | .jobs | map(. | key) | .[]' < "$YAML" | grep -v "$SUCCESS_JOB" | sort > all_jobs.txt +yq "... comments=\"\" | .jobs.$SUCCESS_JOB.needs[]" < "$YAML" | grep -v "$SUCCESS_JOB" | sort > dep_jobs.txt +diff all_jobs.txt dep_jobs.txt || (echo "COMPLETENESS CHECK FAILED" && exit 1) +rm all_jobs.txt dep_jobs.txt +echo "COMPLETENESS CHECK PASSED" diff --git a/scripts/cli.sh b/scripts/cli.sh new file mode 100755 index 000000000..7ba98d08b --- /dev/null +++ b/scripts/cli.sh @@ -0,0 +1,98 @@ +#! /usr/bin/env bash + +# IMPORTANT +# This script should NOT be run directly. +# Run `make cli` or `make cli-local` from the root of the repository instead. + +set -e + +# A function to generate formatted .md files +write_to_file() { + local cmd="$1" + local file="$2" + local program="$3" + + # Remove first line of cmd to get rid of commit specific numbers. + cmd=${cmd#*$'\n'} + + # We need to add the header and the backticks to create the code block. + printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" +} + +CMD=./target/release/lighthouse + +# Store all help strings in variables. 
+general_cli=$($CMD --help) +bn_cli=$($CMD bn --help) +vc_cli=$($CMD vc --help) +vm_cli=$($CMD vm --help) +vm_cli_create=$($CMD vm create --help) +vm_cli_import=$($CMD vm import --help) +vm_cli_move=$($CMD vm move --help) + +general=./help_general.md +bn=./help_bn.md +vc=./help_vc.md +am=./help_am.md +vm=./help_vm.md +vm_create=./help_vm_create.md +vm_import=./help_vm_import.md +vm_move=./help_vm_move.md + +# create .md files +write_to_file "$general_cli" "$general" "Lighthouse General Commands" +write_to_file "$bn_cli" "$bn" "Beacon Node" +write_to_file "$vc_cli" "$vc" "Validator Client" +write_to_file "$vm_cli" "$vm" "Validator Manager" +write_to_file "$vm_cli_create" "$vm_create" "Validator Manager Create" +write_to_file "$vm_cli_import" "$vm_import" "Validator Manager Import" +write_to_file "$vm_cli_move" "$vm_move" "Validator Manager Move" + +#input 1 = $1 = files; input 2 = $2 = new files +files=(./book/src/help_general.md ./book/src/help_bn.md ./book/src/help_vc.md ./book/src/help_vm.md ./book/src/help_vm_create.md ./book/src/help_vm_import.md ./book/src/help_vm_move.md) +new_files=($general $bn $vc $vm $vm_create $vm_import $vm_move) + +# function to check +check() { + local file="$1" + local new_file="$2" + + if [[ -f $file ]]; then # check for existence of file + diff=$(diff $file $new_file || :) + else + cp $new_file $file + changes=true + echo "$file is not found, it has just been created" + fi + + if [[ -z $diff ]]; then # check for difference + : # do nothing + else + cp $new_file $file + changes=true + echo "$file has been updated" + fi +} + +# define changes as false +changes=false +# call check function to check for each help file +check ${files[0]} ${new_files[0]} +check ${files[1]} ${new_files[1]} +check ${files[2]} ${new_files[2]} +check ${files[3]} ${new_files[3]} +check ${files[4]} ${new_files[4]} +check ${files[5]} ${new_files[5]} +check ${files[6]} ${new_files[6]} + +# remove help files +rm -f help_general.md help_bn.md help_vc.md help_am.md help_vm.md help_vm_create.md help_vm_import.md help_vm_move.md + +# only exit at the very end +if [[ $changes == true ]]; then + echo "Exiting with error to indicate changes occurred. To fix, run 'make cli-local' or 'make cli' and commit the changes." + exit 1 +else + echo "CLI help texts are up to date." + exit 0 +fi diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index f261ea67f..2862fde07 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -5,58 +5,105 @@ This setup can be useful for testing and development. ## Requirements -The scripts require `lcli`, `lighthouse`, `geth`, `bootnode` to be installed on `PATH`. +The scripts require `lcli`, `lighthouse`, `geth`, `bootnode` to be installed on `PATH` (run `echo $PATH` to view all `PATH` directories). MacOS users need to install GNU `sed` and GNU `grep`, and add them both to `PATH` as well. -From the -root of this repository, run: +The first step is to install Rust and dependencies. Refer to the [Lighthouse Book](https://lighthouse-book.sigmaprime.io/installation-source.html#dependencies) for installation. We will also need [jq](https://jqlang.github.io/jq/), which can be installed with `sudo apt install jq`. + +Then, we clone the Lighthouse repository: +```bash +cd ~ +git clone https://github.com/sigp/lighthouse.git +cd lighthouse +``` +We are now ready to build Lighthouse. Run the command: ```bash make make install-lcli ``` +This will build `lighthouse` and `lcli`. 
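Before fetching `geth`, it can be worth sanity-checking the build; a minimal check that both binaries are reachable (assuming cargo's default install directory is already on `PATH`):

```bash
# Confirm the freshly built binaries are on PATH.
lighthouse --version
lcli --help
```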
+
+For `geth` and `bootnode`, go to the [geth website](https://geth.ethereum.org/downloads) and download the `Geth & Tools`. For example, to download and extract `Geth & Tools 1.13.1`:
+
+```bash
+cd ~
+curl -LO https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz
+tar xvf geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz
+```
+
+After extraction, copy `geth` and `bootnode` to a directory on `PATH`. A typical directory is `/usr/local/bin`.
+
+```bash
+cd geth-alltools-linux-amd64-1.13.1-3f40e65c
+sudo cp geth bootnode /usr/local/bin
+```
+
+After that, we can remove the downloaded files:
+
+```bash
+cd ~
+rm -r geth-alltools-linux-amd64-1.13.1-3f40e65c geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz
+```
+
+We are now ready to start a local testnet.
+
 ## Starting the testnet
 
-Modify `vars.env` as desired.
+To start a testnet using the predetermined settings:
+
+```bash
+cd ~
+cd ./lighthouse/scripts/local_testnet
+./start_local_testnet.sh genesis.json
+```
+
+This will execute the script, and if the testnet setup is successful, you will see "Started!" at the end. The testnet starts with a post-merge genesis state.
 
-Start a consensus layer and execution layer boot node along with `BN_COUNT`
-number of beacon nodes each connected to a geth execution client and `VC_COUNT` validator clients.
+The testnet starts a consensus layer and execution layer boot node, along with `BN_COUNT`
+beacon nodes (each connected to a geth execution client) and `VC_COUNT` validator clients. By default, `BN_COUNT=4`, `VC_COUNT=4`.
 
 The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. It also takes a mandatory `GENESIS_FILE` for initialising geth's state. A sample `genesis.json` is provided in this directory.
 
-The `ETH1_BLOCK_HASH` environment variable is set to the block_hash of the genesis execution layer block which depends on the contents of `genesis.json`. Users of these scripts need to ensure that the `ETH1_BLOCK_HASH` variable is updated if genesis file is modified.
-
 The options may be in any order or absent in which case they take the default value specified.
 - VC_COUNT: the number of validator clients to create, default: `BN_COUNT`
 - DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info`
+The `ETH1_BLOCK_HASH` environment variable is set to the block_hash of the genesis execution layer block, which depends on the contents of `genesis.json`. Users of these scripts need to ensure that the `ETH1_BLOCK_HASH` variable is updated if the genesis file is modified.
+
+To view the beacon, validator client and geth logs:
 ```bash
-./start_local_testnet.sh genesis.json
+tail -f ~/.lighthouse/local-testnet/testnet/beacon_node_1.log
+tail -f ~/.lighthouse/local-testnet/testnet/validator_node_1.log
+tail -f ~/.lighthouse/local-testnet/testnet/geth_1.log
 ```
+where `beacon_node_1` can be changed to `beacon_node_2`, `beacon_node_3` or `beacon_node_4` to view logs for different beacon nodes. The same applies to validator clients and geth nodes.
+
 ## Stopping the testnet
 
-This is not necessary before `start_local_testnet.sh` as it invokes `stop_local_testnet.sh` automatically.
+To stop the testnet, navigate to the `~/lighthouse/scripts/local_testnet` directory, then run the command:
+
 ```bash
 ./stop_local_testnet.sh
 ```
+Once a testnet is stopped, it cannot be continued from where it left off. Running `./start_local_testnet.sh` again will start a new local testnet.
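+
+For example, a stop-and-restart session might look like the following (a sketch assuming the repository was cloned to `~/lighthouse` as above):
+
+```bash
+cd ~/lighthouse/scripts/local_testnet
+./stop_local_testnet.sh
+# A subsequent start always begins from a fresh genesis state rather than resuming:
+./start_local_testnet.sh genesis.json
+```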
+
 ## Manual creation of local testnet
 
-These scripts are used by ./start_local_testnet.sh and may be used to manually
+In [Starting the testnet](./README.md#starting-the-testnet), the testnet is started automatically with predetermined parameters (database directory, ports used, etc.). This section describes how to modify the local testnet settings, e.g., changing the database directory or changing the ports used.
 
-Assuming you are happy with the configuration in `vars.env`,
-create the testnet directory, genesis state with embedded validators and validator keys with:
+
+The testnet also contains parameters that are specified in `vars.env`, such as the slot time `SECONDS_PER_SLOT=3` (instead of 12 seconds on mainnet). You may change these parameters to suit your testing purposes. After that, in the `local_testnet` directory, run the following commands to create the genesis state with embedded validators and validator keys, and to update the fork times in `genesis.json`:
 
 ```bash
 ./setup.sh
+./setup_time.sh genesis.json
 ```
 
 Note: The generated genesis validators are embedded into the genesis state as genesis validators and hence do not require manual deposits to activate.
@@ -73,17 +120,17 @@
 Start a geth node:
 ```
 ```
 e.g.
 ```bash
-./geth.sh $HOME/.lighthouse/local-testnet/geth_1 5000 6000 7000 genesis.json
+./geth.sh $HOME/.lighthouse/local-testnet/geth_1 7001 6001 5001 genesis.json
 ```
 
 Start a beacon node:
 ```bash
-./beacon_node.sh
+./beacon_node.sh
 ```
 e.g.
 ```bash
-./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 http://localhost:6000 ~/.lighthouse/local-testnet/geth_1/geth/jwtsecret
+./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9001 9101 8001 http://localhost:5001 ~/.lighthouse/local-testnet/geth_1/geth/jwtsecret
 ```
 
 In a new terminal, start the validator client which will attach to the first
@@ -94,10 +141,16 @@ beacon node:
 ```
 e.g. to attach to the above created beacon node
 ```bash
-./validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000
+./validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8001
 ```
 
-You can create additional beacon node and validator client instances with appropriate parameters.
+You can create additional geth, beacon node and validator client instances by changing the ports, e.g., for a second geth, beacon node and validator client:
+
+```bash
+./geth.sh $HOME/.lighthouse/local-testnet/geth_2 7002 6002 5002 genesis.json
+./beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9002 9102 8002 http://localhost:5002 ~/.lighthouse/local-testnet/geth_2/geth/jwtsecret
+./validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8002
+```
 
 ## Additional Info
 
@@ -109,7 +162,7 @@
 instances using the `--datadir` parameter.
 
 ### Starting fresh
 
-Delete the current testnet and all related files using. Generally not necessary as `start_local_test.sh` does this each time it starts.
+You can delete the current testnet and all related files using the following command. Alternatively, if you wish to start another testnet, following the steps in [Starting the testnet](./README.md#starting-the-testnet) will automatically delete the old files and start a fresh local testnet.
 
 ```bash
 ./clean.sh
@@ -128,3 +181,15 @@
 Update the genesis time to now using:
 
 > Note: you probably want to just rerun `./start_local_testnet.sh` to start over
 > but this is another option.
+
+### Testing builder flow
+
+1. Add the builder URL to `BN_ARGS` in `./vars.env`, e.g. `--builder http://localhost:8650`.
Some mock builder server options: + - [`mock-relay`](https://github.com/realbigsean/mock-relay) + - [`dummy-builder`](https://github.com/michaelsproul/dummy_builder) +2. The above mock builders do not support non-mainnet presets as of now, and will require setting `SECONDS_PER_SLOT` and `SECONDS_PER_ETH1_BLOCK` to `12` in `./vars.env`. +3. Start the testnet with the following command (the `-p` flag enables the validator client `--builder-proposals` flag): + ```bash + ./start_local_testnet.sh -p genesis.json + ``` +4. Block production using builder flow will start at epoch 4. diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh index 70a36614c..940fe2b85 100755 --- a/scripts/local_testnet/beacon_node.sh +++ b/scripts/local_testnet/beacon_node.sh @@ -66,4 +66,5 @@ exec $lighthouse_binary \ --disable-packet-filter \ --target-peers $((BN_COUNT - 1)) \ --execution-endpoint $execution_endpoint \ - --execution-jwt $execution_jwt + --execution-jwt $execution_jwt \ + $BN_ARGS diff --git a/scripts/local_testnet/el_bootnode.sh b/scripts/local_testnet/el_bootnode.sh index d73a463f6..ee437a491 100755 --- a/scripts/local_testnet/el_bootnode.sh +++ b/scripts/local_testnet/el_bootnode.sh @@ -1,3 +1,3 @@ priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91" source ./vars.env -$EL_BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file +exec $EL_BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file diff --git a/scripts/local_testnet/genesis.json b/scripts/local_testnet/genesis.json index 3ac553e55..eda3b312f 100644 --- a/scripts/local_testnet/genesis.json +++ b/scripts/local_testnet/genesis.json @@ -13,6 +13,7 @@ "londonBlock": 0, "mergeNetsplitBlock": 0, "shanghaiTime": 0, + "cancunTime": 0, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true }, @@ -858,4 +859,4 @@ "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "timestamp": "1662465600" -} \ No newline at end of file +} diff --git a/scripts/local_testnet/geth.sh b/scripts/local_testnet/geth.sh index d3923cdd8..5dc4575cf 100755 --- a/scripts/local_testnet/geth.sh +++ b/scripts/local_testnet/geth.sh @@ -33,7 +33,6 @@ http_port=${@:$OPTIND+2:1} auth_port=${@:$OPTIND+3:1} genesis_file=${@:$OPTIND+4:1} - # Init $GETH_BINARY init \ --datadir $data_dir \ @@ -51,4 +50,4 @@ exec $GETH_BINARY \ --bootnodes $EL_BOOTNODE_ENODE \ --port $network_port \ --http.port $http_port \ - --authrpc.port $auth_port \ No newline at end of file + --authrpc.port $auth_port diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index e026ba1c0..d7a6016aa 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -28,10 +28,11 @@ lcli \ --altair-fork-epoch $ALTAIR_FORK_EPOCH \ --bellatrix-fork-epoch $BELLATRIX_FORK_EPOCH \ --capella-fork-epoch $CAPELLA_FORK_EPOCH \ + --deneb-fork-epoch $DENEB_FORK_EPOCH \ --ttd $TTD \ --eth1-block-hash $ETH1_BLOCK_HASH \ --eth1-id $CHAIN_ID \ - --eth1-follow-distance 1 \ + --eth1-follow-distance 128 \ --seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ --proposer-score-boost "$PROPOSER_SCORE_BOOST" \ diff --git a/scripts/local_testnet/setup_time.sh b/scripts/local_testnet/setup_time.sh new file mode 100755 index 000000000..21a8ae7ac --- /dev/null +++ b/scripts/local_testnet/setup_time.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +source 
./vars.env
+
+# Function to output SLOT_PER_EPOCH for the given spec preset (mainnet, minimal or gnosis)
+get_spec_preset_value() {
+  case "$SPEC_PRESET" in
+    mainnet) echo 32 ;;
+    minimal) echo 8 ;;
+    gnosis) echo 16 ;;
+    *) echo "Unsupported preset: $SPEC_PRESET" >&2; exit 1 ;;
+  esac
+}
+
+SLOT_PER_EPOCH=$(get_spec_preset_value $SPEC_PRESET)
+echo "slot_per_epoch=$SLOT_PER_EPOCH"
+
+genesis_file=$1
+
+# Update future hardfork times in the EL genesis file based on the CL genesis time
+GENESIS_TIME=$(lcli pretty-ssz --spec $SPEC_PRESET --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d')
+echo $GENESIS_TIME
+CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT)))
+echo $CAPELLA_TIME
+sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file
+CANCUN_TIME=$((GENESIS_TIME + (DENEB_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT)))
+echo $CANCUN_TIME
+sed -i 's/"cancunTime".*$/"cancunTime": '"$CANCUN_TIME"',/g' $genesis_file
+cat $genesis_file
+
diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh
index fdf9ae172..512b1e98d 100755
--- a/scripts/local_testnet/start_local_testnet.sh
+++ b/scripts/local_testnet/start_local_testnet.sh
@@ -102,17 +102,15 @@ execute_command_add_PID() {
 echo "executing: ./setup.sh >> $LOG_DIR/setup.log"
 ./setup.sh >> $LOG_DIR/setup.log 2>&1
-# Update future hardforks time in the EL genesis file based on the CL genesis time
-GENESIS_TIME=$(lcli pretty-ssz --spec $SPEC_PRESET --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d')
-echo $GENESIS_TIME
-CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT)))
-echo $CAPELLA_TIME
-sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file
-cat $genesis_file
+# Call setup_time.sh to update future hardfork times in the EL genesis file based on the CL genesis time
+./setup_time.sh genesis.json
 
 # Delay to let boot_enr.yaml to be created
 execute_command_add_PID bootnode.log ./bootnode.sh
-sleeping 1
+sleeping 3
+
+execute_command_add_PID el_bootnode.log ./el_bootnode.sh
+sleeping 3
 execute_command_add_PID el_bootnode.log ./el_bootnode.sh
 sleeping 1
@@ -135,6 +133,7 @@
 sleeping 20
 # Reset the `genesis.json` config file fork times.
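+# (setup_time.sh wrote run-specific fork times into genesis.json above; resetting them
+# to zero restores the file to its committed template values for the next run.)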
sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' $genesis_file +sed -i 's/"cancunTime".*$/"cancunTime": 0,/g' $genesis_file for (( bn=1; bn<=$BN_COUNT; bn++ )); do secret=$DATADIR/geth_datadir$bn/geth/jwtsecret diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 6e05f0c41..31274d2c5 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -45,6 +45,7 @@ CHAIN_ID=4242 ALTAIR_FORK_EPOCH=0 BELLATRIX_FORK_EPOCH=0 CAPELLA_FORK_EPOCH=1 +DENEB_FORK_EPOCH=2 TTD=0 @@ -55,10 +56,13 @@ SPEC_PRESET=mainnet SECONDS_PER_SLOT=3 # Seconds per Eth1 block -SECONDS_PER_ETH1_BLOCK=1 +SECONDS_PER_ETH1_BLOCK=3 # Proposer score boost percentage PROPOSER_SCORE_BOOST=40 +# Command line arguments for beacon node client +BN_ARGS="" + # Command line arguments for validator client VC_ARGS="" diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 1eefa7cf5..e13c06cdb 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -49,8 +49,6 @@ exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_data sleep 20 -echo "Starting local beacon nodes" - exit_if_fails ../local_testnet/beacon_node.sh -d debug $HOME/.lighthouse/local-testnet/node_1 8000 7000 9000 http://localhost:4000 $HOME/.lighthouse/local-testnet/geth_datadir1/geth/jwtsecret &> /dev/null & exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 8100 7100 9100 http://localhost:4100 $HOME/.lighthouse/local-testnet/geth_datadir2/geth/jwtsecret &> /dev/null & exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 8200 7200 9200 http://localhost:4200 $HOME/.lighthouse/local-testnet/geth_datadir3/geth/jwtsecret &> /dev/null & diff --git a/scripts/tests/genesis.json b/scripts/tests/genesis.json index ec3cd1e81..83f45f1a0 100644 --- a/scripts/tests/genesis.json +++ b/scripts/tests/genesis.json @@ -12,10 +12,15 @@ "berlinBlock": 0, "londonBlock": 0, "mergeForkBlock": 0, + "shanghaiTime": 0, + "shardingForkTime": 0, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true }, "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x6d6172697573766477000000" + }, "0x0000000000000000000000000000000000000000": { "balance": "1" }, @@ -848,4 +853,4 @@ "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "timestamp": "1662465600" -} \ No newline at end of file +} diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index a7e696ec0..98ae08f07 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -16,7 +16,7 @@ DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 GENESIS_FORK_VERSION=0x42424242 # Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=16ef16304456fdacdeb272bd70207021031db355ed6c5e44ebd34c1ab757e221 +ETH1_BLOCK_HASH=add7865f8346031c72287e2edc4a4952fd34fc0a8642403e8c1bce67f215c92b VALIDATOR_COUNT=80 GENESIS_VALIDATOR_COUNT=80 @@ -41,7 +41,7 @@ CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=0 BELLATRIX_FORK_EPOCH=0 -CAPELLA_FORK_EPOCH=18446744073709551615 +CAPELLA_FORK_EPOCH=1 DENEB_FORK_EPOCH=18446744073709551615 TTD=0 @@ -58,5 +58,8 @@ SECONDS_PER_ETH1_BLOCK=1 # Proposer score boost percentage PROPOSER_SCORE_BOOST=70 +# Command line arguments for beacon node client +BN_ARGS="" + # Enable doppelganger detection VC_ARGS=" 
--enable-doppelganger-protection " \ No newline at end of file diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 9df77daa1..90fb54cd1 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -8,6 +8,7 @@ edition = { workspace = true } default = ["lmdb"] mdbx = ["dep:mdbx"] lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] +portable = ["types/portable"] [dependencies] bincode = { workspace = true } @@ -23,7 +24,6 @@ parking_lot = { workspace = true } rand = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } -serde_derive = "1.0" slog = { workspace = true } sloggers = { workspace = true } tree_hash = { workspace = true } diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 4deb38912..91c8f373f 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -4,7 +4,7 @@ use crate::{ SlasherDB, }; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::borrow::Borrow; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::convert::TryFrom; @@ -159,9 +159,8 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn config: &Config, ) -> Result, Error> { let disk_key = config.disk_key(validator_chunk_index, chunk_index); - let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes())? { - Some(chunk_bytes) => chunk_bytes, - None => return Ok(None), + let Some(chunk_bytes) = txn.get(Self::select_db(db), &disk_key.to_be_bytes())? else { + return Ok(None); }; let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes.borrow()))?; @@ -448,11 +447,9 @@ pub fn apply_attestation_for_validator( return Ok(slashing_status); } - let mut start_epoch = if let Some(start_epoch) = + let Some(mut start_epoch) = T::first_start_epoch(attestation.data.source.epoch, current_epoch, config) - { - start_epoch - } else { + else { return Ok(slashing_status); }; @@ -536,12 +533,10 @@ pub fn epoch_update_for_validator( current_epoch: Epoch, config: &Config, ) -> Result<(), Error> { - let previous_current_epoch = - if let Some(epoch) = db.get_current_epoch_for_validator(validator_index, txn)? { - epoch - } else { - return Ok(()); - }; + let Some(previous_current_epoch) = db.get_current_epoch_for_validator(validator_index, txn)? 
+ else { + return Ok(()); + }; let mut epoch = previous_current_epoch; diff --git a/slasher/src/block_queue.rs b/slasher/src/block_queue.rs index 3d2472c18..b91ceba89 100644 --- a/slasher/src/block_queue.rs +++ b/slasher/src/block_queue.rs @@ -1,17 +1,18 @@ use parking_lot::Mutex; +use std::collections::HashSet; use types::SignedBeaconBlockHeader; #[derive(Debug, Default)] pub struct BlockQueue { - blocks: Mutex>, + blocks: Mutex>, } impl BlockQueue { pub fn queue(&self, block_header: SignedBeaconBlockHeader) { - self.blocks.lock().push(block_header) + self.blocks.lock().insert(block_header); } - pub fn dequeue(&self) -> Vec { + pub fn dequeue(&self) -> HashSet { let mut blocks = self.blocks.lock(); std::mem::take(&mut *blocks) } diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 361621d17..4fd74343e 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -1,7 +1,9 @@ use crate::Error; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; +use std::num::NonZeroUsize; use std::path::PathBuf; use strum::{Display, EnumString, EnumVariantNames}; +use types::non_zero_usize::new_non_zero_usize; use types::{Epoch, EthSpec, IndexedAttestation}; pub const DEFAULT_CHUNK_SIZE: usize = 16; @@ -10,7 +12,7 @@ pub const DEFAULT_HISTORY_LENGTH: usize = 4096; pub const DEFAULT_UPDATE_PERIOD: u64 = 12; pub const DEFAULT_SLOT_OFFSET: f64 = 10.5; pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB -pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: usize = 100_000; +pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(100_000); pub const DEFAULT_BROADCAST: bool = false; #[cfg(all(feature = "mdbx", not(feature = "lmdb")))] @@ -38,7 +40,7 @@ pub struct Config { /// Maximum size of the database in megabytes. pub max_db_size_mbs: usize, /// Maximum size of the in-memory cache for attestation roots. - pub attestation_root_cache_size: usize, + pub attestation_root_cache_size: NonZeroUsize, /// Whether to broadcast slashings found to the network. pub broadcast: bool, /// Database backend to use. 
diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 16fe22504..8bc36d008 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -9,6 +9,7 @@ edition = { workspace = true } ef_tests = [] milagro = ["bls/milagro"] fake_crypto = ["bls/fake_crypto"] +portable = ["beacon_chain/portable"] [dependencies] bls = { workspace = true } @@ -17,11 +18,14 @@ compare_fields_derive = { workspace = true } derivative = { workspace = true } ethereum-types = { workspace = true } hex = { workspace = true } +kzg = { workspace = true } rayon = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" +serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } tree_hash = { workspace = true } @@ -36,3 +40,4 @@ beacon_chain = { workspace = true } store = { workspace = true } fork_choice = { workspace = true } execution_layer = { workspace = true } +logging = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 81a1739eb..e42db1801 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-rc.4 +TESTS_TAG := v1.4.0-beta.4 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index b52d15522..a5ab897c3 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -41,8 +41,6 @@ excluded_paths = [ "tests/.*/.*/ssz_static/LightClientFinalityUpdate", # LightClientHeader "tests/.*/.*/ssz_static/LightClientHeader", - # Deneb (previously known as eip4844) tests are disabled for now. - "tests/.*/deneb", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. 
@@ -52,7 +50,8 @@ excluded_paths = [ # some bls tests are not included now "bls12-381-tests/deserialization_G1", "bls12-381-tests/deserialization_G2", - "bls12-381-tests/hash_to_G2" + "bls12-381-tests/hash_to_G2", + "tests/.*/eip6110" ] diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 216912a4f..f328fa640 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -18,6 +18,12 @@ mod fork; mod fork_choice; mod genesis_initialization; mod genesis_validity; +mod kzg_blob_to_kzg_commitment; +mod kzg_compute_blob_kzg_proof; +mod kzg_compute_kzg_proof; +mod kzg_verify_blob_kzg_proof; +mod kzg_verify_blob_kzg_proof_batch; +mod kzg_verify_kzg_proof; mod merkle_proof_validity; mod operations; mod rewards; @@ -42,6 +48,12 @@ pub use epoch_processing::*; pub use fork::ForkTest; pub use genesis_initialization::*; pub use genesis_validity::*; +pub use kzg_blob_to_kzg_commitment::*; +pub use kzg_compute_blob_kzg_proof::*; +pub use kzg_compute_kzg_proof::*; +pub use kzg_verify_blob_kzg_proof::*; +pub use kzg_verify_blob_kzg_proof_batch::*; +pub use kzg_verify_kzg_proof::*; pub use merkle_proof_validity::*; pub use operations::*; pub use rewards::RewardsTest; diff --git a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs index 53387ee4d..c1085e070 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, Signature}; -use serde_derive::Deserialize; +use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsAggregateSigs { diff --git a/testing/ef_tests/src/cases/bls_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_aggregate_verify.rs index e9539dc15..0e006d95c 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use types::Hash256; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/bls_batch_verify.rs b/testing/ef_tests/src/cases/bls_batch_verify.rs index de8721d67..703444c98 100644 --- a/testing/ef_tests/src/cases/bls_batch_verify.rs +++ b/testing/ef_tests/src/cases/bls_batch_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{verify_signature_sets, BlsWrappedSignature, PublicKeyBytes, Signature, SignatureSet}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::borrow::Cow; use std::str::FromStr; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs index c41fbca39..8783aa141 100644 --- a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs +++ b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregatePublicKey, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsEthAggregatePubkeys { diff --git a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs index 
80e018459..0fb3a026c 100644 --- a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs index 608995db9..dcdc1bd19 100644 --- a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_sign_msg.rs b/testing/ef_tests/src/cases/bls_sign_msg.rs index 53c13b569..6479fabe4 100644 --- a/testing/ef_tests/src/cases/bls_sign_msg.rs +++ b/testing/ef_tests/src/cases/bls_sign_msg.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::SecretKey; -use serde_derive::Deserialize; +use serde::Deserialize; use types::Hash256; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs index 779b3cf75..24b62c5fa 100644 --- a/testing/ef_tests/src/cases/bls_verify_msg.rs +++ b/testing/ef_tests/src/cases/bls_verify_msg.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{PublicKeyBytes, Signature, SignatureBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index a59ccb34a..2a7c99875 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -1,4 +1,4 @@ -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; @@ -64,8 +64,9 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { match fork_name { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, - ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. - ForkName::Capella => ForkName::Merge, // TODO: Check this when tests are released.. 
+ ForkName::Merge => ForkName::Altair, + ForkName::Capella => ForkName::Merge, + ForkName::Deneb => ForkName::Capella, } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 5e7118715..cf182af2b 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -4,7 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::per_epoch_processing::capella::process_historical_summaries_update; use state_processing::per_epoch_processing::effective_balance_updates::process_effective_balance_updates; use state_processing::per_epoch_processing::{ @@ -101,7 +101,10 @@ impl EpochTransition for JustificationAndFinalization { justification_and_finalization_state.apply_changes_to_state(state); Ok(()) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => { let justification_and_finalization_state = altair::process_justification_and_finalization( state, @@ -122,13 +125,14 @@ impl EpochTransition for RewardsAndPenalties { validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &validator_statuses, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - altair::process_rewards_and_penalties( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => altair::process_rewards_and_penalties( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } } @@ -151,7 +155,10 @@ impl EpochTransition for Slashings { spec, )?; } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => { process_slashings( state, altair::ParticipationCache::new(state, spec) @@ -203,7 +210,9 @@ impl EpochTransition for HistoricalRootsUpdate { impl EpochTransition for HistoricalSummariesUpdate { fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { - BeaconState::Capella(_) => process_historical_summaries_update(state), + BeaconState::Capella(_) | BeaconState::Deneb(_) => { + process_historical_summaries_update(state) + } _ => Ok(()), } } @@ -223,9 +232,10 @@ impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - altair::process_sync_committee_updates(state, spec) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => altair::process_sync_committee_updates(state, spec), } } } @@ -234,13 +244,14 @@ impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - altair::process_inactivity_updates( - state, - 
&altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => altair::process_inactivity_updates( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } } @@ -249,9 +260,10 @@ impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - altair::process_participation_flag_updates(state) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => altair::process_participation_flag_updates(state), } } } @@ -302,7 +314,7 @@ impl> Case for EpochProcessing { T::name() != "participation_record_updates" && T::name() != "historical_summaries_update" } - ForkName::Capella => { + ForkName::Capella | ForkName::Deneb => { T::name() != "participation_record_updates" && T::name() != "historical_roots_update" } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index 52157d32f..bc340fa1c 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -2,8 +2,10 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; -use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; +use serde::Deserialize; +use state_processing::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, +}; use types::{BeaconState, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] @@ -62,6 +64,7 @@ impl Case for ForkTest { ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), + ForkName::Deneb => upgrade_to_deneb(&mut result_state, spec).map(|_| result_state), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index c4f288a8a..9884a709e 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -1,13 +1,19 @@ use super::*; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use ::fork_choice::PayloadVerificationStatus; +use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError}; +use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; +use beacon_chain::blob_verification::GossipBlobError; +use beacon_chain::chain_config::{ + DisallowedReOrgOffsets, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, +}; use beacon_chain::slot_clock::SlotClock; use beacon_chain::{ attestation_verification::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, + blob_verification::GossipVerifiedBlob, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer, + AvailabilityProcessingStatus, BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer, }; use 
execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -17,9 +23,9 @@ use std::future::Future; use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, - ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, ProgressiveBalancesMode, - SignedBeaconBlock, Slot, Uint256, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlobSidecar, BlobsList, Checkpoint, + EthSpec, ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, KzgProof, + ProgressiveBalancesMode, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -37,6 +43,13 @@ pub struct Head { root: Hash256, } +#[derive(Debug, Clone, Copy, PartialEq, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ShouldOverrideFcu { + validator_is_connected: bool, + result: bool, +} + #[derive(Debug, Clone, Deserialize)] #[serde(deny_unknown_fields)] pub struct Checks { @@ -49,6 +62,8 @@ pub struct Checks { u_justified_checkpoint: Option, u_finalized_checkpoint: Option, proposer_boost_root: Option, + get_proposer_head: Option, + should_override_forkchoice_update: Option, } #[derive(Debug, Clone, Deserialize)] @@ -71,25 +86,27 @@ impl From for PayloadStatusV1 { #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step { +pub enum Step { Tick { tick: u64, }, ValidBlock { - block: B, + block: TBlock, }, MaybeValidBlock { - block: B, + block: TBlock, + blobs: Option, + proofs: Option>, valid: bool, }, Attestation { - attestation: A, + attestation: TAttestation, }, AttesterSlashing { - attester_slashing: AS, + attester_slashing: TAttesterSlashing, }, PowBlock { - pow_block: P, + pow_block: TPowBlock, }, OnPayloadInfo { block_hash: ExecutionBlockHash, @@ -113,7 +130,9 @@ pub struct ForkChoiceTest { pub anchor_state: BeaconState, pub anchor_block: BeaconBlock, #[allow(clippy::type_complexity)] - pub steps: Vec, Attestation, AttesterSlashing, PowBlock>>, + pub steps: Vec< + Step, BlobsList, Attestation, AttesterSlashing, PowBlock>, + >, } impl LoadCase for ForkChoiceTest { @@ -126,7 +145,7 @@ impl LoadCase for ForkChoiceTest { .expect("path must be valid OsStr") .to_string(); let spec = &testing_spec::(fork_name); - let steps: Vec> = + let steps: Vec> = yaml_decode_file(&path.join("steps.yaml"))?; // Resolve the object names in `steps.yaml` into actual decoded block/attestation objects. 
let steps = steps @@ -139,11 +158,25 @@ impl LoadCase for ForkChoiceTest { }) .map(|block| Step::ValidBlock { block }) } - Step::MaybeValidBlock { block, valid } => { - ssz_decode_file_with(&path.join(format!("{}.ssz_snappy", block)), |bytes| { - SignedBeaconBlock::from_ssz_bytes(bytes, spec) + Step::MaybeValidBlock { + block, + blobs, + proofs, + valid, + } => { + let block = + ssz_decode_file_with(&path.join(format!("{block}.ssz_snappy")), |bytes| { + SignedBeaconBlock::from_ssz_bytes(bytes, spec) + })?; + let blobs = blobs + .map(|blobs| ssz_decode_file(&path.join(format!("{blobs}.ssz_snappy")))) + .transpose()?; + Ok(Step::MaybeValidBlock { + block, + blobs, + proofs, + valid, }) - .map(|block| Step::MaybeValidBlock { block, valid }) } Step::Attestation { attestation } => { ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))) @@ -204,10 +237,15 @@ impl Case for ForkChoiceTest { for step in &self.steps { match step { Step::Tick { tick } => tester.set_tick(*tick), - Step::ValidBlock { block } => tester.process_block(block.clone(), true)?, - Step::MaybeValidBlock { block, valid } => { - tester.process_block(block.clone(), *valid)? + Step::ValidBlock { block } => { + tester.process_block(block.clone(), None, None, true)? } + Step::MaybeValidBlock { + block, + blobs, + proofs, + valid, + } => tester.process_block(block.clone(), blobs.clone(), proofs.clone(), *valid)?, Step::Attestation { attestation } => tester.process_attestation(attestation)?, Step::AttesterSlashing { attester_slashing } => { tester.process_attester_slashing(attester_slashing) @@ -232,6 +270,8 @@ impl Case for ForkChoiceTest { u_justified_checkpoint, u_finalized_checkpoint, proposer_boost_root, + get_proposer_head, + should_override_forkchoice_update: should_override_fcu, } = checks.as_ref(); if let Some(expected_head) = head { @@ -270,6 +310,14 @@ impl Case for ForkChoiceTest { if let Some(expected_proposer_boost_root) = proposer_boost_root { tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?; } + + if let Some(should_override_fcu) = should_override_fcu { + tester.check_should_override_fcu(*should_override_fcu)?; + } + + if let Some(expected_proposer_head) = get_proposer_head { + tester.check_expected_proposer_head(*expected_proposer_head)?; + } } } } @@ -300,7 +348,8 @@ impl Tester { )); } - let harness = BeaconChainHarness::builder(E::default()) + let harness = BeaconChainHarness::>::builder(E::default()) + .logger(logging::test_logger()) .spec(spec.clone()) .keypairs(vec![]) .chain_config(ChainConfig { @@ -380,16 +429,79 @@ impl Tester { .unwrap(); } - pub fn process_block(&self, block: SignedBeaconBlock, valid: bool) -> Result<(), Error> { + pub fn process_block( + &self, + block: SignedBeaconBlock, + blobs: Option>, + kzg_proofs: Option>, + valid: bool, + ) -> Result<(), Error> { let block_root = block.canonical_root(); + + let mut blob_success = true; + + // Convert blobs and kzg_proofs into sidecars, then plumb them into the availability tracker + if let Some(blobs) = blobs.clone() { + let proofs = kzg_proofs.unwrap(); + let commitments = block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .clone(); + + // Zipping will stop when any of the zipped lists runs out, which is what we want. Some + // of the tests don't provide enough proofs/blobs, and should fail the availability + // check. 
+ for (i, ((blob, kzg_proof), kzg_commitment)) in blobs + .into_iter() + .zip(proofs) + .zip(commitments.into_iter()) + .enumerate() + { + let blob_sidecar = Arc::new(BlobSidecar { + index: i as u64, + blob, + kzg_commitment, + kzg_proof, + signed_block_header: block.signed_block_header(), + kzg_commitment_inclusion_proof: block + .message() + .body() + .kzg_commitment_merkle_proof(i) + .unwrap(), + }); + + let chain = self.harness.chain.clone(); + let blob = + match GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, &chain) + { + Ok(gossip_verified_blob) => gossip_verified_blob, + Err(GossipBlobError::KzgError(_)) => { + blob_success = false; + GossipVerifiedBlob::__assumed_valid(blob_sidecar) + } + Err(_) => GossipVerifiedBlob::__assumed_valid(blob_sidecar), + }; + let result = + self.block_on_dangerous(self.harness.chain.process_gossip_blob(blob))?; + if valid { + assert!(result.is_ok()); + } + } + }; + let block = Arc::new(block); - let result = self.block_on_dangerous(self.harness.chain.process_block( - block_root, - block.clone(), - NotifyExecutionLayer::Yes, - || Ok(()), - ))?; - if result.is_ok() != valid { + let result: Result, _> = self + .block_on_dangerous(self.harness.chain.process_block( + block_root, + block.clone(), + NotifyExecutionLayer::Yes, + || Ok(()), + ))? + .map(|avail: AvailabilityProcessingStatus| avail.try_into()); + let success = blob_success && result.as_ref().map_or(false, |inner| inner.is_ok()); + if success != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. result: {:?}", block_root, @@ -401,8 +513,8 @@ impl Tester { // Apply invalid blocks directly against the fork choice `on_block` function. This ensures // that the block is being rejected by `on_block`, not just some upstream block processing - // function. - if !valid { + // function. When blobs exist, we don't do this. + if !valid && blobs.is_none() { // A missing parent block whilst `valid == false` means the test should pass. if let Some(parent_block) = self .harness @@ -625,6 +737,82 @@ impl Tester { expected_proposer_boost_root, ) } + + pub fn check_expected_proposer_head( + &self, + expected_proposer_head: Hash256, + ) -> Result<(), Error> { + let mut fc = self.harness.chain.canonical_head.fork_choice_write_lock(); + let slot = self.harness.chain.slot().unwrap(); + let canonical_head = fc.get_head(slot, &self.harness.spec).unwrap(); + let proposer_head_result = fc.get_proposer_head( + slot, + canonical_head, + DEFAULT_RE_ORG_THRESHOLD, + &DisallowedReOrgOffsets::default(), + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + let proposer_head = match proposer_head_result { + Ok(head) => head.parent_node.root, + Err(ProposerHeadError::DoNotReOrg(_)) => canonical_head, + _ => panic!("Unexpected error in get proposer head"), + }; + + check_equal("proposer_head", proposer_head, expected_proposer_head) + } + + pub fn check_should_override_fcu( + &self, + expected_should_override_fcu: ShouldOverrideFcu, + ) -> Result<(), Error> { + // Determine proposer. + let cached_head = self.harness.chain.canonical_head.cached_head(); + let next_slot = cached_head.snapshot.beacon_block.slot() + 1; + let next_slot_epoch = next_slot.epoch(E::slots_per_epoch()); + let (proposer_indices, decision_root, _, fork) = + compute_proposer_duties_from_head(next_slot_epoch, &self.harness.chain).unwrap(); + let proposer_index = proposer_indices[next_slot.as_usize() % E::slots_per_epoch() as usize]; + + // Ensure the proposer index cache is primed. 
+ self.harness + .chain + .beacon_proposer_cache + .lock() + .insert(next_slot_epoch, decision_root, proposer_indices, fork) + .unwrap(); + + // Update the execution layer proposer preparation to match the test config. + let el = self.harness.chain.execution_layer.clone().unwrap(); + self.block_on_dangerous(async { + if expected_should_override_fcu.validator_is_connected { + el.update_proposer_preparation( + next_slot_epoch, + &[ProposerPreparationData { + validator_index: dbg!(proposer_index) as u64, + fee_recipient: Default::default(), + }], + ) + .await; + } else { + el.clear_proposer_preparation(proposer_index as u64).await; + } + }) + .unwrap(); + + // Check forkchoice override. + let canonical_fcu_params = cached_head.forkchoice_update_parameters(); + let fcu_params = self + .harness + .chain + .overridden_forkchoice_update_params(canonical_fcu_params) + .unwrap(); + + check_equal( + "should_override_forkchoice_update", + fcu_params != canonical_fcu_params, + expected_should_override_fcu.result, + ) + } } /// Checks that the `head` checkpoint from the beacon chain head matches the `fc` checkpoint gleaned diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index dbf6c70b2..14fe7ef95 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; use types::{BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ForkName, Hash256}; diff --git a/testing/ef_tests/src/cases/genesis_validity.rs b/testing/ef_tests/src/cases/genesis_validity.rs index abdc1ed4a..ec89e0f64 100644 --- a/testing/ef_tests/src/cases/genesis_validity.rs +++ b/testing/ef_tests/src/cases/genesis_validity.rs @@ -1,6 +1,6 @@ use super::*; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::is_valid_genesis_state; use std::path::Path; use types::{BeaconState, EthSpec, ForkName}; diff --git a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs new file mode 100644 index 000000000..aa48c127b --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs @@ -0,0 +1,47 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::blob_to_kzg_commitment; +use kzg::KzgCommitment; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGBlobToKZGCommitmentInput { + pub blob: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGBlobToKZGCommitment { + pub input: KZGBlobToKZGCommitmentInput, + pub output: Option, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGBlobToKZGCommitment { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGBlobToKZGCommitment { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), 
Error> { + let kzg = get_kzg()?; + + let commitment = parse_blob::(&self.input.blob).and_then(|blob| { + blob_to_kzg_commitment::(&kzg, &blob).map_err(|e| { + Error::InternalError(format!("Failed to compute kzg commitment: {:?}", e)) + }) + }); + + let expected = self.output.as_ref().and_then(|s| parse_commitment(s).ok()); + + compare_result::(&commitment, &expected) + } +} diff --git a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs new file mode 100644 index 000000000..71e1ff8e2 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs @@ -0,0 +1,52 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::compute_blob_kzg_proof; +use kzg::KzgProof; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGComputeBlobKZGProofInput { + pub blob: String, + pub commitment: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGComputeBlobKZGProof { + pub input: KZGComputeBlobKZGProofInput, + pub output: Option, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGComputeBlobKZGProof { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGComputeBlobKZGProof { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGComputeBlobKZGProofInput| -> Result<_, Error> { + let blob = parse_blob::(&input.blob)?; + let commitment = parse_commitment(&input.commitment)?; + Ok((blob, commitment)) + }; + + let kzg = get_kzg()?; + let proof = parse_input(&self.input).and_then(|(blob, commitment)| { + compute_blob_kzg_proof::(&kzg, &blob, commitment) + .map_err(|e| Error::InternalError(format!("Failed to compute kzg proof: {:?}", e))) + }); + + let expected = self.output.as_ref().and_then(|s| parse_proof(s).ok()); + + compare_result::(&proof, &expected) + } +} diff --git a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs new file mode 100644 index 000000000..98bb74924 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs @@ -0,0 +1,62 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::compute_kzg_proof; +use kzg::KzgProof; +use serde::Deserialize; +use std::marker::PhantomData; +use std::str::FromStr; +use types::Hash256; + +pub fn parse_point(point: &str) -> Result { + Hash256::from_str(&point[2..]) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse point: {:?}", e))) +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGComputeKZGProofInput { + pub blob: String, + pub z: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGComputeKZGProof { + pub input: KZGComputeKZGProofInput, + pub output: Option<(String, Hash256)>, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGComputeKZGProof { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGComputeKZGProof { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb 
+ } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGComputeKZGProofInput| -> Result<_, Error> { + let blob = parse_blob::(&input.blob)?; + let z = parse_point(&input.z)?; + Ok((blob, z)) + }; + + let kzg = get_kzg()?; + let proof = parse_input(&self.input).and_then(|(blob, z)| { + compute_kzg_proof::(&kzg, &blob, z) + .map_err(|e| Error::InternalError(format!("Failed to compute kzg proof: {:?}", e))) + }); + + let expected = self + .output + .as_ref() + .and_then(|(s, z)| parse_proof(s).ok().map(|proof| (proof, *z))); + + compare_result::<(KzgProof, Hash256), _>(&proof, &expected) + } +} diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs new file mode 100644 index 000000000..04d1b8d5d --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -0,0 +1,106 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::validate_blob; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::{Error as KzgError, Kzg, KzgCommitment, KzgProof, TrustedSetup}; +use serde::Deserialize; +use std::convert::TryInto; +use std::marker::PhantomData; +use types::Blob; + +pub fn get_kzg() -> Result { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| Error::InternalError(format!("Failed to initialize kzg: {:?}", e)))?; + Kzg::new_from_trusted_setup(trusted_setup) + .map_err(|e| Error::InternalError(format!("Failed to initialize kzg: {:?}", e))) +} + +pub fn parse_proof(proof: &str) -> Result { + hex::decode(strip_0x(proof)?) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse proof: {:?}", e))) + .and_then(|bytes| { + bytes + .try_into() + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse proof: {:?}", e))) + }) + .map(KzgProof) +} + +pub fn parse_commitment(commitment: &str) -> Result { + hex::decode(strip_0x(commitment)?) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse commitment: {:?}", e))) + .and_then(|bytes| { + bytes.try_into().map_err(|e| { + Error::FailedToParseTest(format!("Failed to parse commitment: {:?}", e)) + }) + }) + .map(KzgCommitment) +} + +pub fn parse_blob(blob: &str) -> Result, Error> { + hex::decode(strip_0x(blob)?) 
+ .map_err(|e| Error::FailedToParseTest(format!("Failed to parse blob: {:?}", e))) + .and_then(|bytes| { + Blob::::new(bytes) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse blob: {:?}", e))) + }) +} + +fn strip_0x(s: &str) -> Result<&str, Error> { + s.strip_prefix("0x").ok_or(Error::FailedToParseTest(format!( + "Hex is missing 0x prefix: {}", + s + ))) +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGVerifyBlobKZGProofInput { + pub blob: String, + pub commitment: String, + pub proof: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGVerifyBlobKZGProof { + pub input: KZGVerifyBlobKZGProofInput, + pub output: Option, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGVerifyBlobKZGProof { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGVerifyBlobKZGProof { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGVerifyBlobKZGProofInput| -> Result<(Blob, KzgCommitment, KzgProof), Error> { + let blob = parse_blob::(&input.blob)?; + let commitment = parse_commitment(&input.commitment)?; + let proof = parse_proof(&input.proof)?; + Ok((blob, commitment, proof)) + }; + + let kzg = get_kzg()?; + let result = parse_input(&self.input).and_then(|(blob, commitment, proof)| { + match validate_blob::(&kzg, &blob, commitment, proof) { + Ok(_) => Ok(true), + Err(KzgError::KzgVerificationFailed) => Ok(false), + Err(e) => Err(Error::InternalError(format!( + "Failed to validate blob: {:?}", + e + ))), + } + }); + + compare_result::(&result, &self.output) + } +} diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs new file mode 100644 index 000000000..ae5caedf0 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs @@ -0,0 +1,77 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::validate_blobs; +use kzg::Error as KzgError; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGVerifyBlobKZGProofBatchInput { + pub blobs: Vec, + pub commitments: Vec, + pub proofs: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGVerifyBlobKZGProofBatch { + pub input: KZGVerifyBlobKZGProofBatchInput, + pub output: Option, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGVerifyBlobKZGProofBatch { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGVerifyBlobKZGProofBatch { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGVerifyBlobKZGProofBatchInput| -> Result<_, Error> { + let blobs = input + .blobs + .iter() + .map(|s| parse_blob::(s)) + .collect::, _>>()?; + let commitments = input + .commitments + .iter() + .map(|s| parse_commitment(s)) + .collect::, _>>()?; + let proofs = input + .proofs + .iter() + .map(|s| parse_proof(s)) + .collect::, _>>()?; + 
Ok((commitments, blobs, proofs)) + }; + + let kzg = get_kzg()?; + + let result = + parse_input(&self.input).and_then( + |(commitments, blobs, proofs)| match validate_blobs::<E>( + &kzg, + &commitments, + blobs.iter().collect(), + &proofs, + ) { + Ok(_) => Ok(true), + Err(KzgError::KzgVerificationFailed) => Ok(false), + Err(e) => Err(Error::InternalError(format!( + "Failed to validate blobs: {:?}", + e + ))), + }, + ); + + compare_result::<bool, _>(&result, &self.output) + } +} diff --git a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs new file mode 100644 index 000000000..e395558e0 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs @@ -0,0 +1,53 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::verify_kzg_proof; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGVerifyKZGProofInput { + pub commitment: String, + pub z: String, + pub y: String, + pub proof: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGVerifyKZGProof<E: EthSpec> { + pub input: KZGVerifyKZGProofInput, + pub output: Option<bool>, + #[serde(skip)] + _phantom: PhantomData<E>, +} + +impl<E: EthSpec> LoadCase for KZGVerifyKZGProof<E> { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result<Self, Error> { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl<E: EthSpec> Case for KZGVerifyKZGProof<E> { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGVerifyKZGProofInput| -> Result<_, Error> { + let commitment = parse_commitment(&input.commitment)?; + let z = parse_point(&input.z)?; + let y = parse_point(&input.y)?; + let proof = parse_proof(&input.proof)?; + Ok((commitment, z, y, proof)) + }; + + let kzg = get_kzg()?; + let result = parse_input(&self.input).and_then(|(commitment, z, y, proof)| { + verify_kzg_proof::<E>(&kzg, commitment, proof, z, y) + .map_err(|e| Error::InternalError(format!("Failed to validate proof: {:?}", e))) + }); + + compare_result::<bool, _>(&result, &self.output) + } +} diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index c180774bb..d9deda812 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -1,9 +1,9 @@ use super::*; -use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; +use serde::Deserialize; use std::path::Path; use tree_hash::Hash256; -use types::{BeaconState, EthSpec, ForkName}; +use types::{BeaconBlockBody, BeaconBlockBodyDeneb, BeaconState, EthSpec, ForkName}; #[derive(Debug, Clone, Deserialize)] pub struct Metadata { @@ -51,13 +51,10 @@ impl<E: EthSpec> Case for MerkleProofValidity<E> { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut state = self.state.clone(); state.initialize_tree_hash_cache(); - let proof = match state.compute_merkle_proof(self.merkle_proof.leaf_index) { - Ok(proof) => proof, - Err(_) => { - return Err(Error::FailedToParseTest( - "Could not retrieve merkle proof".to_string(), - )) - } + let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { + return Err(Error::FailedToParseTest( + "Could not
retrieve merkle proof".to_string(), + )); }; let proof_len = proof.len(); let branch_len = self.merkle_proof.branch.len(); @@ -85,3 +82,72 @@ Ok(()) } } + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct KzgInclusionMerkleProofValidity<E: EthSpec> { + pub metadata: Option<Metadata>, + pub block: BeaconBlockBody<E>, + pub merkle_proof: MerkleProof, +} + +impl<E: EthSpec> LoadCase for KzgInclusionMerkleProofValidity<E> { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result<Self, Error> { + let block = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + return Err(Error::InternalError(format!( + "KZG inclusion merkle proof validity test skipped for {:?}", + fork_name + ))) + } + ForkName::Deneb => { + ssz_decode_file::<BeaconBlockBodyDeneb<E>>(&path.join("object.ssz_snappy"))? + } + }; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) + } else { + None + }; + + Ok(Self { + metadata, + block: block.into(), + merkle_proof, + }) + } +} + +impl<E: EthSpec> Case for KzgInclusionMerkleProofValidity<E> { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let Ok(proof) = self.block.to_ref().kzg_commitment_merkle_proof(0) else { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )); + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merkle proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + + Ok(()) + } +} diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 21a56dcf2..4c02126d4 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -3,14 +3,15 @@ use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; -use serde_derive::Deserialize; +use serde::Deserialize; +use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair, base, process_attester_slashings, process_bls_to_execution_changes, + altair_deneb, base, process_attester_slashings, process_bls_to_execution_changes, process_deposits, process_exits, process_proposer_slashings, }, process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, @@ -20,7 +21,8 @@ use state_processing::{ use std::fmt::Debug; use std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, + Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyCapella, + BeaconBlockBodyDeneb, BeaconBlockBodyMerge,
BeaconState, BlindedPayload, ChainSpec, Deposit, EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, }; @@ -96,9 +98,19 @@ impl<E: EthSpec> Operation<E> for Attestation<E> { &mut ctxt, spec, ), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => { initialize_progressive_balances_cache(state, None, spec)?; - altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) + altair_deneb::process_attestation( + state, + self, + 0, + &mut ctxt, + VerifySignatures::True, + spec, + ) } } } @@ -260,13 +272,13 @@ } } -impl<E: EthSpec> Operation<E> for FullPayload<E> { +impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, FullPayload<E>> { fn handler_name() -> String { "execution_payload".into() } fn filename() -> String { - "execution_payload.ssz_snappy".into() + "body.ssz_snappy".into() } fn is_enabled_for_fork(fork_name: ForkName) -> bool { @@ -275,9 +287,13 @@ fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result<Self, Error> { ssz_decode_file_with(path, |bytes| { - ExecutionPayload::from_ssz_bytes(bytes, fork_name) + Ok(match fork_name { + ForkName::Merge => BeaconBlockBody::Merge(<_>::from_ssz_bytes(bytes)?), + ForkName::Capella => BeaconBlockBody::Capella(<_>::from_ssz_bytes(bytes)?), + ForkName::Deneb => BeaconBlockBody::Deneb(<_>::from_ssz_bytes(bytes)?), + _ => panic!(), + }) }) - .map(Into::into) } fn apply_to( @@ -297,13 +313,13 @@ } } } -impl<E: EthSpec> Operation<E> for BlindedPayload<E> { +impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, BlindedPayload<E>> { fn handler_name() -> String { "execution_payload".into() } fn filename() -> String { - "execution_payload.ssz_snappy".into() + "body.ssz_snappy".into() } fn is_enabled_for_fork(fork_name: ForkName) -> bool { @@ -312,9 +328,22 @@ fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result<Self, Error> { ssz_decode_file_with(path, |bytes| { - ExecutionPayload::from_ssz_bytes(bytes, fork_name) + Ok(match fork_name { + ForkName::Merge => { + let inner = <BeaconBlockBodyMerge<E, FullPayload<E>>>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Merge(inner.clone_as_blinded()) + } + ForkName::Capella => { + let inner = <BeaconBlockBodyCapella<E, FullPayload<E>>>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Capella(inner.clone_as_blinded()) + } + ForkName::Deneb => { + let inner = <BeaconBlockBodyDeneb<E, FullPayload<E>>>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Deneb(inner.clone_as_blinded()) + } + _ => panic!(), + }) }) - .map(Into::into) } fn apply_to( diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index ee0fc265e..bb41f6fe1 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result_detailed; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use compare_fields_derive::CompareFields; -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use state_processing::{ diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 191b45c33..cf8e6b5b2 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -2,7 +2,7 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use
crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs index dd385d13f..0da179d53 100644 --- a/testing/ef_tests/src/cases/sanity_slots.rs +++ b/testing/ef_tests/src/cases/sanity_slots.rs @@ -2,7 +2,7 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::per_slot_processing; use types::{BeaconState, EthSpec, ForkName}; diff --git a/testing/ef_tests/src/cases/shuffling.rs b/testing/ef_tests/src/cases/shuffling.rs index b5ce019f5..e05763c2d 100644 --- a/testing/ef_tests/src/cases/shuffling.rs +++ b/testing/ef_tests/src/cases/shuffling.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use crate::decode::yaml_decode_file; -use serde_derive::Deserialize; +use serde::Deserialize; use std::marker::PhantomData; use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list}; use types::ForkName; diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 2374ead88..d6c764f52 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -4,8 +4,8 @@ use super::*; use crate::cases::common::{SszStaticType, TestU128, TestU256}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{snappy_decode_file, yaml_decode_file}; +use serde::Deserialize; use serde::{de::Error as SerdeError, Deserializer}; -use serde_derive::Deserialize; use ssz_derive::{Decode, Encode}; use std::path::{Path, PathBuf}; use tree_hash_derive::TreeHash; diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index d0cc5f9ea..423dc3152 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::cases::common::SszStaticType; use crate::decode::{snappy_decode_file, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::Decode; use tree_hash::TreeHash; use types::{BeaconBlock, BeaconState, ForkName, Hash256, SignedBeaconBlock}; diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index bb4efdb6d..c94ce3a23 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, @@ -47,6 +47,12 @@ impl<E: EthSpec> LoadCase for TransitionTest<E> { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Deneb => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch =
Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 2ed596e25..0295ff1bd 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -210,10 +210,6 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Altair]) } - pub fn altair_and_later() -> Self { - Self::for_forks(ForkName::list_all()[1..].to_vec()) - } - pub fn merge_only() -> Self { Self::for_forks(vec![ForkName::Merge]) } @@ -222,9 +218,21 @@ Self::for_forks(vec![ForkName::Capella]) } + pub fn deneb_only() -> Self { + Self::for_forks(vec![ForkName::Deneb]) + } + + pub fn altair_and_later() -> Self { + Self::for_forks(ForkName::list_all()[1..].to_vec()) + } + pub fn merge_and_later() -> Self { Self::for_forks(ForkName::list_all()[2..].to_vec()) } + + pub fn capella_and_later() -> Self { + Self::for_forks(ForkName::list_all()[3..].to_vec()) + } } /// Handler for SSZ types that implement `CachedTreeHash`. @@ -552,6 +560,13 @@ impl<E: EthSpec> Handler for ForkChoiceHandler<E> { return false; } + // No FCU override tests prior to bellatrix. + if self.handler_name == "should_override_forkchoice_update" + && (fork_name == ForkName::Base || fork_name == ForkName::Altair) + { + return false; + } + // These tests check block validity (which may include signatures) and there is no need to // run them with fake crypto. cfg!(not(feature = "fake_crypto")) } @@ -629,6 +644,126 @@ impl<E: EthSpec> Handler for GenesisInitializationHandler<E> { } } + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGBlobToKZGCommitmentHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGBlobToKZGCommitmentHandler<E> { + type Case = cases::KZGBlobToKZGCommitment<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "blob_to_kzg_commitment".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGComputeBlobKZGProofHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGComputeBlobKZGProofHandler<E> { + type Case = cases::KZGComputeBlobKZGProof<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "compute_blob_kzg_proof".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGComputeKZGProofHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGComputeKZGProofHandler<E> { + type Case = cases::KZGComputeKZGProof<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "compute_kzg_proof".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGVerifyBlobKZGProofHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGVerifyBlobKZGProofHandler<E> { + type Case = cases::KZGVerifyBlobKZGProof<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "verify_blob_kzg_proof".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGVerifyBlobKZGProofBatchHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGVerifyBlobKZGProofBatchHandler<E> { + type Case = cases::KZGVerifyBlobKZGProofBatch<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "verify_blob_kzg_proof_batch".into() + } +}
+ +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGVerifyKZGProofHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGVerifyKZGProofHandler<E> { + type Case = cases::KZGVerifyKZGProof<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "verify_kzg_proof".into() + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct MerkleProofValidityHandler<E>(PhantomData<E>); @@ -654,6 +789,34 @@ // spec. // // https://github.com/sigp/lighthouse/issues/4022 + && fork_name != ForkName::Capella && fork_name != ForkName::Deneb + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KzgInclusionMerkleProofValidityHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KzgInclusionMerkleProofValidityHandler<E> { + type Case = cases::KzgInclusionMerkleProofValidity<E>; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "merkle_proof" + } + + fn handler_name(&self) -> String { + "single_merkle_proof".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Enabled in Deneb + fork_name != ForkName::Base + && fork_name != ForkName::Altair + && fork_name != ForkName::Merge && fork_name != ForkName::Capella } } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 675388ee5..13121854a 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -1,4 +1,5 @@ //! Mapping from types to canonical string identifiers used in testing. +use types::blob_sidecar::BlobIdentifier; use types::historical_summary::HistoricalSummary; use types::*; @@ -47,8 +48,11 @@ type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyMerge, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyDeneb, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); +type_name!(BlobIdentifier); +type_name_generic!(BlobSidecar); type_name!(Checkpoint); type_name_generic!(ContributionAndProof); type_name!(Deposit); @@ -58,10 +62,12 @@ type_name!(Eth1Data); type_name_generic!(ExecutionPayload); type_name_generic!(ExecutionPayloadMerge, "ExecutionPayload"); type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadDeneb, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); type_name_generic!(ExecutionPayloadHeaderMerge, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderDeneb, "ExecutionPayloadHeader"); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 33f8d67ec..dd25dba8b 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,7 +1,7 @@ #![cfg(feature = "ef_tests")] -use ef_tests::*; -use types::*; +use ef_tests::{KzgInclusionMerkleProofValidityHandler, *}; +use types::{MainnetEthSpec, MinimalEthSpec, *}; // Check that the hand-computed multiplications on EthSpec are correctly computed. // This test lives here because one is most likely to muck these up during a spec update.
@@ -72,14 +72,14 @@ fn operations_sync_aggregate() { #[test] fn operations_execution_payload_full() { - OperationsHandler::<MinimalEthSpec, FullPayload<MinimalEthSpec>>::default().run(); - OperationsHandler::<MainnetEthSpec, FullPayload<MainnetEthSpec>>::default().run(); + OperationsHandler::<MinimalEthSpec, BeaconBlockBody<MinimalEthSpec, FullPayload<MinimalEthSpec>>>::default().run(); + OperationsHandler::<MainnetEthSpec, BeaconBlockBody<MainnetEthSpec, FullPayload<MainnetEthSpec>>>::default().run(); } #[test] fn operations_execution_payload_blinded() { - OperationsHandler::<MinimalEthSpec, BlindedPayload<MinimalEthSpec>>::default().run(); - OperationsHandler::<MainnetEthSpec, BlindedPayload<MainnetEthSpec>>::default().run(); + OperationsHandler::<MinimalEthSpec, BeaconBlockBody<MinimalEthSpec, BlindedPayload<MinimalEthSpec>>>::default().run(); + OperationsHandler::<MainnetEthSpec, BeaconBlockBody<MainnetEthSpec, BlindedPayload<MainnetEthSpec>>>::default().run(); } #[test] @@ -215,6 +215,7 @@ macro_rules! ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; + use types::blob_sidecar::BlobIdentifier; use types::historical_summary::HistoricalSummary; use types::*; @@ -267,6 +268,10 @@ .run(); SszStaticHandler::<BeaconBlockBodyCapella<MainnetEthSpec>, MainnetEthSpec>::capella_only() .run(); + SszStaticHandler::<BeaconBlockBodyDeneb<MinimalEthSpec>, MinimalEthSpec>::deneb_only() + .run(); + SszStaticHandler::<BeaconBlockBodyDeneb<MainnetEthSpec>, MainnetEthSpec>::deneb_only() + .run(); } // Altair and later @@ -327,6 +332,10 @@ .run(); SszStaticHandler::<ExecutionPayloadCapella<MainnetEthSpec>, MainnetEthSpec>::capella_only() .run(); + SszStaticHandler::<ExecutionPayloadDeneb<MinimalEthSpec>, MinimalEthSpec>::deneb_only() + .run(); + SszStaticHandler::<ExecutionPayloadDeneb<MainnetEthSpec>, MainnetEthSpec>::deneb_only() + .run(); } #[test] @@ -339,30 +348,46 @@ ::capella_only().run(); SszStaticHandler::<ExecutionPayloadHeaderCapella<MainnetEthSpec>, MainnetEthSpec> ::capella_only().run(); + SszStaticHandler::<ExecutionPayloadHeaderDeneb<MinimalEthSpec>, MinimalEthSpec> + ::deneb_only().run(); + SszStaticHandler::<ExecutionPayloadHeaderDeneb<MainnetEthSpec>, MainnetEthSpec> + ::deneb_only().run(); } #[test] fn withdrawal() { - SszStaticHandler::<Withdrawal, MinimalEthSpec>::capella_only().run(); - SszStaticHandler::<Withdrawal, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::<Withdrawal, MinimalEthSpec>::capella_and_later().run(); + SszStaticHandler::<Withdrawal, MainnetEthSpec>::capella_and_later().run(); } #[test] fn bls_to_execution_change() { - SszStaticHandler::<BlsToExecutionChange, MinimalEthSpec>::capella_only().run(); - SszStaticHandler::<BlsToExecutionChange, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::<BlsToExecutionChange, MinimalEthSpec>::capella_and_later().run(); + SszStaticHandler::<BlsToExecutionChange, MainnetEthSpec>::capella_and_later().run(); } #[test] fn signed_bls_to_execution_change() { - SszStaticHandler::<SignedBlsToExecutionChange, MinimalEthSpec>::capella_only().run(); - SszStaticHandler::<SignedBlsToExecutionChange, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::<SignedBlsToExecutionChange, MinimalEthSpec>::capella_and_later().run(); + SszStaticHandler::<SignedBlsToExecutionChange, MainnetEthSpec>::capella_and_later().run(); + } + + #[test] + fn blob_sidecar() { + SszStaticHandler::<BlobSidecar<MinimalEthSpec>, MinimalEthSpec>::deneb_only().run(); + SszStaticHandler::<BlobSidecar<MainnetEthSpec>, MainnetEthSpec>::deneb_only().run(); + } + + #[test] + fn blob_identifier() { + SszStaticHandler::<BlobIdentifier, MinimalEthSpec>::deneb_only().run(); + SszStaticHandler::<BlobIdentifier, MainnetEthSpec>::deneb_only().run(); } #[test] fn historical_summary() { - SszStaticHandler::<HistoricalSummary, MinimalEthSpec>::capella_only().run(); - SszStaticHandler::<HistoricalSummary, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::<HistoricalSummary, MinimalEthSpec>::capella_and_later().run(); + SszStaticHandler::<HistoricalSummary, MainnetEthSpec>::capella_and_later().run(); } } @@ -515,6 +540,18 @@ fn fork_choice_withholding() { // There is no mainnet variant for this test.
} +#[test] +fn fork_choice_should_override_forkchoice_update() { + ForkChoiceHandler::<MainnetEthSpec>::new("should_override_forkchoice_update").run(); + ForkChoiceHandler::<MinimalEthSpec>::new("should_override_forkchoice_update").run(); +} + +#[test] +fn fork_choice_get_proposer_head() { + ForkChoiceHandler::<MainnetEthSpec>::new("get_proposer_head").run(); + ForkChoiceHandler::<MinimalEthSpec>::new("get_proposer_head").run(); +} + #[test] fn optimistic_sync() { OptimisticSyncHandler::<MinimalEthSpec>::default().run(); @@ -532,11 +569,47 @@ fn genesis_validity() { // Note: there are no genesis validity tests for mainnet } +#[test] +fn kzg_blob_to_kzg_commitment() { + KZGBlobToKZGCommitmentHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_compute_blob_kzg_proof() { + KZGComputeBlobKZGProofHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_compute_kzg_proof() { + KZGComputeKZGProofHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_verify_blob_kzg_proof() { + KZGVerifyBlobKZGProofHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_verify_blob_kzg_proof_batch() { + KZGVerifyBlobKZGProofBatchHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_verify_kzg_proof() { + KZGVerifyKZGProofHandler::<MainnetEthSpec>::default().run(); +} + #[test] fn merkle_proof_validity() { MerkleProofValidityHandler::<MainnetEthSpec>::default().run(); } +#[test] +fn kzg_inclusion_merkle_proof_validity() { + KzgInclusionMerkleProofValidityHandler::<MainnetEthSpec>::default().run(); + KzgInclusionMerkleProofValidityHandler::<MinimalEthSpec>::default().run(); +} + #[test] fn rewards() { for handler in &["basic", "leak", "random"] { diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 75a63fb46..6de108fcb 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -22,3 +22,6 @@ reqwest = { workspace = true } hex = { workspace = true } fork_choice = { workspace = true } logging = { workspace = true } + +[features] +portable = ["types/portable"] \ No newline at end of file diff --git a/testing/execution_engine_integration/Makefile b/testing/execution_engine_integration/Makefile index 706206506..72f8d8f6b 100644 --- a/testing/execution_engine_integration/Makefile +++ b/testing/execution_engine_integration/Makefile @@ -1,5 +1,5 @@ test: - cargo run --release --locked + cargo run --release --locked --features "$(TEST_FEATURES)" clean: rm -rf execution_clients diff --git a/testing/execution_engine_integration/src/build_utils.rs b/testing/execution_engine_integration/src/build_utils.rs index 15e7fdc0f..5d9652066 100644 --- a/testing/execution_engine_integration/src/build_utils.rs +++ b/testing/execution_engine_integration/src/build_utils.rs @@ -66,6 +66,7 @@ pub fn get_latest_release(repo_dir: &Path, branch_name: &str) -> Result Value { "muirGlacierBlock":0, "berlinBlock":0, "londonBlock":0, - "clique": { - "period": 5, - "epoch": 30000 - }, - "terminalTotalDifficulty":0 + "mergeNetsplitBlock": 0, + "shanghaiTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true }, "nonce":"0x42", "timestamp":"0x0", @@ -72,8 +71,10 @@ pub fn nethermind_genesis_json() -> Value { "accountStartNonce": "0x0", "maximumExtraDataSize": "0x20", "minGasLimit": "0x1388", - "networkID": "0x1469ca", - "MergeForkIdTransition": "0x3e8", + "networkID": "0x00146A2E", + "MergeForkIdTransition": "0x0", + "maxCodeSize": "0x6000", + "maxCodeSizeTransition": "0x0", "eip150Transition": "0x0", "eip158Transition": "0x0", "eip160Transition": "0x0", @@ -101,7 +102,15 @@ "eip1559Transition": "0x0", "eip3198Transition": "0x0",
"eip3529Transition": "0x0", - "eip3541Transition": "0x0" + "eip3541Transition": "0x0", + "eip3540TransitionTimestamp": "0x0", + "eip3651TransitionTimestamp": "0x0", + "eip3670TransitionTimestamp": "0x0", + "eip3675TransitionTimestamp": "0x0", + "eip3855TransitionTimestamp": "0x0", + "eip3860TransitionTimestamp": "0x0", + "eip4895TransitionTimestamp": "0x0", + "terminalTotalDifficulty": "0x0" }, "genesis": { "seal": { @@ -112,10 +121,10 @@ pub fn nethermind_genesis_json() -> Value { }, "difficulty": "0x01", "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x0", + "timestamp": "0x63585F88", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "extraData": "", - "gasLimit": "0x1C9C380" + "gasLimit": "0x400000" }, "accounts": { "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { @@ -123,9 +132,9 @@ pub fn nethermind_genesis_json() -> Value { }, "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { "balance": "10000000000000000000000000" - }, + } }, "nodes": [] - } + } ) } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 5c83a97e2..0bd96a5c9 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -3,7 +3,7 @@ use crate::execution_engine::GenericExecutionEngine; use crate::genesis_json::geth_genesis_json; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; -use std::{env, fs::File}; +use std::{env, fs}; use tempfile::TempDir; use unused_port::unused_tcp4_port; @@ -36,6 +36,13 @@ pub fn build(execution_clients_dir: &Path) { }); } +pub fn clean(execution_clients_dir: &Path) { + let repo_dir = execution_clients_dir.join("go-ethereum"); + if let Err(e) = fs::remove_dir_all(repo_dir) { + eprintln!("Error while deleting folder: {}", e); + } +} + /* * Geth-specific Implementation for GenericExecutionEngine */ @@ -60,7 +67,7 @@ impl GenericExecutionEngine for GethEngine { let datadir = TempDir::new().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(&genesis_json_path).unwrap(); + let mut file = fs::File::create(&genesis_json_path).unwrap(); let json = geth_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index e46bc13c8..efb06833f 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,3 +1,5 @@ +#![recursion_limit = "256"] // for inline json + /// This binary runs integration tests between Lighthouse and execution engines. /// /// It will first attempt to build any supported integration clients, then it will run tests. 
@@ -31,6 +33,7 @@ fn test_geth() { let test_dir = build_utils::prepare_dir(); geth::build(&test_dir); TestRig::new(GethEngine).perform_tests_blocking(); + geth::clean(&test_dir); } fn test_nethermind() { diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 8925f1cc8..aad37c32b 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -2,7 +2,7 @@ use crate::build_utils; use crate::execution_engine::GenericExecutionEngine; use crate::genesis_json::nethermind_genesis_json; use std::env; -use std::fs::File; +use std::fs; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; use tempfile::TempDir; @@ -11,7 +11,7 @@ use unused_port::unused_tcp4_port; /// We've pinned the Nethermind version since our method of using the `master` branch to /// find the latest tag isn't working. It appears Nethermind don't always tag on `master`. /// We should fix this so we always pull the latest version of Nethermind. -const NETHERMIND_BRANCH: &str = "release/1.18.2"; +const NETHERMIND_BRANCH: &str = "release/1.21.0"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { @@ -47,6 +47,12 @@ pub fn build(execution_clients_dir: &Path) { build_utils::check_command_output(build_result(&repo_dir), || { format!("nethermind build failed using release {last_release}") }); + + // Cleanup some disk space by removing nethermind's tests + let tests_dir = execution_clients_dir.join("nethermind/src/tests"); + if let Err(e) = fs::remove_dir_all(tests_dir) { + eprintln!("Error while deleting folder: {}", e); + } } /* @@ -68,7 +74,8 @@ impl NethermindEngine { .join("bin") .join("Release") .join("net7.0") - .join("Nethermind.Runner") + .join("linux-x64") + .join("nethermind") } } @@ -76,7 +83,7 @@ impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { let datadir = TempDir::new().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(genesis_json_path).unwrap(); + let mut file = fs::File::create(genesis_json_path).unwrap(); let json = nethermind_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); datadir diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 654b8628b..b0701e80a 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -4,7 +4,8 @@ use crate::execution_engine::{ use crate::transactions::transactions; use ethers_providers::Middleware; use execution_layer::{ - BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadStatus, + BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, + PayloadStatus, }; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; @@ -14,11 +15,14 @@ use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; +use types::payload::BlockProductionVersion; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - ForkName, FullPayload, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + ForkName, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; -const EXECUTION_ENGINE_START_TIMEOUT: Duration = 
Duration::from_secs(30); +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(60); + +const TEST_FORK: ForkName = ForkName::Capella; struct ExecutionPair { /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. @@ -110,7 +114,7 @@ let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); - let mut spec = MainnetEthSpec::default_spec(); + let mut spec = TEST_FORK.make_genesis_spec(MainnetEthSpec::default_spec()); spec.terminal_total_difficulty = Uint256::zero(); let fee_recipient = None; @@ -269,8 +273,13 @@ Slot::new(1), // Insert proposer for the next slot head_root, proposer_index, - // TODO: think about how to test different forks - PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None), + PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + Some(vec![]), + None, + ), ) .await; @@ -308,24 +317,34 @@ .execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); - let valid_payload = self + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + Some(vec![]), + None, + ); + let block_proposal_content_type = self .ee_a .execution_layer - .get_payload::<FullPayload<MainnetEthSpec>>( + .get_payload( parent_hash, &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: think about how to test other forks - ForkName::Merge, + TEST_FORK, &self.spec, + None, + BlockProductionVersion::FullV2, ) .await - .unwrap() - .to_payload() - .execution_payload(); + .unwrap(); + + let valid_payload = match block_proposal_content_type { + BlockProposalContentsType::Full(block) => block.to_payload().execution_payload(), + BlockProposalContentsType::Blinded(_) => panic!("Should always be a full payload"), + }; + assert_eq!(valid_payload.transactions().len(), pending_txs.len()); /* @@ -358,10 +377,11 @@ * Provide the valid payload back to the EE again. */ + // TODO: again consider forks here let status = self .ee_a .execution_layer - .notify_new_payload(&valid_payload) + .notify_new_payload(valid_payload.clone().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -409,12 +429,13 @@ * Provide an invalidated payload to the EE.
*/ + // TODO: again think about forks here let mut invalid_payload = valid_payload.clone(); *invalid_payload.prev_randao_mut() = Hash256::from_low_u64_be(42); let status = self .ee_a .execution_layer - .notify_new_payload(&invalid_payload) + .notify_new_payload(invalid_payload.try_into().unwrap()) .await .unwrap(); assert!(matches!( @@ -448,24 +469,33 @@ .execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); - let second_payload = self + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + Some(vec![]), + None, + ); + let block_proposal_content_type = self .ee_a .execution_layer - .get_payload::<FullPayload<MainnetEthSpec>>( + .get_payload( parent_hash, &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: think about how to test other forks - ForkName::Merge, + TEST_FORK, &self.spec, + None, + BlockProductionVersion::FullV2, ) .await - .unwrap() - .to_payload() - .execution_payload(); + .unwrap(); + + let second_payload = match block_proposal_content_type { + BlockProposalContentsType::Full(block) => block.to_payload().execution_payload(), + BlockProposalContentsType::Blinded(_) => panic!("Should always be a full payload"), + }; /* * Execution Engine A: * * Provide the second payload back to the EE again. */ + // TODO: again consider forks here let status = self .ee_a .execution_layer - .notify_new_payload(&second_payload) + .notify_new_payload(second_payload.clone().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -489,11 +520,15 @@ */ let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); - // TODO: think about how to handle different forks // To save sending proposer preparation data, just set the fee recipient // to the fee recipient configured for EE A. - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + Some(vec![]), + None, + ); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; @@ -520,17 +555,14 @@ * * Provide the second payload, without providing the first. */ + // TODO: again consider forks here let status = self .ee_b .execution_layer - .notify_new_payload(&second_payload) + .notify_new_payload(second_payload.clone().try_into().unwrap()) .await .unwrap(); - // TODO: we should remove the `Accepted` status here once Geth fixes it - assert!(matches!( - status, - PayloadStatus::Syncing | PayloadStatus::Accepted - )); + assert!(matches!(status, PayloadStatus::Syncing)); /* * Execution Engine B: @@ -561,10 +593,11 @@ * Provide the first payload to the EE.
*/ + // TODO: again consider forks here let status = self .ee_b .execution_layer - .notify_new_payload(&valid_payload) + .notify_new_payload(valid_payload.clone().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -578,7 +611,7 @@ let status = self .ee_b .execution_layer - .notify_new_payload(&second_payload) + .notify_new_payload(second_payload.clone().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -630,11 +663,13 @@ async fn check_payload_reconstruction( .get_engine_capabilities(None) .await .unwrap(); + assert!( // if the engine doesn't have these capabilities, we need to update the client in our tests capabilities.get_payload_bodies_by_hash_v1 && capabilities.get_payload_bodies_by_range_v1, "Testing engine does not support payload bodies methods" ); + let mut bodies = ee .execution_layer .get_payload_bodies_by_hash(vec![payload.block_hash()]) diff --git a/testing/network_testing/README.md b/testing/network_testing/README.md new file mode 100644 index 000000000..359151468 --- /dev/null +++ b/testing/network_testing/README.md @@ -0,0 +1,74 @@ +# Lighthouse live network testing + + +## DISCLAIMER + +This document describes how to run a Lighthouse node with minimal resources and time on a live +network. + +This procedure should ONLY be used for testing networks and never in production and never with +attached validators. The Lighthouse node described here is only a partially functioning +node. + + +## Overview + +We are going to run a single Lighthouse node connected to a live network, without syncing and +without an execution engine. This should only ever be done for testing. + +There are two main components needed: + +1. A Lighthouse node that doesn't sync +2. A fake execution client that does nothing + +We will start with the second. + +## Mock-EL + +This is a service that runs and fakes an execution engine. We first need to install the Lighthouse +`lcli` tool. + +``` +$ make install-lcli +``` + +Once installed, run the fake execution client: + +``` +$ lcli mock-el --jwt-output-path /tmp/mockel.jwt +``` + +This will create a server listening on localhost:8551. + +## Lighthouse no sync + +To create a Lighthouse node that doesn't sync, we need to compile it with a special feature flag. + +``` +$ cargo build --release --bin lighthouse --features network/disable-backfill +``` + +Once built, it can be run via checkpoint sync on any network, making sure we point it at our +mock-EL. + +Prater testnet: + +``` +$ lighthouse --network prater bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url \ +https://prater.checkpoint.sigp.io --execution-endpoint http://localhost:8551 +``` + +Mainnet: + +``` +$ lighthouse --network mainnet bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url \ +https://checkpoint.sigp.io --execution-endpoint http://localhost:8551 +``` + +Additional flags, such as metrics, may be added. + + +## Additional Notes + +The above assumes that you have not run the command before. If a database already exists for the +network you are testing, checkpoint sync will not start. You may need to add the +`--purge-db` flag to remove any existing database and force checkpoint sync to run.
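A quick way to confirm the mock EL is reachable before starting the beacon node is to POST any JSON-RPC request at the engine port and check that something answers. This is a sketch, assuming only that the `lcli mock-el` server speaks plain HTTP JSON-RPC on localhost:8551; the particular method and the response body do not matter here:

```
# Any response at all (even an auth error, since engine endpoints normally
# require the JWT) means the server is up; "connection refused" means it is not.
$ curl -s -X POST -H "Content-Type: application/json" \
    --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' \
    http://localhost:8551
```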
diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 5fe820d15..4696d8d2f 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -11,7 +11,7 @@ types = { workspace = true } tempfile = { workspace = true } eth2 = { workspace = true } validator_client = { workspace = true } -validator_dir = { workspace = true } +validator_dir = { workspace = true, features = ["insecure_keys"] } sensitive_url = { workspace = true } execution_layer = { workspace = true } tokio = { workspace = true } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 0fdc5cd66..6c9af707f 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -21,7 +21,7 @@ pub use eth2; pub use execution_layer::test_utils::{ Config as MockServerConfig, MockExecutionConfig, MockServer, }; -pub use validator_client::Config as ValidatorConfig; +pub use validator_client::{ApiTopic, Config as ValidatorConfig}; /// The global timeout for HTTP requests to the beacon node. const HTTP_TIMEOUT: Duration = Duration::from_secs(8); @@ -250,7 +250,7 @@ impl LocalExecutionNode { panic!("Failed to write jwt file {}", e); } Self { - server: MockServer::new_with_config(&context.executor.handle().unwrap(), config), + server: MockServer::new_with_config(&context.executor.handle().unwrap(), config, None), datadir, } } diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 57c944cf1..953dcf582 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -10,7 +10,8 @@ use futures::prelude::*; use node_test_rig::environment::RuntimeContext; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, - testing_client_config, testing_validator_config, ClientConfig, ClientGenesis, ValidatorFiles, + testing_client_config, testing_validator_config, ApiTopic, ClientConfig, ClientGenesis, + ValidatorFiles, }; use rayon::prelude::*; use sensitive_url::SensitiveUrl; @@ -159,10 +160,25 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into()); } println!("Adding validator client {}", i); - network_1 - .add_validator_client(validator_config, i, files, i % 2 == 0) - .await - .expect("should add validator"); + + // Enable broadcast on every 4th node. 
+ if i % 4 == 0 { + validator_config.broadcast_topics = ApiTopic::all(); + let beacon_nodes = vec![i, (i + 1) % node_count]; + network_1 + .add_validator_client_with_fallbacks( + validator_config, + i, + beacon_nodes, + files, + ) + .await + } else { + network_1 + .add_validator_client(validator_config, i, files, i % 2 == 0) + .await + } + .expect("should add validator"); }, "vc", ); diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 69fa8ded0..dc8bf0d27 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -66,8 +66,8 @@ impl LocalNetwork { BOOTNODE_PORT, QUIC_PORT, ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT); + beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); + beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); beacon_config.network.discv5_config.table_filter = |_| true; let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { @@ -152,14 +152,16 @@ .expect("bootnode must have a network"), ); let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; + let libp2p_tcp_port = BOOTNODE_PORT + count; + let discv5_port = BOOTNODE_PORT + count; beacon_config.network.set_ipv4_listening_address( std::net::Ipv4Addr::UNSPECIFIED, - BOOTNODE_PORT + count, - BOOTNODE_PORT + count, + libp2p_tcp_port, + discv5_port, QUIC_PORT + count, ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT + count); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT + count); + beacon_config.network.enr_udp4_port = Some(discv5_port.try_into().unwrap()); + beacon_config.network.enr_tcp4_port = Some(libp2p_tcp_port.try_into().unwrap()); beacon_config.network.discv5_config.table_filter = |_| true; beacon_config.network.proposer_only = is_proposer; } @@ -268,6 +270,48 @@ Ok(()) } + pub async fn add_validator_client_with_fallbacks( + &self, + mut validator_config: ValidatorConfig, + validator_index: usize, + beacon_nodes: Vec<usize>, + validator_files: ValidatorFiles, + ) -> Result<(), String> { + let context = self + .context + .service_context(format!("validator_{}", validator_index)); + let self_1 = self.clone(); + let mut beacon_node_urls = vec![]; + for beacon_node in beacon_nodes { + let socket_addr = { + let read_lock = self.beacon_nodes.read(); + let beacon_node = read_lock + .get(beacon_node) + .ok_or_else(|| format!("No beacon node for index {}", beacon_node))?; + beacon_node + .client + .http_api_listen_addr() + .expect("Must have http started") + }; + let beacon_node = SensitiveUrl::parse( + format!("http://{}:{}", socket_addr.ip(), socket_addr.port()).as_str(), + ) + .unwrap(); + beacon_node_urls.push(beacon_node); + } + + validator_config.beacon_nodes = beacon_node_urls; + + let validator_client = LocalValidatorClient::production_with_insecure_keypairs( + context, + validator_config, + validator_files, + ) + .await?; + self_1.validator_clients.write().push(validator_client); + Ok(()) + } + /// For all beacon nodes in `Self`, return an HTTP client to access each node's HTTP API.
pub fn remote_nodes(&self) -> Result, String> { let beacon_nodes = self.beacon_nodes.read(); diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 8d930d13b..a1b6ed3b8 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -13,3 +13,6 @@ ethereum_ssz = { workspace = true } beacon_chain = { workspace = true } lazy_static = { workspace = true } tokio = { workspace = true } + +[features] +portable = ["beacon_chain/portable"] \ No newline at end of file diff --git a/testing/state_transition_vectors/Makefile b/testing/state_transition_vectors/Makefile index e06c71918..437aa50b0 100644 --- a/testing/state_transition_vectors/Makefile +++ b/testing/state_transition_vectors/Makefile @@ -2,7 +2,7 @@ produce-vectors: cargo run --release test: - cargo test --release + cargo test --release --features "$(TEST_FEATURES)" clean: rm -r vectors/ diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 7e7fd23e0..50b98d306 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -57,7 +57,7 @@ impl ExitTest { block_modifier(&harness, block); }) .await; - (signed_block, state) + ((*signed_block.0).clone(), state) } fn process( @@ -127,7 +127,7 @@ vectors_and_tests!( ExitTest { block_modifier: Box::new(|_, block| { // Duplicate the exit - let exit = block.body().voluntary_exits().get(0).unwrap().clone(); + let exit = block.body().voluntary_exits().first().unwrap().clone(); block.body_mut().voluntary_exits_mut().push(exit).unwrap(); }), expected: Err(BlockProcessingError::ExitInvalid { diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 463de0c8b..6f3536fe4 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -391,6 +391,8 @@ mod tests { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, description: String::default(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: signer_rig.keystore_path.clone(), @@ -409,6 +411,8 @@ mod tests { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, description: String::default(), signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url: signer_rig.url.to_string(), diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index f00e7b7e3..f918e87cf 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,7 +1,7 @@ #!/bin/bash openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && -openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && +openssl pkcs12 -export -aes256 -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && cp web3signer/cert.pem lighthouse/web3signer.pem && openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && -openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat 
lighthouse/password.txt) && +openssl pkcs12 -export -aes256 -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem index 5746d19a1..24b0a2e5c 100644 --- a/testing/web3signer_tests/tls/lighthouse/cert.pem +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUELASgYwStCn/u/8tPByRADyCwLEwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUXZijYo8W4/9dAq58ocFEbZDxohwwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMjAzMDA1N1oYDzIxMjMwODI5MDMwMDU3 +VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMDAyNTYzNloYDzIxMjMwODI3MDI1NjM2 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCc -i30cib5B/B5QNd8grzi4LxmlyfZFi3VfpukwdwOD1Xk3ODk1OtjAzhK46YhDclvc -u98m1Dnib1Z+eTjRuEEoekIxz2+BbOle7G52LNvuDZpD+HKucqIU3TnEKPPuTYPp -lZ1n/9EyxXUwD5uTkn7xXzK8UFXUt73j6I6VFMdHlNcwLcx8KSwBDzvnGT4ew/UL -+ThON3j5rIT+nFHDcC2zoM+6ANdVkL6GHid4/cOcYW6GxB9TRZtEasqze41bC+kX -ZtPlV5V2nilAzVj8z9ynwBpHkLH+E6sMUhSEwA++QfI1gGf0FmSBgSIZ3RdPo/dp -hkLG8fZXKMkMzKkRm5hcstDP6DnTIYl+CfuVez5gZ0/yelAqXNvTqMKuDhHTTRRY -aOXZX4BAiQO2Q6a6WYLe87E2ka5AF2T2y/BPeXjUwDS/1mFIB3FUGlMLVJt8/RLz -nXVGoSsYapttiiPucQbMPEysCJ4/LZ9zxe3EDWWjpurLHGi/Y/dVziEvg1Eoycix -dZogKz0QVCz4++QI0kPDDX7So7CWni2JJuYguF/8CX8QbCT2L8jXf0uQrq76FLKj -88A7lS8DzXBt/pRryiIlDyLenJwHmrv6p+P/FYvgnJHvAEtTynxYm5GA16YWy+Dj -c5XVgNHjV4TdX3GueAp+NhBBaHDFvYCbP/oXkRvNRQIDAQABo1QwUjALBgNVHQ8E +BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC1 +R1M9NnRwUsqFvJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52 +aHSA2fs2KyeA61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDE +jf0ogUVM9TCEt6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAw +Oz1d8/fxYJvIpT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5Fe +V0fPth+e9XMAH7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI +0vps1zF9Bo8QewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWM +VcR//EtbOZGqzGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr +67Vyi9SWSM6rdRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91f +kpT6kjc6d2h4bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa +3zLeqd89dS7HNLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcf +TPFe8xuDYsi155veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABo1QwUjALBgNVHQ8E BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQUoeeF4G1qTRzLvO583qitbNDzr10wDQYJKoZIhvcNAQELBQADggIBAA9Y -YZP0pZLyovSnjyyuTR4KE9B+TSwqHe/LvH+7EAXLH+cwhyS7ADfJyt3mOCbKHZSo -dmJ5KWQ6M2Xn9Wq40BPk8mQPmAxy0nHg5beG03HYXOIsK8zgXTMad1+D1jnHPAda -ldXJ2Y+ljx4TDXKCWpTaq1+flqgRD3t98tOLuiULZ5jsTFX8Xbun7matcjziU5Lo -GWVQPWkb8Vx+3QyfbfiYJ7hggfYTxQsVJOXKuD8k2FMtKn5oTp3VwD2kY1q2X2Yk -HsDZJdYrvjWi2LcZDKoSNeusuLrv1XoUnwsAa3ng6drvoEU16vfILLYqH820UJ61 -/fFm3a9BFHRvPVd/WcSeIVc9jx9+32RIVxlppwCINnGMGE20kUZxu0TiMjTX9bCp -AouDuhwMt7z5jiQIi/CMxN6IlHBeVLqyK8ayWvH40xYgZTXlePpmLcQhcieNk7oJ -ard9jMfj4JhH5GbLXVptMBVJ0f9Ql4rW3EyNipvVKdkgTUNIeVm7LyUK220aT7ty -a0pGWHHViiF1MjGExo0P3gjZIML32TjZWlG3Nts5NAiyXDo4f78VeLyZQ7efVkub -GpjMf89vrmPdQhssoFr8fRFQObDe7hgxkgeiw9jgHItJl2/MWAxfsHV18HwiBqGW 
-QzaZR995YhU480jvA5XR8+EB6QUZeCEKunW8WK/F +HQ4EFgQU6r7QHkcEsWhEZHpcMpGxwKXQL9swDQYJKoZIhvcNAQELBQADggIBACyO +8xzqotye1J6xhDQCQnQF3dXaPTqfT31Ypg8UeU25V9N+bZO04CJKlOblukuvkedE +x1RDeqG3A81D4JOgTGFmFVoEF4iTk3NBrsHuMzph6ImHTd3TD+5iG5a3GL0i9PAI +dHTT6z6t2wlayjmHotqQ+N4A4msx8IPBRULcCmId319gpSDHsvt2wYbLdh+d9E2h +vI0VleJpJ7eoy05842VTkFJebriSpi75yFphKUnyAKlONiMN3o6eg90wpWdI+1rQ +js5lfm+pxYw8H6eSf+rl30m+amrxUlooqrSCHNVSO2c4+W5m/r3JfOiRqVUTxaO8 +0f/xYXo6SdRxdvJV18LEzOHURvkbqBjLoEfHbCC2EApevWAeCdjhvCBPl1IJZtFP +sYDpYtHhw69JmZ7Nj75cQyRtJMQ5S4GsJ/haYXNZPgRL1XBo1ntuc8K1cLZ2MucQ +1170+2pi3IvwmST+/+7+2fyms1AwF7rj2dVxNfPIvOxi6E9lHmPVxvpbuOYOEhex +XqTum/MjI17Qf6eoipk81ppCFtO9s3qNe9SBSjzYEYnsytaMdZSSjsOhE/IyYPHI +SICMjWE13du03Z5xWwK9i3UiFq+hIPhBHFPGkNFMmkQtcyS9lj9R0tKUmWdFPNa8 +nuhxn5kLUMriv3zsdhMPUC4NwM5XsopdWcuSxfnt -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key index 91bee6a78..d00b6c212 100644 --- a/testing/web3signer_tests/tls/lighthouse/key.key +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCci30cib5B/B5Q -Nd8grzi4LxmlyfZFi3VfpukwdwOD1Xk3ODk1OtjAzhK46YhDclvcu98m1Dnib1Z+ -eTjRuEEoekIxz2+BbOle7G52LNvuDZpD+HKucqIU3TnEKPPuTYPplZ1n/9EyxXUw -D5uTkn7xXzK8UFXUt73j6I6VFMdHlNcwLcx8KSwBDzvnGT4ew/UL+ThON3j5rIT+ -nFHDcC2zoM+6ANdVkL6GHid4/cOcYW6GxB9TRZtEasqze41bC+kXZtPlV5V2nilA -zVj8z9ynwBpHkLH+E6sMUhSEwA++QfI1gGf0FmSBgSIZ3RdPo/dphkLG8fZXKMkM -zKkRm5hcstDP6DnTIYl+CfuVez5gZ0/yelAqXNvTqMKuDhHTTRRYaOXZX4BAiQO2 -Q6a6WYLe87E2ka5AF2T2y/BPeXjUwDS/1mFIB3FUGlMLVJt8/RLznXVGoSsYaptt -iiPucQbMPEysCJ4/LZ9zxe3EDWWjpurLHGi/Y/dVziEvg1EoycixdZogKz0QVCz4 -++QI0kPDDX7So7CWni2JJuYguF/8CX8QbCT2L8jXf0uQrq76FLKj88A7lS8DzXBt -/pRryiIlDyLenJwHmrv6p+P/FYvgnJHvAEtTynxYm5GA16YWy+Djc5XVgNHjV4Td -X3GueAp+NhBBaHDFvYCbP/oXkRvNRQIDAQABAoICACCSBxxeblblQVtX8g4nVso/ -hnsPi61JiEi3/hGG2ZTe4AMEsCZqkXmABrYxZJf/3awN7K5z/n0lxB25VACScQAe -e9JIQf9wLRgCYjM1PycG7n9Q3G9+S0nDA4dUK/h7aUQ6zE68k4aYPbsbrDdmhgHr -WC+FGW6SMjCOjMfo1FOI3MLZ7I8ys8Seqkx5XIrjI4NzvWrMsN9lrSAaXwqmNuQG -Q+ID1cmoPXPDJ1xNlBrfzLK+cHQPafAwte7k+HKmhj9HtjOj5uWQn62ra+Xhy5ud -ZPpZ2Savaem81CcQnNXte5r1Fevbktq9Bt7RuM1ppIrwk8k3w5S72CTRGiYfNPJV -M1RMp46GrXVJdmx3k9LQfKdT6Gv9xTJXYQl7jN0+4uZ7QrVQHpcMpxPsATl+cQQH -wzCTbj2Oqn/30KqkZLyueN2MalRP8mVSe5nD+vvGb/sWLs52kp6QvHdlXER2RBFk -tJ5cGi+vgueoukb+qatiAE2y5MxYCqD02ShGcLos/SUQThRhL+iD8t0h+FoPTD5y -eTNZ85hF1HdypH1If8/YGETg55+fHYUAtYGT6R8lYeFMvBC05suorLBciXShOGuY -4zBbt32fPlsXlLneAtAAFv2BiJMt0TQavWHITLInFW1/aMHDV4/Pq69sRZuHdRaW -XFXD8CjnPUS5tBMQOqYhAoIBAQDLqUo7v3SpIstXmyU7BfUBuTYGS7MzjMhDxFUl -HvmbVZlOXhnPb3p4mW/XHrah9CjFBLJt3CF+PP/njwMw0YtPxCQpQwj0pI8CuveE -4Puq2wEfxVg+JKh1xidNj8230/WINzwfLCVfco7KKmjQX0MgMGaANQ0sGnt/r1eB -MwpY5uID+D5PORXUcHxBWlsVLyzZ9ZqKhAgewr3i7BLX2y7nwqEGlWTt1lxzZGCR -a8NZIAIs3qGzAgtm7O3hMz6XZulVyVSrMmmT8qXT4Lo1nW/9J6slV7Wdp9f++mr9 -m2vwrpJtmdPcA+YKPVgoFlKmZpZZbVvd+4uy8ksoxs1/cF7VAoIBAQDExnLQplq2 -BPoxEDAnlS+8Jju5en5Pk70HOfpQGUa4/6vY60x/N5sJqc6CcDySkkxRI8jLzMTe -AE9jqM+Z39MvGCH+SF9EPRopbAJIrcChXfvk2Imp7PLFRGrEBju63nQfaHdcefFy -Ia7RA8SCHLCReRdqPjSXbPAYPZK84vVNSfhrkytA4FJnaojvaqJqLQH9vB7CXv18 -Fu6w5fnrgARIoBhy2mb0QWzgd9JMsVDgS5XyX/4HBUejjXDdmLosOZ4PJ0GM2+tr -ilO/9KKhV9lqH7DcFgJBNkgVKRD1Ijr21yyOkttB5PULzaTQhzqkorGkWgyTzLWn -ksqOr2cWt0yxAoIBAElIazvAkpvht0WYv/bTF+w81uHBD4R3HgC0fubSlIJ+dKGs -XqEzvd/zZjkEBjeUga8TF5lMYojoLjUGUuGYJQbYzyJBytEs/YDAAhzAUA6Uq3zh -J/WEf1GRscbI/f8tt+YB6hJVckU8FYFNbVW9UYwdnmR3snuyM8ooL9Z/pTOEMMO4 
-6cLcCazdpPhnKOsghIURSUCabcmTzXv/8m/VoLUoZYTW8PBb9/xVnCH3ot1JFT9M -BOdCzxOEIbytEdKs5z1FKsBHbZIc9+qbrKVqN0fipETVoLZQFPrc5O7IpDiAuJPT -jFZY2MfKdxRFpAvYUjVvkmT4BLapVL4hewRmTNkCggEBAKuJP8/KJSulvSEGNqRa -9kjzn376XKAsb02caixnTHK7Vuh7fq0sIThUUhT9mKBBbswRANtAv6Gz7YE4SPmf -1+6nAAM2ve2zwlm3sWoAJfvF/W+qoJ+EUsJK+TO3J1yozdwPanYwS52t5UKKIUU3 -k2jNge75GUmkCs1m58NHqoXc5PWKTtt4cf17LrJfaARdBe5Wjw3sVtdU+nE1mh+E -8rcI8Sc2Yyes3Sf07Fw0+wb8fVPUAJPIM4JNK8XRfQJOnA4jr44GrPyLkqS0sw0p -kvtjcv75JLAKjN39da3sUDCctVf4h7Cy0jee5n1uVV3uAiP+6BX0D6tsWK34FEsG -MZECggEBAIi/sjZNQjplD5zOULEWL8W6b+3CZymR5Qqa0brlx1Lz8h/daIITIFvm -bue/CjIht/oRGLVE8yzw2ojLf424h3h5PjmXMBNHlVkWQXfn6xCI8MjfZ71uA39O -RVCXAYwcghOWZL4Fkz+XQmIOdJ1OPXfU0py943joYZbgXXAYOc/zNylo9j7+bqDK -vLtFd4IIQoRzjsY//FoAuAditf4xDRqLwOh4amboZw1Qmn6bwDnCaKsFmA3o5BYR -4aRUm1dEbZgPtm2tuHQpEKuOPhWHroi3NsEdbhoyy3IUe0c3w4YGgnuvVy616wkV -GlPvUaKC1KX0CX1qT1anVZq9bSMTG+M= +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC1R1M9NnRwUsqF +vJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52aHSA2fs2KyeA +61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDEjf0ogUVM9TCE +t6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAwOz1d8/fxYJvI +pT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5FeV0fPth+e9XMA +H7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI0vps1zF9Bo8Q +ewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWMVcR//EtbOZGq +zGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr67Vyi9SWSM6r +dRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91fkpT6kjc6d2h4 +bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa3zLeqd89dS7H +NLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcfTPFe8xuDYsi1 +55veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABAoICAEP5a1KMPUwzF0Lfr1Jm1JUk +pLb26C2rkf3B56XIFZgddeJwHHMEkQ9Z6JYM5Bd0KJ6Y23rHgiXVN7plRvOiznMs +MAbgblroC8GbAUZ0eCJr5nxyOXQdS1jHufbA21x7FGbvsSqDkrdhR2C0uPLMyMvp +VHP7dey1mEyCkHrP+KFRU5kVxOG1WnBMqdY1Ws/uuMBdLk0xItttdOzfXhH4dHQD +wc5aAJrtusyNDFLC25Og49yIgpPMWe+gAYCm5jFz9PgRtVlDOwcxlX5J5+GSm7+U +XM1bPSmU1TSEH233JbQcqo4HkynB71ftbVUtMhEFhLBYoFO4u5Ncpr+wys0xJY4f +3aJRV5+gtlmAmsKN66GoMA10KNlLp2z7XMlx1EXegOHthcKfgf5D6LKRz8qZhknm +FFgAOg9Bak1mt1DighhPUJ0vLYU6K+u0ZXwysYygOkBJ/yj63ApuPCSTQb7U0JlL +JMgesy1om3rVdN0Oc7hNaxq7VwswkzUTUKS2ZvGozF3MmdPHNm5weJTb3NsWv8Qo +HiK1I88tY9oZ5r91SC82hMErmG4ElXFLxic1B29h3fsIe/l+WjmZRXixD9ugV0gj +CvNa8QD9K3hljlNrR6eSXeO2QOyxAEUr2N1MBlxrnAWZCzXKiTvTx1aKDYhJT0DY +zae/etTLHVjzgdH6GS33AoIBAQDaaWYHa9wkJIJPX4siVCatwWKGTjVfDb5Q9upf +twkxCf58pmbzUOXW3dbaz6S0npR0V6Wqh3S8HW7xaHgDZDMLJ1WxLJrgqDKU3Pqc +k7xnA/krWqoRVSOOGkPnSrnZo6AVc6FR+iwJjfuUu0rFDwiyuqvuXpwNsVwvAOoL +xIbaEbGUHiFsZamm2YkoxrEjXGFkZxQX9+n9f+IAiMxMQc0wezRREc8e61/mTovJ +QJ7ZDd7zLUR7Yeqciy59NOsD57cGtnp1K28I2eKLA4taghgd5bJjPkUaHg9j5Xf6 +nsxU2QCp9kpwXvtMxN7pERKWFsnmu8tfJOiUWCpp8SLbIl6nAoIBAQDUefKKjRLa +6quNW0rOGn2kx0K6sG7T45OhwvWXVjnPAjX3/2mAMALT1wc3t0iKDvpIEfMadW2S +O8x2FwyifdJXmkz943EZ/J5Tq1H0wr4NeClX4UlPIAx3CdFlCphqH6QfKtrpQ+Hf ++e8XzjVvdg8Y/RcbWgPgBtOh2oKT5QHDh13/994nH7GhVM7PjLUVvZVmNWaC77zr +bXcvJFF/81PAPWC2JoV6TL/CXvda2tG2clxbSfykfUBPBpeyEijMoxC4UMuCHhbp +NpLfKJQp9XNqbBG2K4jgLQ8Ipk6Vtia/hktLgORf/pbQ4PxEv7OP5e1AOreDg/CW +RnQtBb+/8czbAoIBABfDA8Cm8WpVNoAgKujvMs4QjgGCnLfcrOnuEw2awjs9lRxG +lki+cmLv+6IOmSK1Zf1KU9G7ru2QXjORZA0qZ4s9GkuOSMNMSUR8zh8ey46Bligr +UvlTw+x/2wdcz99nt9DdpZ1flE7tzYMe5UGPIykeufnS/TNYKmlKtivVk75B0ooE +xSof3Vczr4JqK3dnY4ki1cLNy/0yXookV+Wr+wDdRpHTWC9K+EH8JaUdjKqcobbf +I+Ywfu/NDJ++lBr2qKjoTWZV9VyHJ+hr2Etef/Uwujml2qq+vnnlyynPAPfyK+pR +y0NycfCmMoI0w0rk685YfAW75DnPZb3k6B/jG10CggEBAMxf2DoI5EAKRaUcUOHa +fUxIFhl4p8HMPy7zVkORPt2tZLf8xz/z7mRRirG+7FlPetJj4ZBrr09fkZVtKkwJ 
+9o8o7jGv2hSC9s/IFHb38tMF586N9nPTgenmWbF09ZHuiXEpSZPiJZvIzn/5a1Ch +IHiKyPUYKm4MYvhmM/+J4Z5v0KzrgJXlWHi0GJFu6KfWyaOcbdQ4QWG6009XAcWv +Cbn5z9KlTvKKbFDMA+UyYVG6wrdUfVzC1V6uGq+/49qiZuzDWlz4EFWWlsNsRsft +Pmz5Mjglu+zVqoZJYYGDydWjmT0w53qmae7U2hJOyqr5ILINSIOKH5qMfiboRr6c +GM0CggEAJTQD/jWjHDIZFRO4SmurNLoyY7bSXJsYAhl77j9Cw/G4vcE+erZYAhp3 +LYu2nrnA8498T9F3H1oKWnK7u4YXO8ViyQd73ql7iKrMjE98CjfGcTPCXwOcPAts +ZpM8ykgFTsJpXEFvIR5cyZ6XFSw2m/Z7CRDpmwQ8es4LpNnYA7V5Yu/zDE4h2/2T +NmftCiZvkxwgj6VyKumOxXBnGK6lB+b6YMTltRrgD/35zmJoKRdqyLb1szPJtQuh +HjRTa/BVPgA66xYFWhifRUiYKpc0bARTYofHeoDgu6yPzcHMuM70NQQGF+WWJySg +vc3Za4ClKSLmb3ZA9giTswYMev+3BQ== -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index d96ab4775..73468fa08 100644 Binary files a/testing/web3signer_tests/tls/lighthouse/key.p12 and b/testing/web3signer_tests/tls/lighthouse/key.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/web3signer.pem b/testing/web3signer_tests/tls/lighthouse/web3signer.pem index 455021447..6266cadf9 100644 --- a/testing/web3signer_tests/tls/lighthouse/web3signer.pem +++ b/testing/web3signer_tests/tls/lighthouse/web3signer.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUOVccYETgo2YpKO85U4XRKifK09kwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMjAzMDA1NloYDzIxMjMwODI5MDMwMDU2 +VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCr -JajqnvRQEREph+zu7rw1QjHQG1x2H44SJSMjX1Wzi9FErlRSOzywPFL2AzGsNvNS -tPmxN/kF9mBjQIQHxo90M4GcZgW1aljPaXLvQWFrP9ak+JjHuUG+j51fVJp8F2Qc -BG8i2LjjSLvkEYSULHI0kbMPws+DKcemvZJ6IhkoPkbtnx5Z1zDj8D6vvWGJguMO -VSNJY7SoBNuSB6CJ7wCWBg7UPtTUrtnuJVvUh+3k2wc7LJ+C9wd7rt+qYb8LxQuc -j8dDyncXXeI583oGzjTE+1kFrE5TuMDlnWyKPa6NQPeXQtygFTyQL9RMW6JkgWWg -tDFWqd2Mgb8sCRtl5uTJFGJ7PFBP4T69JqYhz817tDS3JrMbbzzhRzf3cB6V2NCC -zVKBrO7gfAyDwWVr5iUyaXhLGyzuGg2nMbFMj/Pr7csravs+Jq5apwyZDNTv+2WQ -xP6d2gGFwQOxcPt4OGPjtFpVHH3cxLkcGsSOZ31akuhdSJ6MqWI4tkgRpsf5Ff0+ -z8SLZaCQIp7M4O4LpMreAT7smvEQpLphK1oKWlsY6ukkJ1y8KD3EfeJRpDL0PBTy -jacQATPsqUzeryCfqAMulLLqUbNFqv6Slhzt2vr+lfIr+IeUa/7XMeZOZJu1T/7n -fTjpdokSTx8DageE4Z3j90q5d4hdXvMWq6MpQW7RqQIDAQABo1QwUjALBgNVHQ8E +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS +cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz +1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa +J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H +9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G +WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB +YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 +4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 +HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr +rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS +8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN +/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQUsBCvmwLPQDG+iN5qI6P7SgLZyP0wDQYJKoZIhvcNAQELBQADggIBAE/j -mwchm30rB+dheTRBcVD0yHgYL2tQlpfKZeX9JDVWNMQ5OYHtMVwdD7RBQJ2ypqIr 
-5VP6/Hf0M1GE03mnDpjv29q57AkuGFZpBvZ+1XCG87336QIPqkPR4uMJ86MalsX2 -f9GHMG4H0rd1j+ozM0jhJNoVG4lSq/GNn2E9oRjMG8lq0M7apWwK1FQUBECIlfw+ -tk9aq2zLl409vuqzgsWeffBcdVEDHGCLQauzYRtxMBbzLb33gWWd+9149dWeG5up -P0CZvdetgXhlcbusmrBWVn0O57/QDaGzEUZKxqoy8Ncv04KMYN1gOF+nO5cKn0R1 -+4yvb/NJTdo9WcdLcleqSL1Ju3kX1dCIPOpuaZ3aEwLHrvlNxT8Y5OMvRsYPINAU -6JfNGu21+Bq2nEqSqrw8Ys2hdGI+E95uXjPcsm8BZRCfxfkEeYVtx4ZaqMF+bkfD -d+uePSFp4VBWbg40RMVymr1YcNTX3CjvtLZDH4BZBdx/8YjUEUYPpC7xGoaQDGvA -+J9cVHRpxYpry5fbBmSvrKvKXU6aijLpM7etjYWzYFturpi52Ya9h3LIHd4RaBzB -0YzmatirLK/07YBUECsVcAlddIK5KOA5Nd7+oUikmrR1wMY+I/hym6fSTZGo/TDY -vDFERRj1XOOhlCzHx94SS1DS0rVTAj4uxbuZisaz +HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg +/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu +taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 +RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d +0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv +gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ +v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut +OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 +mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 +4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z +yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX +JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/cert.pem b/testing/web3signer_tests/tls/web3signer/cert.pem index 455021447..6266cadf9 100644 --- a/testing/web3signer_tests/tls/web3signer/cert.pem +++ b/testing/web3signer_tests/tls/web3signer/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUOVccYETgo2YpKO85U4XRKifK09kwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMjAzMDA1NloYDzIxMjMwODI5MDMwMDU2 +VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCr -JajqnvRQEREph+zu7rw1QjHQG1x2H44SJSMjX1Wzi9FErlRSOzywPFL2AzGsNvNS -tPmxN/kF9mBjQIQHxo90M4GcZgW1aljPaXLvQWFrP9ak+JjHuUG+j51fVJp8F2Qc -BG8i2LjjSLvkEYSULHI0kbMPws+DKcemvZJ6IhkoPkbtnx5Z1zDj8D6vvWGJguMO -VSNJY7SoBNuSB6CJ7wCWBg7UPtTUrtnuJVvUh+3k2wc7LJ+C9wd7rt+qYb8LxQuc -j8dDyncXXeI583oGzjTE+1kFrE5TuMDlnWyKPa6NQPeXQtygFTyQL9RMW6JkgWWg -tDFWqd2Mgb8sCRtl5uTJFGJ7PFBP4T69JqYhz817tDS3JrMbbzzhRzf3cB6V2NCC -zVKBrO7gfAyDwWVr5iUyaXhLGyzuGg2nMbFMj/Pr7csravs+Jq5apwyZDNTv+2WQ -xP6d2gGFwQOxcPt4OGPjtFpVHH3cxLkcGsSOZ31akuhdSJ6MqWI4tkgRpsf5Ff0+ -z8SLZaCQIp7M4O4LpMreAT7smvEQpLphK1oKWlsY6ukkJ1y8KD3EfeJRpDL0PBTy -jacQATPsqUzeryCfqAMulLLqUbNFqv6Slhzt2vr+lfIr+IeUa/7XMeZOZJu1T/7n -fTjpdokSTx8DageE4Z3j90q5d4hdXvMWq6MpQW7RqQIDAQABo1QwUjALBgNVHQ8E +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS +cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz +1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa +J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H +9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G +WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB +YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 
+4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 +HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr +rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS +8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN +/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQUsBCvmwLPQDG+iN5qI6P7SgLZyP0wDQYJKoZIhvcNAQELBQADggIBAE/j -mwchm30rB+dheTRBcVD0yHgYL2tQlpfKZeX9JDVWNMQ5OYHtMVwdD7RBQJ2ypqIr -5VP6/Hf0M1GE03mnDpjv29q57AkuGFZpBvZ+1XCG87336QIPqkPR4uMJ86MalsX2 -f9GHMG4H0rd1j+ozM0jhJNoVG4lSq/GNn2E9oRjMG8lq0M7apWwK1FQUBECIlfw+ -tk9aq2zLl409vuqzgsWeffBcdVEDHGCLQauzYRtxMBbzLb33gWWd+9149dWeG5up -P0CZvdetgXhlcbusmrBWVn0O57/QDaGzEUZKxqoy8Ncv04KMYN1gOF+nO5cKn0R1 -+4yvb/NJTdo9WcdLcleqSL1Ju3kX1dCIPOpuaZ3aEwLHrvlNxT8Y5OMvRsYPINAU -6JfNGu21+Bq2nEqSqrw8Ys2hdGI+E95uXjPcsm8BZRCfxfkEeYVtx4ZaqMF+bkfD -d+uePSFp4VBWbg40RMVymr1YcNTX3CjvtLZDH4BZBdx/8YjUEUYPpC7xGoaQDGvA -+J9cVHRpxYpry5fbBmSvrKvKXU6aijLpM7etjYWzYFturpi52Ya9h3LIHd4RaBzB -0YzmatirLK/07YBUECsVcAlddIK5KOA5Nd7+oUikmrR1wMY+I/hym6fSTZGo/TDY -vDFERRj1XOOhlCzHx94SS1DS0rVTAj4uxbuZisaz +HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg +/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu +taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 +RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d +0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv +gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ +v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut +OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 +mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 +4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z +yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX +JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/key.key b/testing/web3signer_tests/tls/web3signer/key.key index 42b7ad9a9..d96975340 100644 --- a/testing/web3signer_tests/tls/web3signer/key.key +++ b/testing/web3signer_tests/tls/web3signer/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCrJajqnvRQEREp -h+zu7rw1QjHQG1x2H44SJSMjX1Wzi9FErlRSOzywPFL2AzGsNvNStPmxN/kF9mBj -QIQHxo90M4GcZgW1aljPaXLvQWFrP9ak+JjHuUG+j51fVJp8F2QcBG8i2LjjSLvk -EYSULHI0kbMPws+DKcemvZJ6IhkoPkbtnx5Z1zDj8D6vvWGJguMOVSNJY7SoBNuS -B6CJ7wCWBg7UPtTUrtnuJVvUh+3k2wc7LJ+C9wd7rt+qYb8LxQucj8dDyncXXeI5 -83oGzjTE+1kFrE5TuMDlnWyKPa6NQPeXQtygFTyQL9RMW6JkgWWgtDFWqd2Mgb8s -CRtl5uTJFGJ7PFBP4T69JqYhz817tDS3JrMbbzzhRzf3cB6V2NCCzVKBrO7gfAyD -wWVr5iUyaXhLGyzuGg2nMbFMj/Pr7csravs+Jq5apwyZDNTv+2WQxP6d2gGFwQOx -cPt4OGPjtFpVHH3cxLkcGsSOZ31akuhdSJ6MqWI4tkgRpsf5Ff0+z8SLZaCQIp7M -4O4LpMreAT7smvEQpLphK1oKWlsY6ukkJ1y8KD3EfeJRpDL0PBTyjacQATPsqUze -ryCfqAMulLLqUbNFqv6Slhzt2vr+lfIr+IeUa/7XMeZOZJu1T/7nfTjpdokSTx8D -ageE4Z3j90q5d4hdXvMWq6MpQW7RqQIDAQABAoICAAajqX2/kJn+DYDDUoJS3deB -k8HfW9sDvpzO1sH/p+kVEJdV3XTKskAiePPs/AtynsYbue+BbL5J2GOlTDHqhqRi -/qFQ9mH7KAqUzEYCjutMkNC5yaB+2/Fu7BOXij4r4KDzHZYYGvULPGm8sbxXTI9k -QxJmk+sCTwnVgxYMllYAs3ryoChrUAzZpC7oXX0qiBElZZ7qWKbneFaeB+Dt9gN7 -5O2gKdy90zu5NIqmQsjs48cMhDweBerrBed7zv/fgyOt0rS7KRtNk7H8k2Rp8bNe -Dk4paOj3yvjlXmFvAuNdLLWwHPOzWzP7PQTAzcgRGn6NWvgiExOJMX+9czQE7OVd -OY47PndUFU6zkiOMYipnsEOFrZvHrvuCquQ+5X6x8PXdK4aFJ8VphH2HTo6xXr6E -q3zTHZq7rXSuI2yLBE6JslqP3D2H022cow6iLGnuJKYVXMOcOOTrrVBJjjau/OfN 
-feOvEgut6T7BmdWrcdgQzh3rvvMKdawdekuQgPjNfLxR5JCjWKaKqkJ1iBZ1jkiC -LqoeelsJnWSG+P9QKO+ntt3TW7qUsMPBAHIk2UqbsZcnX9La9huiIfABP1L1qGTb -WQJiIumyCY7LDEKcaqrFbsBS45xoQVoVlDeJPAFk48947mZY+m6TnwEC/K000ENU -fYS0x+CsNmEaXGbItrZBAoIBAQDouRfE1B/bl8KktK3uQ+wwlTWpiZnzji8wg8FG -O68BsL1qmxDG0eShBQzwNdFY9HTgGu/BjPz02liXY+smB1DXgk1tuP6NXl7ZakE4 -gdaL9wifjvoTqzgf3nBJguUAxGRBpYzbYRMELnw/FSjwLykpGUTSv+jKhOqNqb8r -T/JIFq/DG2oioYuzksEdDNaWOD3CkTjkA4guBvM5iONSed4VIn4C/L31jNFXeG1u -ToowtFLr8zG2h6sfI2NWHD8cR1LKQA6hSaimrrHUFYBo4qzNJ7afVFkF/zO37UGL -isNAmMQfFE7Lqom7YcI+QRDhtBX3XsvN3Y/RPQASZWtOTr/BAoIBAQC8Q+ggBpVK -En2CWXTvoBys9Ad3le50RIH3pmM4Uv1AQeNNtT6PKRKiL18stRxDql0oGCslOJh4 -FvawJGfANVN0vu3aIwG6kg6myYxn4sP9x2VeQUktaKcdCZ4oVuG2aXwCeg92Cpmz -W7jok8qvWjmN8IDBM4iN2Q5auO0Xg7n6vjZ6EBkm+XCsIzSazgN2sLoNC2RUKbVT -U6shGkPGhHJwumXtcPp+Ogljlv/8Gc+oc5Ty+hdhmMzTGDYwy3bwd4yfIFRRSmCr -OS0V2cwnsUQkmH0c5DVVIa0s1i+nqM2epvxjQOIsBJpEwzHXY00YZb5d4jeELPqU -XUhnrKqKxQvpAoIBAFHTerL/LrBkPNDyolErWql+XR7ePd4v+RGi0dsi8xayEPeh -zBVMCYpAH1t6YMBZO5rsfa5dJzfkac/ZFv4JBniv3Q+eQwprywfA32vB4zDVTBfm -CrHNuu8ho/OE7YYGh4W5crxT9n665X68ruc8fclwlA1R4sUKVPo4W/obowGL0ILW -acwBZwBdsj7Hm8+3uKdnrkwlncUpNm3dXqhKJzbhKNNeEGB9AcIymq91OAuF674A -hVM7goRxSeUmC16jCU4JldtJ7d2lgOskIEFAqid8Ni7xVlfQclvSNQCeaqaU0Chp -WIct0D2tUsHW2NuzGSIgF6Krq3yTaSoOtNsUv0ECggEAFR9lVtqGO3ZRoKNazFvh -e8IxaEhpJaBeGKQRc8tT4LbDwv830qYgEhRQkFqNnkXqB8qWZKmx6Z9h9CdRgK46 -+9lEJHpTAlTK0gnA+BLoPHv3spiOlkqsnURr+0isMGQrZre9LlhIIGiFGYsjbYMo -+/Tk7UhT5N5ajvE6oK3F2w0mXZGa0NWhv55/k3LTzqhLZ5VEn3DCiGPVynQA8LAB -iwZO01IeuLTYQtU5SVa4BsVZC93la6zSJkkMI3Ngl+BB5cSh0TEQIYXbuhzim/12 -kMiPGQO9vBx4KpSpah01XLyNirFH7vphOJ/R4sGgb8FSl4P/CJRnVOgWbJNh2wn6 -qQKCAQAkZMqlOokxcpiNLDyBS33mLzVuVIXIBsKmZibmurWxcXvyHGA7K/uHRvE/ -5pajoO8Pw9dQhAX2LmOISW8YJwR0UR9LmDOeYUW+8nypG2jprKezMVSNu+lWHanE -vw+fLvRWyDEdKQK6RHOytHppFn48eC5HrPdOe4EaNQ09vUiMsJmVL6ep4nuAg4nr -WilB9iJQtrFcItB5tnfD2puJQKaFV3rgqWCFIgJJg0ThuiWyoVNKtlRvv5o3mQyz -Y+jyCm4RtgSDm9+e/Tcv2vUeoiNt2bVb9tK3r2M2cZ6N1PuHV/cmBjf6I/ssPqmM -CXDusRSlsQNpzHc6QKq8IDZLut9g +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDScvshqu3747j4 +KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz1FPflpj7pVWa +gWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaaJ3smHx2+VOha +MWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H9C0UBIzXP7Pn +GrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6GWLtJvk5ekfOV +lRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pBYKPThE5zW5Kh +IxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK84y5L4BXxxohG +0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8HtmSgkPEgfSV +RxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQrrrIUQAuXDcQX +11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS8kbmmuhxshsn +ZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN/IC4PpwtYUO3 +/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABAoICABRxePXJ+KOpznPE5Owo7BWe +BqTzC/K1xlCYm0v5IJzYEQlM4e4p4wZ+/kR6Hex/nM4IR+bbZpxjcOUObIsWpJTI +VAgS2y5RcTp+UJzfXpJogIpKiqBMNutAqPOrK8Hg797PtlsmAKoBmNn8xqU1+2Oa +FX/rKaJus6qKZ2bz16DnkFUL4foabDJte0IFbd2yAyGv1ZqGiqFKSJFK+wYeoMZU +LzWOEyUR/wK5ryVwJJCY8z9BKAoKNYnb4oHTFlDRDdztIlxv29sR9dtHsjA3EdQc +nOCTNi7eY6JJlucgBSWGrsS6vTvpImGggIIWt6sOh0Px6Fg0F7mFtsESex2GePow +50MwKFbbVo3TUYRYTggJj7ba4+yrl/dsAWJUX3F90xNj/6REF+2+Licb7kgCHQKw +TvdExiikOOFtuFRkl5fqyoM9Ph+sj7/db5Pd53D8vaMjR3Yw/JA5dKPZS5ZKHBs0 +qo7FxV8ZlOESMv2eF6y0kM4wLhUN8wnEWxpsFWtXDNjYIlQ6W5qrfwR1vlnIkrmb +bYQCJFtko6CKUEa8yb4OvLgyX6VSskeYEC5zdekivZWJN/OZZa/xIS2nupYqD4GT +Y3QcsEhfzDvVIwI7M+eBwS5qjgdwN2qEGrXva5KKesb2zdjNircKaUahTWJNYHjj +jHGOSY/vyGFH2HFZNYZpAoIBAQDyoMpeXBDQhAXbHpIm6p8KljqRMHU05UeRRWVR 
+d0RKXGYq/bUzoAhr8F2QE2+HC+2NnBGh6qR5QNO/6H6p8Du6aSXDaDNJxTErOOmY +pAkbOlcA7TjpDSrNUr4EfAXl6vUF7JB8jJHEXIqBkbGWOFYPzwLEwErQAlQN2u4e +u9HKG3Me+DP2IcrCgZ5iWvmjV4l+vXYyBEXoJqHOWEscWXHiz64c336oZqwqKe/x +s8Xy2sd6FRU/mp34wXT4kZ56/U4BV+DEN20fffBiTfMQxKmXhMykmD/O63dASCiA +seZrZK5mRND+aS95MqI6FMm0ToKj24RvvAWR8w50cuF7wl5zAoIBAQDeDC6ImN7K +mSLaMBaIhoZsJDdG0cJiFPRmwtepeoWt4qUWuc51LOFthhlkyGx/JbEzFMK6uYTu +hHHNOgk6ydrz1+HOzpSvN0Iz61j1hJd8Ve/0MyTBg912FPe2p3hR9dN4j5Ly+oes +QvNIr/ReW5HJhDcgXm/9oT68XyzrKM3t93XPoO4wDPSHPbRWE2dzLrNi1xg/ZyRz +ZLAtBsGPG5rVAeSEob0ytZH2H1pHfkRQ/1jSKxwb+QVMfjDd5FrEAMLA4E6J8HFz +RDHTmrveGrR1i5BJrce3VUOAuL7Y3iw6Sb+b1LyA8htxiYfBVdVfCeocDv64m0R5 +NJs6Milm9uk1AoIBAQCdQLForusG+kqBVjMLng0uY2faKjoM6n2UHhIo1tAgEfr1 +6jHDH/nVW5iIhNBICucQXRLgip/HJskXHKzbn6RWkUe0epijO3c+uEhOciKkzw8M +vrOf+LTBFtupNGjuN3ZPPJ/42XKwffoXOEKNRj4hSN5Wfvr+DkREJp0mtjymbVwT +unKTGBu+LRxmSuh5gYbP6iPtDu/wIvnEL12fJim2Azyp4gDJTKJRQZUOZqHpYPrg +mUGIU8IHM/uID3lT5VDldftrsTC8tHdUf4kGWTBB0ASCuVrB1cMYmqwFnUfmWv7d +scRy3+Gw/6w9ULPadPgfE2umr4o8qfe4aazS9YsZAoIBADZH+hQwcr5KQ0fdW5TS +dgf3rn+khYVepAR++yOWLRm9/yeYEo14hD82+fw2Nre6aiAXoibtdT6tp/hIiLsT +X3AexTe+LoDK3Gc+0Edsu2+MvpUO75xS9Q+JvqirNfGrS5/8USsO7Z3B3CFXykBK +2E/P/33tOCljgqegCKYQGo9i4Cz6pV+fuyNYhT5Jjg+NShMOjAHr3/BJm/vV2/l1 +ARuzU77MnyjHVEA7l+FET8URNxBhs4RvEsmJS77itQGXQgTOkMSNv94yvI+DEwwP +sS/PB13LmrgJou/TuevgHCW/o5Sfo9lN1kGiIkq0Be4uyUlErSZJ5qpOnufSHWbr +U0UCggEAC5WM3BXKo11Y+XphsYnpJesiB9C5HMvhnB5oCHH7ffIVqkXp2AiUnWy6 +HE+DwUWFEtRLYr4beTXn+TeunoQa7X5K1JXV41XENf5CsbQTIUnX2j7o2ilCEx9C +rDPtpUZPObqXHBiHSF67Il7GitCud+7YDAGqbJABlV3WF0MkPIfW/cxN3cb65FoI +AEV3OZiS6zvDR91++ovNV5QAmH1vljvipM7kKy5RsLFF8GYa0KNTNJ/EYojKmw00 +2OakG0pjjDcWjfdGI+i5gcHNUZwbgqx4NG/RY3YslJswBhGGlhEGuuUtpH47HTM2 +oJ/aHbXf6PdOO9MYiI/es/dfKK8ywA== -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/web3signer/key.p12 b/testing/web3signer_tests/tls/web3signer/key.p12 index 51269be8d..792dc197f 100644 Binary files a/testing/web3signer_tests/tls/web3signer/key.p12 and b/testing/web3signer_tests/tls/web3signer/key.p12 differ diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt index 330132731..c4722fe58 100644 --- a/testing/web3signer_tests/tls/web3signer/known_clients.txt +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -1 +1 @@ -lighthouse FF:4C:84:A6:37:28:EC:7E:A7:D8:C6:49:0D:C6:F9:5D:C1:06:BA:6D:69:49:0A:AA:38:32:01:2B:ED:D9:F2:FA +lighthouse 02:D0:A8:C0:6A:59:90:40:54:67:D4:BD:AE:5A:D4:F5:14:A9:79:38:98:E0:62:93:C1:77:13:FC:B4:60:65:CE diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 46f523c99..90a82b7e3 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -19,7 +19,6 @@ slot_clock = { workspace = true } types = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" bincode = { workspace = true } serde_json = { workspace = true } slog = { workspace = true } @@ -56,9 +55,10 @@ itertools = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } task_executor = { workspace = true } -reqwest = { workspace = true } +reqwest = { workspace = true, features = ["native-tls"] } url = { workspace = true } malloc_utils = { workspace = true } sysinfo = { workspace = true } system_health = { path = "../common/system_health" } logging = { workspace = true } +strum = { workspace = true } diff --git a/validator_client/slashing_protection/Cargo.toml 
b/validator_client/slashing_protection/Cargo.toml index cc90c979b..baba14c53 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -16,7 +16,6 @@ rusqlite = { workspace = true } r2d2 = { workspace = true } r2d2_sqlite = "0.21.0" serde = { workspace = true } -serde_derive = "1.0.116" serde_json = { workspace = true } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } @@ -28,3 +27,4 @@ rayon = { workspace = true } [features] arbitrary-fuzz = ["types/arbitrary-fuzz"] +portable = ["types/portable"] diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile index e3d935b4c..1b9729634 100644 --- a/validator_client/slashing_protection/Makefile +++ b/validator_client/slashing_protection/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v5.2.1 +TESTS_TAG := v5.3.0 GENERATE_DIR := generated-tests OUTPUT_DIR := interchange-tests TARBALL := $(OUTPUT_DIR)-$(TESTS_TAG).tar.gz @@ -6,20 +6,23 @@ ARCHIVE_URL := https://github.com/eth-clients/slashing-protection-interchange-te ifeq ($(OS),Windows_NT) ifeq (, $(shell where rm)) - rmfile = if exist $(1) (del /F /Q $(1)) - rmdir = if exist $(1) (rmdir /Q /S $(1)) + rmfile = if exist $(1) (del /F /Q $(1)) + rmdir = if exist $(1) (rmdir /Q /S $(1)) + makedir = if not exist $(1) (mkdir $(1)) else - rmfile = rm -f $(1) - rmdir = rm -rf $(1) + rmfile = rm -f $(1) + rmdir = rm -rf $(1) + makedir = mkdir -p $(1) endif else - rmfile = rm -f $(1) - rmdir = rm -rf $(1) + rmfile = rm -f $(1) + rmdir = rm -rf $(1) + makedir = mkdir -p $(1) endif $(OUTPUT_DIR): $(TARBALL) $(call rmdir,$@) - mkdir $@ + $(call makedir,$@) tar --strip-components=1 -xzf $^ -C $@ $(TARBALL): diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index b96dd8eb7..c95cb6917 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -70,14 +70,18 @@ fn interchange_with_signing_roots( } fn main() { - let single_validator_blocks = - vec![(0, 32, false), (0, 33, true), (0, 31, false), (0, 1, false)]; + let single_validator_blocks = vec![ + (0, 32, false, false), + (0, 33, true, true), + (0, 31, false, false), + (0, 1, false, false), + ]; let single_validator_attestations = vec![ - (0, 3, 4, false), - (0, 14, 19, false), - (0, 15, 20, false), - (0, 16, 20, false), - (0, 15, 21, true), + (0, 3, 4, false, false), + (0, 14, 19, false, false), + (0, 15, 20, false, false), + (0, 16, 20, false, false), + (0, 15, 21, true, true), ]; let tests = vec![ @@ -104,7 +108,7 @@ fn main() { MultiTestCase::single( "single_validator_genesis_attestation", TestCase::new(interchange(vec![(0, vec![], vec![(0, 0)])])) - .with_attestations(vec![(0, 0, 0, false)]), + .with_attestations(vec![(0, 0, 0, false, false)]), ), MultiTestCase::single( "single_validator_multiple_blocks_and_attestations", @@ -114,23 +118,23 @@ fn main() { vec![(10, 11), (12, 13), (20, 24)], )])) .with_blocks(vec![ - (0, 1, false), - (0, 2, false), - (0, 3, false), - (0, 10, false), - (0, 1200, false), - (0, 4, true), - (0, 256, true), - (0, 1201, true), + (0, 1, false, false), + (0, 2, false, false), + (0, 3, false, false), + (0, 10, false, false), + (0, 1200, false, false), + (0, 4, false, true), + (0, 256, false, true), + (0, 1201, true, true), ]) .with_attestations(vec![ - (0, 9, 10, false), - (0, 12, 13, false), - (0, 11, 14, false), - (0, 21, 22, 
false), - (0, 10, 24, false), - (0, 11, 12, true), - (0, 20, 25, true), + (0, 9, 10, false, false), + (0, 12, 13, false, false), + (0, 11, 14, false, false), + (0, 21, 22, false, false), + (0, 10, 24, false, false), + (0, 11, 12, false, true), + (0, 20, 25, true, true), ]), ), MultiTestCase::single( @@ -157,30 +161,30 @@ fn main() { (2, vec![10, 15, 20], vec![(1, 2), (1, 3), (2, 4)]), ])) .with_blocks(vec![ - (0, 9, false), - (0, 10, false), - (0, 21, true), - (0, 11, true), - (1, 2, false), - (1, 3, false), - (1, 0, false), - (1, 101, true), - (2, 9, false), - (2, 10, false), - (2, 22, true), + (0, 9, false, false), + (0, 10, false, false), + (0, 21, true, true), + (0, 11, false, true), + (1, 2, false, false), + (1, 3, false, false), + (1, 0, false, false), + (1, 101, true, true), + (2, 9, false, false), + (2, 10, false, false), + (2, 22, true, true), ]) .with_attestations(vec![ - (0, 0, 5, false), - (0, 3, 6, false), - (0, 4, 6, true), - (0, 5, 7, true), - (0, 6, 8, true), - (1, 1, 7, false), - (1, 1, 4, true), - (1, 5, 7, true), - (2, 0, 0, false), - (2, 0, 1, false), - (2, 2, 5, true), + (0, 0, 5, false, false), + (0, 3, 6, false, false), + (0, 4, 6, true, true), + (0, 5, 7, true, true), + (0, 6, 8, true, true), + (1, 1, 7, false, false), + (1, 1, 4, false, true), + (1, 5, 7, true, true), + (2, 0, 0, false, false), + (2, 0, 1, false, false), + (2, 2, 5, true, true), ]), ), MultiTestCase::single( @@ -202,16 +206,16 @@ fn main() { TestCase::new(interchange(vec![(0, vec![40], vec![(2, 30)])])), TestCase::new(interchange(vec![(0, vec![50], vec![(10, 50)])])) .with_blocks(vec![ - (0, 41, false), - (0, 45, false), - (0, 49, false), - (0, 50, false), - (0, 51, true), + (0, 41, false, true), + (0, 45, false, true), + (0, 49, false, true), + (0, 50, false, false), + (0, 51, true, true), ]) .with_attestations(vec![ - (0, 3, 31, false), - (0, 9, 49, false), - (0, 10, 51, true), + (0, 3, 31, false, true), + (0, 9, 49, false, true), + (0, 10, 51, true, true), ]), ], ), @@ -221,20 +225,20 @@ fn main() { TestCase::new(interchange(vec![(0, vec![40], vec![])])), TestCase::new(interchange(vec![(0, vec![20], vec![])])) .contains_slashable_data() - .with_blocks(vec![(0, 20, false)]), + .with_blocks(vec![(0, 20, false, false)]), ], ), MultiTestCase::new( "multiple_interchanges_single_validator_multiple_blocks_out_of_order", vec![ TestCase::new(interchange(vec![(0, vec![0], vec![])])).with_blocks(vec![ - (0, 10, true), - (0, 20, true), - (0, 30, true), + (0, 10, true, true), + (0, 20, true, true), + (0, 30, true, true), ]), TestCase::new(interchange(vec![(0, vec![20], vec![])])) .contains_slashable_data() - .with_blocks(vec![(0, 29, false)]), + .with_blocks(vec![(0, 29, false, true)]), ], ), MultiTestCase::new( @@ -243,7 +247,7 @@ fn main() { TestCase::new(interchange(vec![(0, vec![40], vec![])])), TestCase::new(interchange(vec![(0, vec![20, 50], vec![])])) .contains_slashable_data() - .with_blocks(vec![(0, 20, false), (0, 50, false)]), + .with_blocks(vec![(0, 20, false, false), (0, 50, false, false)]), ], ), MultiTestCase::new( @@ -253,10 +257,10 @@ fn main() { TestCase::new(interchange(vec![(0, vec![], vec![(10, 11)])])) .contains_slashable_data() .with_attestations(vec![ - (0, 10, 14, false), - (0, 12, 13, false), - (0, 12, 14, true), - (0, 13, 15, true), + (0, 10, 14, false, false), + (0, 12, 13, false, false), + (0, 12, 14, true, true), + (0, 13, 15, true, true), ]), ], ), @@ -267,11 +271,11 @@ fn main() { TestCase::new(interchange(vec![(0, vec![], vec![(9, 21)])])) .contains_slashable_data() 
.with_attestations(vec![ - (0, 10, 20, false), - (0, 10, 21, false), - (0, 9, 21, false), - (0, 9, 22, false), - (0, 10, 22, true), + (0, 10, 20, false, false), + (0, 10, 21, false, false), + (0, 9, 21, false, false), + (0, 9, 22, false, false), + (0, 10, 22, true, true), ]), ], ), @@ -282,11 +286,11 @@ fn main() { TestCase::new(interchange(vec![(0, vec![], vec![(10, 20)])])) .contains_slashable_data() .with_attestations(vec![ - (0, 10, 20, false), - (0, 10, 21, false), - (0, 9, 21, false), - (0, 9, 22, false), - (0, 10, 22, true), + (0, 10, 20, false, false), + (0, 10, 21, false, false), + (0, 9, 21, false, false), + (0, 9, 22, false, false), + (0, 10, 22, true, true), ]), ], ), @@ -303,13 +307,13 @@ fn main() { ])) .contains_slashable_data() .with_blocks(vec![ - (0, 0, false), - (0, 3, true), - (0, 7, true), - (0, 3, true), - (1, 0, false), + (0, 0, false, false), + (0, 3, false, true), + (0, 7, true, true), + (0, 3, false, true), + (1, 0, false, false), ]) - .with_attestations(vec![(0, 0, 4, false), (1, 0, 4, true)]), + .with_attestations(vec![(0, 0, 4, false, false), (1, 0, 4, true, true)]), ], ), MultiTestCase::new( @@ -330,9 +334,9 @@ fn main() { ])) .contains_slashable_data() .with_attestations(vec![ - (0, 0, 4, false), - (1, 1, 2, false), - (2, 1, 2, false), + (0, 0, 4, false, false), + (1, 1, 2, false, false), + (2, 1, 2, false, false), ]), ], ), @@ -351,23 +355,23 @@ fn main() { ])) .contains_slashable_data() .with_blocks(vec![ - (0, 100, false), - (1, 101, false), - (2, 102, false), - (0, 103, true), - (1, 104, true), - (2, 105, true), + (0, 100, false, false), + (1, 101, false, false), + (2, 102, false, false), + (0, 103, true, true), + (1, 104, true, true), + (2, 105, true, true), ]) .with_attestations(vec![ - (0, 12, 13, false), - (0, 11, 14, false), - (1, 12, 13, false), - (1, 11, 14, false), - (2, 12, 13, false), - (2, 11, 14, false), - (0, 12, 14, true), - (1, 13, 14, true), - (2, 13, 14, true), + (0, 12, 13, false, false), + (0, 11, 14, false, false), + (1, 12, 13, false, false), + (1, 11, 14, false, false), + (2, 12, 13, false, false), + (2, 11, 14, false, false), + (0, 12, 14, true, true), + (1, 13, 14, true, true), + (2, 13, 14, true, true), ]), ], ), @@ -379,36 +383,36 @@ fn main() { "single_validator_source_greater_than_target_surrounding", TestCase::new(interchange(vec![(0, vec![], vec![(5, 2)])])) .contains_slashable_data() - .with_attestations(vec![(0, 3, 4, false)]), + .with_attestations(vec![(0, 3, 4, false, false)]), ), MultiTestCase::single( "single_validator_source_greater_than_target_surrounded", TestCase::new(interchange(vec![(0, vec![], vec![(5, 2)])])) .contains_slashable_data() - .with_attestations(vec![(0, 6, 1, false)]), + .with_attestations(vec![(0, 6, 1, false, false)]), ), MultiTestCase::single( "single_validator_source_greater_than_target_sensible_iff_minified", TestCase::new(interchange(vec![(0, vec![], vec![(5, 2), (6, 7)])])) .contains_slashable_data() - .with_attestations(vec![(0, 5, 8, false), (0, 6, 8, true)]), + .with_attestations(vec![(0, 5, 8, false, false), (0, 6, 8, true, true)]), ), MultiTestCase::single( "single_validator_out_of_order_blocks", TestCase::new(interchange(vec![(0, vec![6, 5], vec![])])).with_blocks(vec![ - (0, 5, false), - (0, 6, false), - (0, 7, true), + (0, 5, false, false), + (0, 6, false, false), + (0, 7, true, true), ]), ), MultiTestCase::single( "single_validator_out_of_order_attestations", TestCase::new(interchange(vec![(0, vec![], vec![(4, 5), (3, 4)])])).with_attestations( vec![ - (0, 3, 4, false), - (0, 4, 5, 
false), - (0, 1, 10, false), - (0, 3, 3, false), + (0, 3, 4, false, false), + (0, 4, 5, false, false), + (0, 1, 10, false, false), + (0, 3, 3, false, false), ], ), ), @@ -417,15 +421,15 @@ fn main() { MultiTestCase::single( "single_validator_two_blocks_no_signing_root", TestCase::new(interchange(vec![(0, vec![10, 20], vec![])])) - .with_blocks(vec![(0, 20, false)]), + .with_blocks(vec![(0, 20, false, false)]), ), MultiTestCase::single( "single_validator_multiple_block_attempts", TestCase::new(interchange(vec![(0, vec![15, 16, 17], vec![])])) .with_signing_root_blocks(vec![ - (0, 16, 0, false), - (0, 16, 1, false), - (0, 16, u64::MAX, false), + (0, 16, 0, false, false), + (0, 16, 1, false, false), + (0, 16, u64::MAX, false, false), ]), ), MultiTestCase::single( @@ -436,15 +440,15 @@ fn main() { vec![], )])) .with_signing_root_blocks(vec![ - (0, 15, 151, true), - (0, 16, 161, true), - (0, 17, 171, true), - (0, 15, 152, false), - (0, 15, 0, false), - (0, 16, 151, false), - (0, 17, 151, false), - (0, 18, 151, true), - (0, 14, 171, false), + (0, 15, 151, false, true), + (0, 16, 161, false, true), + (0, 17, 171, false, true), + (0, 15, 152, false, false), + (0, 15, 0, false, false), + (0, 16, 151, false, false), + (0, 17, 151, false, false), + (0, 18, 151, true, true), + (0, 14, 171, false, false), ]), ), MultiTestCase::single( @@ -455,11 +459,11 @@ fn main() { vec![(5, 15, Some(515))], )])) .with_signing_root_attestations(vec![ - (0, 5, 15, 0, false), - (0, 5, 15, 1, false), - (0, 5, 15, 515, true), - (0, 6, 15, 615, false), - (0, 5, 14, 515, false), + (0, 5, 15, 0, false, false), + (0, 5, 15, 1, false, false), + (0, 5, 15, 515, false, true), + (0, 6, 15, 615, false, false), + (0, 5, 14, 515, false, false), ]), ), MultiTestCase::single( @@ -500,8 +504,12 @@ fn main() { (0, vec![10, 11], vec![(0, 2)]), (0, vec![12, 13], vec![(1, 3)]), ])) - .with_blocks(vec![(0, 10, false), (0, 13, false), (0, 14, true)]) - .with_attestations(vec![(0, 0, 2, false), (0, 1, 3, false)]), + .with_blocks(vec![ + (0, 10, false, false), + (0, 13, false, false), + (0, 14, true, true), + ]) + .with_attestations(vec![(0, 0, 2, false, false), (0, 1, 3, false, false)]), ), MultiTestCase::single( "duplicate_pubkey_slashable_block", @@ -510,7 +518,7 @@ fn main() { (0, vec![10], vec![(1, 3)]), ])) .contains_slashable_data() - .with_blocks(vec![(0, 10, false), (0, 11, true)]), + .with_blocks(vec![(0, 10, false, false), (0, 11, true, true)]), ), MultiTestCase::single( "duplicate_pubkey_slashable_attestation", @@ -520,10 +528,10 @@ fn main() { ])) .contains_slashable_data() .with_attestations(vec![ - (0, 0, 1, false), - (0, 0, 2, false), - (0, 0, 4, false), - (0, 1, 4, true), + (0, 0, 1, false, false), + (0, 0, 2, false, false), + (0, 0, 4, false, false), + (0, 1, 4, true, true), ]), ), ]; diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index 99d37c38b..ad5f21e51 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -1,5 +1,5 @@ use crate::InterchangeError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::cmp::max; use std::collections::{HashMap, HashSet}; use std::io; diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index dc828773b..d88bb93a0 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ 
b/validator_client/slashing_protection/src/interchange_test.rs @@ -3,7 +3,7 @@ use crate::{ test_utils::{pubkey, DEFAULT_GENESIS_VALIDATORS_ROOT}, SigningRoot, SlashingDatabase, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashSet; use tempfile::tempdir; use types::{Epoch, Hash256, PublicKeyBytes, Slot}; @@ -33,6 +33,7 @@ pub struct TestBlock { pub slot: Slot, pub signing_root: Hash256, pub should_succeed: bool, + pub should_succeed_complete: bool, } #[derive(Debug, Clone, Deserialize, Serialize)] @@ -43,6 +44,7 @@ pub struct TestAttestation { pub target_epoch: Epoch, pub signing_root: Hash256, pub should_succeed: bool, + pub should_succeed_complete: bool, } impl MultiTestCase { @@ -68,10 +70,6 @@ impl MultiTestCase { let slashing_db_file = dir.path().join("slashing_protection.sqlite"); let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap(); - // Now that we are using implicit minification on import, we must always allow - // false positives. - let allow_false_positives = true; - for test_case in &self.steps { // If the test case is marked as containing slashable data, then the spec allows us to // fail to import the file. However, we minify on import and ignore slashable data, so @@ -124,7 +122,7 @@ impl MultiTestCase { i, self.name, safe ); } - Err(e) if block.should_succeed && !allow_false_positives => { + Err(e) if block.should_succeed => { panic!( "block {} from `{}` failed when it should have succeeded: {:?}", i, self.name, e @@ -147,7 +145,7 @@ impl MultiTestCase { i, self.name, safe ); } - Err(e) if att.should_succeed && !allow_false_positives => { + Err(e) if att.should_succeed => { panic!( "attestation {} from `{}` failed when it should have succeeded: {:?}", i, self.name, e @@ -181,53 +179,65 @@ impl TestCase { self } - pub fn with_blocks(self, blocks: impl IntoIterator) -> Self { - self.with_signing_root_blocks( - blocks - .into_iter() - .map(|(index, slot, should_succeed)| (index, slot, 0, should_succeed)), - ) + pub fn with_blocks(self, blocks: impl IntoIterator) -> Self { + self.with_signing_root_blocks(blocks.into_iter().map( + |(index, slot, should_succeed, should_succeed_complete)| { + (index, slot, 0, should_succeed, should_succeed_complete) + }, + )) } pub fn with_signing_root_blocks( mut self, - blocks: impl IntoIterator, + blocks: impl IntoIterator, ) -> Self { - self.blocks.extend( - blocks - .into_iter() - .map(|(pk, slot, signing_root, should_succeed)| TestBlock { + self.blocks.extend(blocks.into_iter().map( + |(pk, slot, signing_root, should_succeed, should_succeed_complete)| { + assert!( + !should_succeed || should_succeed_complete, + "if should_succeed is true then should_succeed_complete must also be true" + ); + TestBlock { pubkey: pubkey(pk), slot: Slot::new(slot), signing_root: Hash256::from_low_u64_be(signing_root), should_succeed, - }), - ); + should_succeed_complete, + } + }, + )); self } pub fn with_attestations( self, - attestations: impl IntoIterator, + attestations: impl IntoIterator, ) -> Self { - self.with_signing_root_attestations( - attestations - .into_iter() - .map(|(id, source, target, succeed)| (id, source, target, 0, succeed)), - ) + self.with_signing_root_attestations(attestations.into_iter().map( + |(id, source, target, succeed, succeed_complete)| { + (id, source, target, 0, succeed, succeed_complete) + }, + )) } pub fn with_signing_root_attestations( mut self, - attestations: impl IntoIterator, + attestations: impl IntoIterator, ) -> Self { 
self.attestations.extend(attestations.into_iter().map( - |(pk, source, target, signing_root, should_succeed)| TestAttestation { - pubkey: pubkey(pk), - source_epoch: Epoch::new(source), - target_epoch: Epoch::new(target), - signing_root: Hash256::from_low_u64_be(signing_root), - should_succeed, + |(pk, source, target, signing_root, should_succeed, should_succeed_complete)| { + assert!( + !should_succeed || should_succeed_complete, + "if should_succeed is true then should_succeed_complete must also be true" + ); + TestAttestation { + pubkey: pubkey(pk), + source_epoch: Epoch::new(source), + target_epoch: Epoch::new(target), + signing_root: Hash256::from_low_u64_be(signing_root), + should_succeed, + should_succeed_complete, + } }, )); self diff --git a/validator_client/slashing_protection/tests/interop.rs b/validator_client/slashing_protection/tests/interop.rs index ee5bb1147..ee8f522cd 100644 --- a/validator_client/slashing_protection/tests/interop.rs +++ b/validator_client/slashing_protection/tests/interop.rs @@ -25,8 +25,10 @@ fn test_root_dir() -> PathBuf { .join("tests") } +// NOTE: I've combined two tests into one to avoid a race condition over which test builds the +// TEST_ROOT_DIR lazy static. #[test] -fn generated() { +fn generated_and_with_minification() { for entry in TEST_ROOT_DIR .join("generated") .read_dir() @@ -37,10 +39,7 @@ let test_case: MultiTestCase = serde_json::from_reader(&file).unwrap(); test_case.run(false); } -} -#[test] -fn generated_with_minification() { for entry in TEST_ROOT_DIR .join("generated") .read_dir() diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index f0a9258c7..43b9d60e2 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -1,4 +1,4 @@ -use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; use crate::{ duties_service::{DutiesService, DutyAndProof}, http_metrics::metrics, @@ -193,7 +193,7 @@ impl AttestationService { .into_iter() .fold(HashMap::new(), |mut map, duty_and_proof| { map.entry(duty_and_proof.duty.committee_index) - .or_insert_with(Vec::new) + .or_default() .push(duty_and_proof); map }); @@ -433,9 +433,10 @@ impl AttestationService { // Post the attestations to the BN.
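+ // `request` broadcasts to every beacon node when `ApiTopic::Attestations` is among the + // configured broadcast topics, and otherwise stops at the first success.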
match self .beacon_nodes - .first_success( + .request( RequireSynced::No, OfflineOnFailure::Yes, + ApiTopic::Attestations, |beacon_node| async move { let _timer = metrics::start_timer_vec( &metrics::ATTESTATION_SERVICE_TIMES, @@ -491,6 +492,14 @@ impl AttestationService { ) -> Result<(), String> { let log = self.context.log(); + if !validator_duties + .iter() + .any(|duty_and_proof| duty_and_proof.selection_proof.is_some()) + { + // Exit early if no validator is an aggregator + return Ok(()); + } + let aggregated_attestation = &self .beacon_nodes .first_success( diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 531cec08a..23458d327 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -7,6 +7,7 @@ use crate::http_metrics::metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_REQUESTS}; use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; +use serde::{Deserialize, Serialize}; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::fmt; @@ -15,6 +16,7 @@ use std::future::Future; use std::marker::PhantomData; use std::sync::Arc; use std::time::{Duration, Instant}; +use strum::{EnumString, EnumVariantNames}; use tokio::{sync::RwLock, time::sleep}; use types::{ChainSpec, Config, EthSpec}; @@ -296,6 +298,14 @@ impl CandidateBeaconNode { "endpoint_capella_fork_epoch" => ?beacon_node_spec.capella_fork_epoch, "hint" => UPDATE_REQUIRED_LOG_HINT, ); + } else if beacon_node_spec.deneb_fork_epoch != spec.deneb_fork_epoch { + warn!( + log, + "Beacon node has mismatched Deneb fork epoch"; + "endpoint" => %self.beacon_node, + "endpoint_deneb_fork_epoch" => ?beacon_node_spec.deneb_fork_epoch, + "hint" => UPDATE_REQUIRED_LOG_HINT, + ); } Ok(()) @@ -322,7 +332,7 @@ impl CandidateBeaconNode { pub struct BeaconNodeFallback<T, E> { candidates: Vec<CandidateBeaconNode<E>>, slot_clock: Option<T>, - disable_run_on_all: bool, + broadcast_topics: Vec<ApiTopic>, spec: ChainSpec, log: Logger, } @@ -330,14 +340,14 @@ impl BeaconNodeFallback { pub fn new( candidates: Vec<CandidateBeaconNode<E>>, - disable_run_on_all: bool, + broadcast_topics: Vec<ApiTopic>, spec: ChainSpec, log: Logger, ) -> Self { Self { candidates, slot_clock: None, - disable_run_on_all, + broadcast_topics, spec, log, } @@ -571,7 +581,7 @@ impl BeaconNodeFallback { /// It returns a list of errors along with the beacon node id that failed for `func`. /// Since this ignores the actual result of `func`, this function should only be used for beacon /// node calls whose results we do not care about, only that they completed successfully. - pub async fn run_on_all<'a, F, O, Err, R>( + pub async fn broadcast<'a, F, O, Err, R>( &'a self, require_synced: RequireSynced, offline_on_failure: OfflineOnFailure, @@ -679,11 +689,12 @@ impl BeaconNodeFallback { } /// Call `func` on first beacon node that returns success or on all beacon nodes - /// depending on the value of `disable_run_on_all`. - pub async fn run<'a, F, Err, R>( + /// depending on the `topic` and configuration.
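+ /// Topics present in `broadcast_topics` are fanned out to every candidate node via + /// `broadcast`; all other topics go through `first_success`.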
+ pub async fn request<'a, F, Err, R>( &'a self, require_synced: RequireSynced, offline_on_failure: OfflineOnFailure, + topic: ApiTopic, func: F, ) -> Result<(), Errors<Err>> where @@ -691,13 +702,47 @@ impl BeaconNodeFallback { R: Future<Output = Result<(), Err>>, Err: Debug, { - if self.disable_run_on_all { + if self.broadcast_topics.contains(&topic) { + self.broadcast(require_synced, offline_on_failure, func) + .await + } else { self.first_success(require_synced, offline_on_failure, func) .await?; Ok(()) - } else { - self.run_on_all(require_synced, offline_on_failure, func) - .await } } } + +/// Tells `BeaconNodeFallback` which API requests should be broadcast to all beacon nodes. +#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumString, EnumVariantNames)] +#[strum(serialize_all = "kebab-case")] +pub enum ApiTopic { + Attestations, + Blocks, + Subscriptions, + SyncCommittee, +} + +impl ApiTopic { + pub fn all() -> Vec<ApiTopic> { + use ApiTopic::*; + vec![Attestations, Blocks, Subscriptions, SyncCommittee] + } +} + +#[cfg(test)] +mod test { + use super::*; + use std::str::FromStr; + use strum::VariantNames; + + #[test] + fn api_topic_all() { + let all = ApiTopic::all(); + assert_eq!(all.len(), ApiTopic::VARIANTS.len()); + assert!(ApiTopic::VARIANTS + .iter() + .map(|topic| ApiTopic::from_str(topic).unwrap()) + .eq(all.into_iter())); + } +} diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 094b85bf8..445d4f1a5 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,6 +1,6 @@ use crate::beacon_node_fallback::{Error as FallbackError, Errors}; use crate::{ - beacon_node_fallback::{BeaconNodeFallback, RequireSynced}, + beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}, determine_graffiti, graffiti_file::GraffitiFile, OfflineOnFailure, @@ -9,10 +9,11 @@ use crate::{ http_metrics::metrics, validator_store::{Error as ValidatorStoreError, ValidatorStore}, }; +use bls::SignatureBytes; use environment::RuntimeContext; +use eth2::types::{FullBlockContents, PublishBlockRequest}; use eth2::{BeaconNodeHttpClient, StatusCode}; -use slog::Logger; -use slog::{crit, debug, error, info, trace, warn}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::fmt::Debug; use std::future::Future; @@ -20,15 +21,17 @@ use std::ops::Deref; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use tokio::time::sleep; use types::{ - AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes, + BlindedBeaconBlock, BlockType, EthSpec, Graffiti, PublicKeyBytes, SignedBlindedBeaconBlock, Slot, }; #[derive(Debug)] pub enum BlockError { + /// A recoverable error that can be retried, as the validator has not signed anything. Recoverable(String), + /// An irrecoverable error has occurred during block proposal and should not be retried, as a + /// block may have already been signed.
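+ /// (Signing a second block at the same slot would be a slashable proposer equivocation, + /// so callers only log this error instead of retrying.)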
Irrecoverable(String), } @@ -56,7 +59,6 @@ pub struct BlockServiceBuilder { context: Option>, graffiti: Option, graffiti_file: Option, - block_delay: Option, } impl BlockServiceBuilder { @@ -69,7 +71,6 @@ impl BlockServiceBuilder { context: None, graffiti: None, graffiti_file: None, - block_delay: None, } } @@ -108,11 +109,6 @@ impl BlockServiceBuilder { self } - pub fn block_delay(mut self, block_delay: Option) -> Self { - self.block_delay = block_delay; - self - } - pub fn build(self) -> Result, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -131,7 +127,6 @@ impl BlockServiceBuilder { proposer_nodes: self.proposer_nodes, graffiti: self.graffiti, graffiti_file: self.graffiti_file, - block_delay: self.block_delay, }), }) } @@ -146,35 +141,41 @@ pub struct ProposerFallback { impl ProposerFallback { // Try `func` on `self.proposer_nodes` first. If that doesn't work, try `self.beacon_nodes`. - pub async fn first_success_try_proposers_first<'a, F, O, Err, R>( + pub async fn request_proposers_first<'a, F, Err, R>( &'a self, require_synced: RequireSynced, offline_on_failure: OfflineOnFailure, func: F, - ) -> Result> + ) -> Result<(), Errors> where F: Fn(&'a BeaconNodeHttpClient) -> R + Clone, - R: Future>, + R: Future>, Err: Debug, { // If there are proposer nodes, try calling `func` on them and return early if they are successful. if let Some(proposer_nodes) = &self.proposer_nodes { - if let Ok(result) = proposer_nodes - .first_success(require_synced, offline_on_failure, func.clone()) + if proposer_nodes + .request( + require_synced, + offline_on_failure, + ApiTopic::Blocks, + func.clone(), + ) .await + .is_ok() { - return Ok(result); + return Ok(()); } } // If the proposer nodes failed, try on the non-proposer nodes. self.beacon_nodes - .first_success(require_synced, offline_on_failure, func) + .request(require_synced, offline_on_failure, ApiTopic::Blocks, func) .await } // Try `func` on `self.beacon_nodes` first. If that doesn't work, try `self.proposer_nodes`. - pub async fn first_success_try_proposers_last<'a, F, O, Err, R>( + pub async fn request_proposers_last<'a, F, O, Err, R>( &'a self, require_synced: RequireSynced, offline_on_failure: OfflineOnFailure, @@ -215,7 +216,6 @@ pub struct Inner { context: RuntimeContext, graffiti: Option, graffiti_file: Option, - block_delay: Option, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. 
@@ -259,18 +259,7 @@ impl BlockService { executor.spawn( async move { while let Some(notif) = notification_rx.recv().await { - let service = self.clone(); - - if let Some(delay) = service.block_delay { - debug!( - service.context.log(), - "Delaying block production by {}ms", - delay.as_millis() - ); - sleep(delay).await; - } - - service.do_update(notif).await.ok(); + self.do_update(notif).await.ok(); } debug!(log, "Block service shutting down"); }, @@ -334,93 +323,206 @@ impl BlockService { ) } - for validator_pubkey in proposers { - let builder_proposals = self - .validator_store - .get_builder_proposals(&validator_pubkey); - let service = self.clone(); - let log = log.clone(); - self.inner.context.executor.spawn( - async move { - if builder_proposals { + if self.validator_store.produce_block_v3() { + for validator_pubkey in proposers { + let builder_boost_factor = self.get_builder_boost_factor(&validator_pubkey); + let service = self.clone(); + let log = log.clone(); + self.inner.context.executor.spawn( + async move { let result = service - .clone() - .publish_block::>(slot, validator_pubkey) + .publish_block_v3(slot, validator_pubkey, builder_boost_factor) .await; + match result { - Err(BlockError::Recoverable(e)) => { + Ok(_) => {} + Err(BlockError::Recoverable(e)) | Err(BlockError::Irrecoverable(e)) => { error!( log, "Error whilst producing block"; "error" => ?e, "block_slot" => ?slot, - "info" => "blinded proposal failed, attempting full block" + "info" => "block v3 proposal failed, this error may or may not result in a missed block" ); - if let Err(e) = service - .publish_block::>(slot, validator_pubkey) - .await - { - // Log a `crit` since a full block - // (non-builder) proposal failed. - crit!( + } + } + }, + "block service", + ) + } + } else { + for validator_pubkey in proposers { + let builder_proposals = self + .validator_store + .get_builder_proposals(&validator_pubkey); + let service = self.clone(); + let log = log.clone(); + self.inner.context.executor.spawn( + async move { + if builder_proposals { + let result = service + .publish_block(slot, validator_pubkey, true) + .await; + + match result { + Err(BlockError::Recoverable(e)) => { + error!( log, "Error whilst producing block"; "error" => ?e, "block_slot" => ?slot, - "info" => "full block attempted after a blinded failure", + "info" => "blinded proposal failed, attempting full block" ); + if let Err(e) = service + .publish_block(slot, validator_pubkey, false) + .await + { + // Log a `crit` since a full block + // (non-builder) proposal failed. + crit!( + log, + "Error whilst producing block"; + "error" => ?e, + "block_slot" => ?slot, + "info" => "full block attempted after a blinded failure", + ); + } } - } - Err(BlockError::Irrecoverable(e)) => { - // Only log an `error` since it's common for - // builders to timeout on their response, only - // to publish the block successfully themselves. - error!( + Err(BlockError::Irrecoverable(e)) => { + // Only log an `error` since it's common for + // builders to timeout on their response, only + // to publish the block successfully themselves. + error!( + log, + "Error whilst producing block"; + "error" => ?e, + "block_slot" => ?slot, + "info" => "this error may or may not result in a missed block", + ) + } + Ok(_) => {} + }; + } else if let Err(e) = service + .publish_block(slot, validator_pubkey, false) + .await + { + // Log a `crit` since a full block (non-builder) + // proposal failed. 
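+ // (A failed blinded proposal may still be published by the builder itself, as noted + // above, but a failed full block has no such fallback, hence the higher severity.)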
+ crit!( log, "Error whilst producing block"; - "error" => ?e, + "message" => ?e, "block_slot" => ?slot, - "info" => "this error may or may not result in a missed block", - ) + "info" => "proposal did not use a builder", + ); } - Ok(_) => {} - }; - } else if let Err(e) = service - .publish_block::>(slot, validator_pubkey) - .await - { - // Log a `crit` since a full block (non-builder) - // proposal failed. - crit!( - log, - "Error whilst producing block"; - "message" => ?e, - "block_slot" => ?slot, - "info" => "proposal did not use a builder", - ); - } - }, - "block service", - ); + }, + "block service", + ) + } } Ok(()) } - /// Produce a block at the given slot for validator_pubkey - async fn publish_block>( + #[allow(clippy::too_many_arguments)] + async fn sign_and_publish_block( + &self, + proposer_fallback: ProposerFallback, + slot: Slot, + graffiti: Option, + validator_pubkey: &PublicKeyBytes, + unsigned_block: UnsignedBlock, + ) -> Result<(), BlockError> { + let log = self.context.log(); + let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); + + let res = match unsigned_block { + UnsignedBlock::Full(block_contents) => { + let (block, maybe_blobs) = block_contents.deconstruct(); + self.validator_store + .sign_block(*validator_pubkey, block, slot) + .await + .map(|b| SignedBlock::Full(PublishBlockRequest::new(Arc::new(b), maybe_blobs))) + } + UnsignedBlock::Blinded(block) => self + .validator_store + .sign_block(*validator_pubkey, block, slot) + .await + .map(Arc::new) + .map(SignedBlock::Blinded), + }; + + let signed_block = match res { + Ok(block) => block, + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently removed + // via the API. + warn!( + log, + "Missing pubkey for block"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "slot" => ?slot + ); + return Ok(()); + } + Err(e) => { + return Err(BlockError::Recoverable(format!( + "Unable to sign block: {:?}", + e + ))) + } + }; + + let signing_time_ms = + Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); + + info!( + log, + "Publishing signed block"; + "slot" => slot.as_u64(), + "signing_time_ms" => signing_time_ms, + ); + + // Publish block with first available beacon node. + // + // Try the proposer nodes first, since we've likely gone to efforts to + // protect them from DoS attacks and they're most likely to successfully + // publish a block. 
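+ // (`request_proposers_first` falls back to the ordinary beacon nodes if every proposer + // node fails; see `ProposerFallback::request_proposers_first` above.)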
+ proposer_fallback + .request_proposers_first( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async { + self.publish_signed_block_contents(&signed_block, beacon_node) + .await + }, + ) + .await?; + + info!( + log, + "Successfully published block"; + "block_type" => ?signed_block.block_type(), + "deposits" => signed_block.num_deposits(), + "attestations" => signed_block.num_attestations(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); + Ok(()) + } + + async fn publish_block_v3( self, slot: Slot, validator_pubkey: PublicKeyBytes, + builder_boost_factor: Option, ) -> Result<(), BlockError> { let log = self.context.log(); let _timer = metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); - let current_slot = self.slot_clock.now().ok_or_else(|| { - BlockError::Recoverable("Unable to determine current slot from clock".to_string()) - })?; - let randao_reveal = match self .validator_store .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) @@ -458,7 +560,6 @@ impl BlockService { let randao_reveal_ref = &randao_reveal; let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); - let validator_pubkey_ref = &validator_pubkey; let proposer_fallback = ProposerFallback { beacon_nodes: self.beacon_nodes.clone(), proposer_nodes: self.proposer_nodes.clone(), @@ -474,78 +575,67 @@ impl BlockService { // // Try the proposer nodes last, since it's likely that they don't have a // great view of attestations on the network. - let block = proposer_fallback - .first_success_try_proposers_last( + let unsigned_block = proposer_fallback + .request_proposers_last( RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { - let block = match Payload::block_type() { - BlockType::Full => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blocks::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - BlockType::Blinded => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blinded_blocks::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - }; - - info!( - log, - "Received unsigned block"; - "slot" => slot.as_u64(), + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], ); - if proposer_index != Some(block.proposer_index()) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. 
Beacon chain re-orged" - .to_string(), - )); - } + let block_response = Self::get_validator_block_v3( + beacon_node, + slot, + randao_reveal_ref, + graffiti, + proposer_index, + builder_boost_factor, + log, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + }); - Ok::<_, BlockError>(block) + Ok::<_, BlockError>(block_response) }, ) + .await??; + + self_ref + .sign_and_publish_block( + proposer_fallback, + slot, + graffiti, + &validator_pubkey, + unsigned_block, + ) .await?; - let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); - let signed_block = match self_ref + Ok(()) + } + + /// Produce a block at the given slot for validator_pubkey + async fn publish_block( + &self, + slot: Slot, + validator_pubkey: PublicKeyBytes, + builder_proposal: bool, + ) -> Result<(), BlockError> { + let log = self.context.log(); + let _timer = + metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); + + let randao_reveal = match self .validator_store - .sign_block::(*validator_pubkey_ref, block, current_slot) + .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) .await { - Ok(block) => block, + Ok(signature) => signature.into(), Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { // A pubkey can be missing when a validator was recently removed // via the API. @@ -565,65 +655,273 @@ impl BlockService { ))) } }; - let signing_time_ms = - Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); + + let graffiti = determine_graffiti( + &validator_pubkey, + log, + self.graffiti_file.clone(), + self.validator_store.graffiti(&validator_pubkey), + self.graffiti, + ); + + let randao_reveal_ref = &randao_reveal; + let self_ref = &self; + let proposer_index = self.validator_store.validator_index(&validator_pubkey); + let proposer_fallback = ProposerFallback { + beacon_nodes: self.beacon_nodes.clone(), + proposer_nodes: self.proposer_nodes.clone(), + }; info!( log, - "Publishing signed block"; + "Requesting unsigned block"; "slot" => slot.as_u64(), - "signing_time_ms" => signing_time_ms, ); - // Publish block with first available beacon node. + // Request block from first responsive beacon node. // - // Try the proposer nodes first, since we've likely gone to efforts to - // protect them from DoS attacks and they're most likely to successfully - // publish a block. - proposer_fallback - .first_success_try_proposers_first( + // Try the proposer nodes last, since it's likely that they don't have a + // great view of attestations on the network. + let unsigned_block = proposer_fallback + .request_proposers_last( RequireSynced::No, OfflineOnFailure::Yes, - |beacon_node| async { - match Payload::block_type() { - BlockType::Full => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blocks(&signed_block) - .await - .or_else(|e| handle_block_post_error(e, slot, log))? - } - BlockType::Blinded => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .or_else(|e| handle_block_post_error(e, slot, log))? 
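As a rough sketch of the error policy in the signing code above: an unknown pubkey (typically a validator removed through the API between duty scheduling and signing) is logged and swallowed rather than failing the block service, while any other signing error surfaces as recoverable. `SignError` and `triage_signing` are invented stand-ins, not the actual `ValidatorStoreError` handling.

/// Invented stand-in for the validator store's error type.
#[derive(Debug)]
enum SignError {
    UnknownPubkey(String),
    Other(String),
}

/// Returns `Ok(None)` (skip this proposal) for a missing key, and propagates
/// everything else so the caller can treat it as a recoverable failure.
fn triage_signing<T>(res: Result<T, SignError>) -> Result<Option<T>, String> {
    match res {
        Ok(signed) => Ok(Some(signed)),
        Err(SignError::UnknownPubkey(pk)) => {
            // The validator was likely removed via the API mid-flight; this is
            // expected during key rotation and should not abort the service.
            eprintln!("Missing pubkey for block: {pk}");
            Ok(None)
        }
        Err(e) => Err(format!("Unable to sign block: {e:?}")),
    }
}

fn main() {
    let skipped = triage_signing::<()>(Err(SignError::UnknownPubkey("0xabc".into())));
    assert_eq!(skipped.unwrap(), None); // proposal skipped, service keeps running
    let fatal = triage_signing::<()>(Err(SignError::Other("socket closed".into())));
    assert!(fatal.is_err()); // everything else propagates
}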
- } - } - Ok::<_, BlockError>(()) + move |beacon_node| { + Self::get_validator_block( + beacon_node, + slot, + randao_reveal_ref, + graffiti, + proposer_index, + builder_proposal, + log, + ) }, ) .await?; - info!( - log, - "Successfully published block"; - "block_type" => ?Payload::block_type(), - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); + self_ref + .sign_and_publish_block( + proposer_fallback, + slot, + graffiti, + &validator_pubkey, + unsigned_block, + ) + .await?; Ok(()) } + + async fn publish_signed_block_contents( + &self, + signed_block: &SignedBlock, + beacon_node: &BeaconNodeHttpClient, + ) -> Result<(), BlockError> { + let log = self.context.log(); + let slot = signed_block.slot(); + match signed_block { + SignedBlock::Full(signed_block) => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(signed_block) + .await + .or_else(|e| handle_block_post_error(e, slot, log))? + } + SignedBlock::Blinded(signed_block) => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(signed_block) + .await + .or_else(|e| handle_block_post_error(e, slot, log))? + } + } + Ok::<_, BlockError>(()) + } + + async fn get_validator_block_v3( + beacon_node: &BeaconNodeHttpClient, + slot: Slot, + randao_reveal_ref: &SignatureBytes, + graffiti: Option, + proposer_index: Option, + builder_boost_factor: Option, + log: &Logger, + ) -> Result, BlockError> { + let (block_response, _) = beacon_node + .get_validator_blocks_v3::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })?; + + let unsigned_block = match block_response.data { + eth2::types::ProduceBlockV3Response::Full(block) => UnsignedBlock::Full(block), + eth2::types::ProduceBlockV3Response::Blinded(block) => UnsignedBlock::Blinded(block), + }; + + info!( + log, + "Received unsigned block"; + "slot" => slot.as_u64(), + ); + if proposer_index != Some(unsigned_block.proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), + )); + } + + Ok::<_, BlockError>(unsigned_block) + } + + async fn get_validator_block( + beacon_node: &BeaconNodeHttpClient, + slot: Slot, + randao_reveal_ref: &SignatureBytes, + graffiti: Option, + proposer_index: Option, + builder_proposal: bool, + log: &Logger, + ) -> Result, BlockError> { + let unsigned_block = if !builder_proposal { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + UnsignedBlock::Full( + beacon_node + .get_validator_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? 
+ .data, + ) + } else { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); + UnsignedBlock::Blinded( + beacon_node + .get_validator_blinded_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data, + ) + }; + + info!( + log, + "Received unsigned block"; + "slot" => slot.as_u64(), + ); + if proposer_index != Some(unsigned_block.proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), + )); + } + + Ok::<_, BlockError>(unsigned_block) + } + + /// Returns the builder boost factor of the given public key. + /// The priority order for fetching this value is: + /// + /// 1. validator_definitions.yml + /// 2. process level flag + fn get_builder_boost_factor(&self, validator_pubkey: &PublicKeyBytes) -> Option { + // Apply per validator configuration first. + let validator_builder_boost_factor = self + .validator_store + .determine_validator_builder_boost_factor(validator_pubkey); + + // Fallback to process-wide configuration if needed. + let maybe_builder_boost_factor = validator_builder_boost_factor.or_else(|| { + self.validator_store + .determine_default_builder_boost_factor() + }); + + if let Some(builder_boost_factor) = maybe_builder_boost_factor { + // if builder boost factor is set to 100 it should be treated + // as None to prevent unnecessary calculations that could + // lead to loss of information. + if builder_boost_factor == 100 { + return None; + } + return Some(builder_boost_factor); + } + + None + } +} + +pub enum UnsignedBlock { + Full(FullBlockContents), + Blinded(BlindedBeaconBlock), +} + +impl UnsignedBlock { + pub fn proposer_index(&self) -> u64 { + match self { + UnsignedBlock::Full(block) => block.block().proposer_index(), + UnsignedBlock::Blinded(block) => block.proposer_index(), + } + } +} + +pub enum SignedBlock { + Full(PublishBlockRequest), + Blinded(Arc>), +} + +impl SignedBlock { + pub fn block_type(&self) -> BlockType { + match self { + SignedBlock::Full(_) => BlockType::Full, + SignedBlock::Blinded(_) => BlockType::Blinded, + } + } + pub fn slot(&self) -> Slot { + match self { + SignedBlock::Full(block) => block.signed_block().message().slot(), + SignedBlock::Blinded(block) => block.message().slot(), + } + } + pub fn num_deposits(&self) -> usize { + match self { + SignedBlock::Full(block) => block.signed_block().message().body().deposits().len(), + SignedBlock::Blinded(block) => block.message().body().deposits().len(), + } + } + pub fn num_attestations(&self) -> usize { + match self { + SignedBlock::Full(block) => block.signed_block().message().body().attestations().len(), + SignedBlock::Blinded(block) => block.message().body().attestations().len(), + } + } } fn handle_block_post_error(err: eth2::Error, slot: Slot, log: &Logger) -> Result<(), BlockError> { diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 0af92a9e3..e6d19bc89 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -8,15 +8,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "When connected to a beacon node, performs the duties of a staked \ validator (e.g., proposing blocks and attestations).", ) - // This argument is deprecated, use `--beacon-nodes` instead. 
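The boost-factor resolution above reduces to a small precedence rule: the per-validator value wins over the process-wide default, and a factor of 100 is the neutral 100% multiplier, so it is normalised to `None`. A self-contained sketch (the helper name is illustrative, not an actual `ValidatorStore` method):

/// Resolve a builder boost factor: per-validator value first, then the
/// process-wide default, with the neutral value 100 (i.e. 100%) mapped to
/// `None` so downstream code can skip the payload comparison entirely.
fn resolve_boost_factor(per_validator: Option<u64>, process_default: Option<u64>) -> Option<u64> {
    per_validator
        .or(process_default)
        .filter(|&factor| factor != 100)
}

fn main() {
    assert_eq!(resolve_boost_factor(Some(80), Some(120)), Some(80)); // per-validator wins
    assert_eq!(resolve_boost_factor(None, Some(100)), None); // 100% is neutral
    assert_eq!(resolve_boost_factor(None, None), None);
}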
- .arg( - Arg::with_name("beacon-node") - .long("beacon-node") - .value_name("NETWORK_ADDRESS") - .help("Deprecated. Use --beacon-nodes.") - .takes_value(true) - .conflicts_with("beacon-nodes"), - ) .arg( Arg::with_name("beacon-nodes") .long("beacon-nodes") @@ -35,24 +26,28 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .takes_value(true), ) + // TODO remove this flag in a future release .arg( Arg::with_name("disable-run-on-all") .long("disable-run-on-all") .value_name("DISABLE_RUN_ON_ALL") - .help("By default, Lighthouse publishes attestation, sync committee subscriptions \ + .help("DEPRECATED. Use --broadcast. \ + By default, Lighthouse publishes attestation, sync committee subscriptions \ and proposer preparation messages to all beacon nodes provided in the \ `--beacon-nodes flag`. This option changes that behaviour such that these \ api calls only go out to the first available and synced beacon node") - .takes_value(false) + .takes_value(false), ) - // This argument is deprecated, use `--beacon-nodes` instead. .arg( - Arg::with_name("server") - .long("server") - .value_name("NETWORK_ADDRESS") - .help("Deprecated. Use --beacon-nodes.") - .takes_value(true) - .conflicts_with_all(&["beacon-node", "beacon-nodes"]), + Arg::with_name("broadcast") + .long("broadcast") + .value_name("API_TOPICS") + .help("Comma-separated list of beacon API topics to broadcast to all beacon nodes. \ + Possible values are: none, attestations, blocks, subscriptions, \ + sync-committee. Default (when flag is omitted) is to broadcast \ + subscriptions only." + ) + .takes_value(true), ) .arg( Arg::with_name("validators-dir") @@ -80,13 +75,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .conflicts_with("datadir") ) - .arg( - Arg::with_name("delete-lockfiles") - .long("delete-lockfiles") - .help( - "DEPRECATED. This flag does nothing and will be removed in a future release." - ) - ) .arg( Arg::with_name("init-slashing-protection") .long("init-slashing-protection") @@ -106,11 +94,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { will need to be manually added to the validator_definitions.yml file." ) ) - .arg( - Arg::with_name("allow-unsynced") - .long("allow-unsynced") - .help("DEPRECATED: this flag does nothing"), - ) .arg( Arg::with_name("use-long-timeouts") .long("use-long-timeouts") @@ -153,6 +136,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("FEE-RECIPIENT") .takes_value(true) ) + .arg( + Arg::with_name("produce-block-v3") + .long("produce-block-v3") + .help("Enable block production via the block v3 endpoint for this validator client. \ + This should only be enabled when paired with a beacon node \ + that has this endpoint implemented. This flag will be enabled by default in \ + future.") + .takes_value(false) + ) /* REST API related arguments */ .arg( Arg::with_name("http") @@ -319,18 +311,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { headers during proposals and will sign over headers. Useful for outsourcing \ execution payload construction during proposals.") .takes_value(false), - ).arg( - Arg::with_name("strict-fee-recipient") - .long("strict-fee-recipient") - .help("[DEPRECATED] If this flag is set, Lighthouse will refuse to sign any block whose \ - `fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. \ - This applies to both the normal block proposal flow, as well as block proposals \ - through the builder API. 
Proposals through the builder API are more likely to have a \ - discrepancy in `fee_recipient` so you should be aware of how your connected relay \ - sends proposer payments before using this flag. If this flag is used, a fee recipient \ - mismatch in the builder API flow will result in a fallback to the local execution engine \ - for payload construction, where a strict fee recipient check will still be applied.") - .takes_value(false), ) .arg( Arg::with_name("builder-registration-timestamp-override") @@ -369,16 +349,22 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("500") .takes_value(true), ) - /* - * Experimental/development options. - */ .arg( - Arg::with_name("block-delay-ms") - .long("block-delay-ms") - .value_name("MILLIS") - .hidden(true) - .help("Time to delay block production from the start of the slot. Should only be \ - used for testing.") + Arg::with_name("builder-boost-factor") + .long("builder-boost-factor") + .value_name("UINT64") + .help("Defines the boost factor, \ + a percentage multiplier to apply to the builder's payload value \ + when choosing between a builder payload header and payload from \ + the local execution node.") + .conflicts_with("prefer-builder-proposals") .takes_value(true), ) + .arg( + Arg::with_name("prefer-builder-proposals") + .long("prefer-builder-proposals") + .help("If this flag is set, Lighthouse will always prefer blocks \ + constructed by builders, regardless of payload value.") + .takes_value(false), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 7c662db93..a919afb01 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,3 +1,4 @@ +use crate::beacon_node_fallback::ApiTopic; use crate::graffiti_file::GraffitiFile; use crate::{http_api, http_metrics}; use clap::ArgMatches; @@ -8,12 +9,11 @@ use directory::{ }; use eth2::types::Graffiti; use sensitive_url::SensitiveUrl; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; use std::path::PathBuf; -use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; @@ -69,16 +69,18 @@ pub struct Config { /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option>, - /// Delay from the start of the slot to wait before publishing a block. - /// - /// This is *not* recommended in prod and should only be used for testing. - pub block_delay: Option, - /// Disables publishing http api requests to all beacon nodes for select api calls. - pub disable_run_on_all: bool, + /// Enables broadcasting of various requests (by topic) to all beacon nodes. + pub broadcast_topics: Vec, /// Enables a service which attempts to measure latency between the VC and BNs. pub enable_latency_measurement_service: bool, /// Defines the number of validators per `validator/register_validator` request sent to the BN. pub validator_registration_batch_size: usize, + /// Enables block production via the block v3 endpoint. This configuration option can be removed post deneb. + pub produce_block_v3: bool, + /// Specifies the boost factor, a percentage multiplier to apply to the builder's payload value. + pub builder_boost_factor: Option, + /// If true, Lighthouse will prefer builder proposals, if available. 
+ pub prefer_builder_proposals: bool, } impl Default for Config { @@ -113,13 +115,15 @@ impl Default for Config { enable_doppelganger_protection: false, enable_high_validator_count_metrics: false, beacon_nodes_tls_certs: None, - block_delay: None, builder_proposals: false, builder_registration_timestamp_override: None, gas_limit: None, - disable_run_on_all: false, + broadcast_topics: vec![ApiTopic::Subscriptions], enable_latency_measurement_service: true, validator_registration_batch_size: 500, + produce_block_v3: false, + builder_boost_factor: None, + prefer_builder_proposals: false, } } } @@ -171,27 +175,6 @@ impl Config { .collect::>() .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; } - // To be deprecated. - else if let Some(beacon_node) = parse_optional::(cli_args, "beacon-node")? { - warn!( - log, - "The --beacon-node flag is deprecated"; - "msg" => "please use --beacon-nodes instead" - ); - config.beacon_nodes = vec![SensitiveUrl::parse(&beacon_node) - .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?]; - } - // To be deprecated. - else if let Some(server) = parse_optional::(cli_args, "server")? { - warn!( - log, - "The --server flag is deprecated"; - "msg" => "please use --beacon-nodes instead" - ); - config.beacon_nodes = vec![SensitiveUrl::parse(&server) - .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?]; - } - if let Some(proposer_nodes) = parse_optional::(cli_args, "proposer_nodes")? { config.proposer_nodes = proposer_nodes .split(',') @@ -200,22 +183,6 @@ impl Config { .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; } - if cli_args.is_present("delete-lockfiles") { - warn!( - log, - "The --delete-lockfiles flag is deprecated"; - "msg" => "it is no longer necessary, and no longer has any effect", - ); - } - - if cli_args.is_present("allow-unsynced") { - warn!( - log, - "The --allow-unsynced flag is deprecated"; - "msg" => "it no longer has any effect", - ); - } - config.disable_run_on_all = cli_args.is_present("disable-run-on-all"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); config.init_slashing_protection = cli_args.is_present("init-slashing-protection"); config.use_long_timeouts = cli_args.is_present("use-long-timeouts"); @@ -258,6 +225,26 @@ impl Config { config.beacon_nodes_tls_certs = Some(tls_certs.split(',').map(PathBuf::from).collect()); } + if cli_args.is_present("disable-run-on-all") { + warn!( + log, + "The --disable-run-on-all flag is deprecated"; + "msg" => "please use --broadcast instead" + ); + config.broadcast_topics = vec![]; + } + if let Some(broadcast_topics) = cli_args.value_of("broadcast") { + config.broadcast_topics = broadcast_topics + .split(',') + .filter(|t| *t != "none") + .map(|t| { + t.trim() + .parse::() + .map_err(|_| format!("Unknown API topic to broadcast: {t}")) + }) + .collect::>()?; + } + /* * Http API server */ @@ -361,6 +348,14 @@ impl Config { config.builder_proposals = true; } + if cli_args.is_present("produce-block-v3") { + config.produce_block_v3 = true; + } + + if cli_args.is_present("prefer-builder-proposals") { + config.prefer_builder_proposals = true; + } + config.gas_limit = cli_args .value_of("gas-limit") .map(|gas_limit| { @@ -380,13 +375,7 @@ impl Config { ); } - if cli_args.is_present("strict-fee-recipient") { - warn!( - log, - "The flag `--strict-fee-recipient` has been deprecated due to a bug causing \ - missed proposals. The flag will be ignored." 
- ); - } + config.builder_boost_factor = parse_optional(cli_args, "builder-boost-factor")?; config.enable_latency_measurement_service = parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true); @@ -397,13 +386,6 @@ impl Config { return Err("validator-registration-batch-size cannot be 0".to_string()); } - /* - * Experimental - */ - if let Some(delay_ms) = parse_optional::(cli_args, "block-delay-ms")? { - config.block_delay = Some(Duration::from_millis(delay_ms)); - } - Ok(config) } } diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index 558b9e199..86584d794 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -163,8 +163,6 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( current_epoch: Epoch, validator_indices: Vec, ) -> LivenessResponses { - let validator_indices = validator_indices.as_slice(); - let previous_epoch = current_epoch.saturating_sub(1_u64); let previous_epoch_responses = if previous_epoch == current_epoch { @@ -180,12 +178,22 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( .first_success( RequireSynced::Yes, OfflineOnFailure::Yes, - |beacon_node| async move { + |beacon_node| async { beacon_node - .post_lighthouse_liveness(validator_indices, previous_epoch) + .post_validator_liveness_epoch(previous_epoch, &validator_indices) .await .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) - .map(|result| result.data) + .map(|result| { + result + .data + .into_iter() + .map(|response| LivenessResponseData { + index: response.index, + epoch: previous_epoch, + is_live: response.is_live, + }) + .collect() + }) }, ) .await @@ -207,12 +215,22 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( .first_success( RequireSynced::Yes, OfflineOnFailure::Yes, - |beacon_node| async move { + |beacon_node| async { beacon_node - .post_lighthouse_liveness(validator_indices, current_epoch) + .post_validator_liveness_epoch(current_epoch, &validator_indices) .await .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) - .map(|result| result.data) + .map(|result| { + result + .data + .into_iter() + .map(|response| LivenessResponseData { + index: response.index, + epoch: current_epoch, + is_live: response.is_live, + }) + .collect() + }) }, ) .await @@ -525,9 +543,7 @@ impl DoppelgangerService { } // Resolve the index from the server response back to a public key. 
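A minimal sketch of the response adaptation in the liveness code above: the standard `POST /eth/v1/validator/liveness/{epoch}` endpoint returns only `(index, is_live)` pairs, so each entry is re-tagged with the epoch that was queried. The types here are simplified stand-ins for the `eth2` crate's.

struct StandardLiveness {
    index: u64,
    is_live: bool,
}

struct LivenessResponseData {
    index: u64,
    epoch: u64,
    is_live: bool,
}

fn tag_with_epoch(epoch: u64, responses: Vec<StandardLiveness>) -> Vec<LivenessResponseData> {
    responses
        .into_iter()
        .map(|r| LivenessResponseData {
            index: r.index,
            epoch, // the epoch we queried, echoed into every entry
            is_live: r.is_live,
        })
        .collect()
}

fn main() {
    let tagged = tag_with_epoch(42, vec![StandardLiveness { index: 7, is_live: true }]);
    assert_eq!(tagged[0].epoch, 42);
}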
- let pubkey = if let Some(pubkey) = indices_map.get(&response.index) { - pubkey - } else { + let Some(pubkey) = indices_map.get(&response.index) else { crit!( self.log, "Inconsistent indices map"; diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index a3b3cabcc..26747f811 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -8,7 +8,7 @@ mod sync; -use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, OfflineOnFailure, RequireSynced}; use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; use crate::{ block_service::BlockServiceNotification, @@ -21,11 +21,12 @@ use eth2::types::{ }; use futures::{stream, StreamExt}; use parking_lot::RwLock; -use safe_arith::ArithError; +use safe_arith::{ArithError, SafeArith}; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::cmp::min; use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use sync::poll_sync_committee_duties; @@ -33,14 +34,6 @@ use sync::SyncDutiesMap; use tokio::{sync::mpsc::Sender, time::sleep}; use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; -/// Since the BN does not like it when we subscribe to slots that are close to the current time, we -/// will only subscribe to slots which are further than `SUBSCRIPTION_BUFFER_SLOTS` away. -/// -/// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the -/// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid -/// bringing in the entire crate. -const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2; - /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. const HISTORICAL_DUTIES_EPOCHS: u64 = 2; @@ -62,6 +55,36 @@ const VALIDATOR_METRICS_MIN_COUNT: usize = 64; /// reduces the amount of data that needs to be transferred. const INITIAL_DUTIES_QUERY_SIZE: usize = 1; +/// Offsets from the attestation duty slot at which a subscription should be sent. +const ATTESTATION_SUBSCRIPTION_OFFSETS: [u64; 8] = [3, 4, 5, 6, 7, 8, 16, 32]; + +/// Check that `ATTESTATION_SUBSCRIPTION_OFFSETS` is sorted ascendingly. +const _: () = assert!({ + let mut i = 0; + loop { + let prev = if i > 0 { + ATTESTATION_SUBSCRIPTION_OFFSETS[i - 1] + } else { + 0 + }; + let curr = ATTESTATION_SUBSCRIPTION_OFFSETS[i]; + if curr < prev { + break false; + } + i += 1; + if i == ATTESTATION_SUBSCRIPTION_OFFSETS.len() { + break true; + } + } +}); +/// Since the BN does not like it when we subscribe to slots that are close to the current time, we +/// will only subscribe to slots which are further than 2 slots away. +/// +/// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the +/// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid +/// bringing in the entire crate. +const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > 2); + #[derive(Debug)] pub enum Error { UnableToReadSlotClock, @@ -84,6 +107,16 @@ pub struct DutyAndProof { pub duty: AttesterData, /// This value is only set to `Some` if the proof indicates that the validator is an aggregator. pub selection_proof: Option, + /// Track which slots we should send subscriptions at for this duty. + /// + /// This value is updated after each subscription is successfully sent. 
+    pub subscription_slots: Arc<SubscriptionSlots>,
+}
+
+/// Tracker containing the slots at which an attestation subscription should be sent.
+pub struct SubscriptionSlots {
+    /// Pairs of `(slot, already_sent)` in slot-descending order.
+    slots: Vec<(Slot, AtomicBool)>,
 }
 
 impl DutyAndProof {
@@ -111,17 +144,55 @@ impl DutyAndProof {
             }
         })?;
 
+        let subscription_slots = SubscriptionSlots::new(duty.slot);
+
         Ok(Self {
             duty,
             selection_proof,
+            subscription_slots,
         })
     }
 
     /// Create a new `DutyAndProof` with the selection proof waiting to be filled in.
     pub fn new_without_selection_proof(duty: AttesterData) -> Self {
+        let subscription_slots = SubscriptionSlots::new(duty.slot);
         Self {
             duty,
             selection_proof: None,
+            subscription_slots,
+        }
+    }
+}
+
+impl SubscriptionSlots {
+    fn new(duty_slot: Slot) -> Arc<Self> {
+        let slots = ATTESTATION_SUBSCRIPTION_OFFSETS
+            .into_iter()
+            .filter_map(|offset| duty_slot.safe_sub(offset).ok())
+            .map(|scheduled_slot| (scheduled_slot, AtomicBool::new(false)))
+            .collect();
+        Arc::new(Self { slots })
+    }
+
+    /// Return `true` if we should send a subscription at `slot`.
+    fn should_send_subscription_at(&self, slot: Slot) -> bool {
+        // Iterate slots from smallest to largest looking for one that hasn't been completed yet.
+        self.slots
+            .iter()
+            .rev()
+            .any(|(scheduled_slot, already_sent)| {
+                slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed)
+            })
+    }
+
+    /// Update our record of subscribed slots to account for successful subscription at `slot`.
+    fn record_successful_subscription_at(&self, slot: Slot) {
+        for (scheduled_slot, already_sent) in self.slots.iter().rev() {
+            if slot >= *scheduled_slot {
+                already_sent.store(true, Ordering::Relaxed);
+            } else {
+                break;
+            }
         }
     }
 }
@@ -574,8 +645,24 @@ async fn poll_beacon_attesters(
     let subscriptions_timer =
         metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::SUBSCRIPTIONS]);
 
-    // This vector is likely to be a little oversized, but it won't reallocate.
-    let mut subscriptions = Vec::with_capacity(local_pubkeys.len() * 2);
+    // This vector is intentionally oversized by 10% so that it won't reallocate.
+    // Each validator has 2 attestation duties occurring in the current and next epoch, for which
+    // they must send `ATTESTATION_SUBSCRIPTION_OFFSETS.len()` subscriptions. These subscription
+    // slots are approximately evenly distributed over the two epochs, usually with a slight lag
+    // that balances out (some subscriptions for the current epoch were sent in the previous, and
+    // some subscriptions for the epoch after next will be sent in the next epoch but aren't
+    // included in our calculation). We cancel the factor of 2 from the formula for simplicity.
+    let overallocation_numerator = 110;
+    let overallocation_denominator = 100;
+    let num_expected_subscriptions = overallocation_numerator
+        * std::cmp::max(
+            1,
+            local_pubkeys.len() * ATTESTATION_SUBSCRIPTION_OFFSETS.len()
+                / E::slots_per_epoch() as usize,
+        )
+        / overallocation_denominator;
+    let mut subscriptions = Vec::with_capacity(num_expected_subscriptions);
+    let mut subscription_slots_to_confirm = Vec::with_capacity(num_expected_subscriptions);
 
     // For this epoch and the next epoch, produce any beacon committee subscriptions.
     //
@@ -588,10 +675,10 @@
             .read()
             .iter()
             .filter_map(|(_, map)| map.get(epoch))
-            // The BN logs a warning if we try and subscribe to current or near-by slots. Give it a
-            // buffer.
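As a quick sanity check on the capacity arithmetic in the comment above, here is a standalone version of the estimate (assuming mainnet's 32 slots per epoch; `expected_subscriptions` is an illustrative helper, not code from the repository):

fn expected_subscriptions(num_validators: usize, num_offsets: usize, slots_per_epoch: usize) -> usize {
    // 110 / 100 implements the 10% over-allocation described above.
    110 * std::cmp::max(1, num_validators * num_offsets / slots_per_epoch) / 100
}

fn main() {
    // 500 validators * 8 offsets / 32 slots = 125 per slot, padded by 10% to 137.
    assert_eq!(expected_subscriptions(500, 8, 32), 137);
    // Small validator counts still reserve room for at least one subscription.
    assert_eq!(expected_subscriptions(1, 8, 32), 1);
}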
.filter(|(_, duty_and_proof)| { - current_slot + SUBSCRIPTION_BUFFER_SLOTS < duty_and_proof.duty.slot + duty_and_proof + .subscription_slots + .should_send_subscription_at(current_slot) }) .for_each(|(_, duty_and_proof)| { let duty = &duty_and_proof.duty; @@ -603,7 +690,8 @@ async fn poll_beacon_attesters( committees_at_slot: duty.committees_at_slot, slot: duty.slot, is_aggregator, - }) + }); + subscription_slots_to_confirm.push(duty_and_proof.subscription_slots.clone()); }); } @@ -612,9 +700,10 @@ async fn poll_beacon_attesters( let subscriptions_ref = &subscriptions; if let Err(e) = duties_service .beacon_nodes - .run( + .request( RequireSynced::No, OfflineOnFailure::Yes, + ApiTopic::Subscriptions, |beacon_node| async move { let _timer = metrics::start_timer_vec( &metrics::DUTIES_SERVICE_TIMES, @@ -632,6 +721,16 @@ async fn poll_beacon_attesters( "Failed to subscribe validators"; "error" => %e ) + } else { + // Record that subscriptions were successfully sent. + debug!( + log, + "Broadcast attestation subscriptions"; + "count" => subscriptions.len(), + ); + for subscription_slots in subscription_slots_to_confirm { + subscription_slots.record_successful_subscription_at(current_slot); + } } } @@ -1200,3 +1299,67 @@ async fn notify_block_production_service( }; } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn subscription_slots_exact() { + for duty_slot in [ + Slot::new(32), + Slot::new(47), + Slot::new(99), + Slot::new(1002003), + ] { + let subscription_slots = SubscriptionSlots::new(duty_slot); + + // Run twice to check idempotence (subscription slots shouldn't be marked as done until + // we mark them manually). + for _ in 0..2 { + for offset in ATTESTATION_SUBSCRIPTION_OFFSETS { + assert!(subscription_slots.should_send_subscription_at(duty_slot - offset)); + } + } + + // Mark each slot as complete and check that all prior slots are still marked + // incomplete. + for (i, offset) in ATTESTATION_SUBSCRIPTION_OFFSETS + .into_iter() + .rev() + .enumerate() + { + subscription_slots.record_successful_subscription_at(duty_slot - offset); + for lower_offset in ATTESTATION_SUBSCRIPTION_OFFSETS + .into_iter() + .rev() + .skip(i + 1) + { + assert!(lower_offset < offset); + assert!( + subscription_slots.should_send_subscription_at(duty_slot - lower_offset) + ); + } + } + } + } + #[test] + fn subscription_slots_mark_multiple() { + for (i, offset) in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter().enumerate() { + let duty_slot = Slot::new(64); + let subscription_slots = SubscriptionSlots::new(duty_slot); + + subscription_slots.record_successful_subscription_at(duty_slot - offset); + + // All past offsets (earlier slots) should be marked as complete. + for (j, other_offset) in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter().enumerate() { + let past = j >= i; + assert_eq!(other_offset >= offset, past); + assert_eq!( + subscription_slots.should_send_subscription_at(duty_slot - other_offset), + !past + ); + } + } + } +} diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index cf63d8ac6..de42fa587 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -163,7 +163,7 @@ impl SyncDutiesMap { committees_writer .entry(committee_period) - .or_insert_with(CommitteeDuties::default) + .or_default() .init(validator_indices); // Return shared reference @@ -607,9 +607,7 @@ pub async fn fill_in_aggregation_proofs( // Add to global storage (we add regularly so the proofs can be used ASAP). 
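To make the schedule exercised by the tests above concrete, here is a minimal reimplementation of the offset-to-slot mapping, using `u64::checked_sub` in place of the `safe_sub` from the real code:

const OFFSETS: [u64; 8] = [3, 4, 5, 6, 7, 8, 16, 32];

fn scheduled_slots(duty_slot: u64) -> Vec<u64> {
    OFFSETS
        .iter()
        // `checked_sub` drops offsets that would underflow past slot 0,
        // mirroring the `safe_sub(..).ok()` filter in the real code.
        .filter_map(|offset| duty_slot.checked_sub(*offset))
        .collect()
}

fn main() {
    // A duty at slot 64 is subscribed at slots 61, 60, ..., 56, then 48 and 32.
    assert_eq!(scheduled_slots(64), vec![61, 60, 59, 58, 57, 56, 48, 32]);
    // Duties very early in the chain simply drop the unreachable offsets.
    assert_eq!(scheduled_slots(4), vec![1, 0]);
}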
let sync_map = duties_service.sync_duties.committees.read(); - let committee_duties = if let Some(duties) = sync_map.get(&sync_committee_period) { - duties - } else { + let Some(committee_duties) = sync_map.get(&sync_committee_period) else { debug!( log, "Missing sync duties"; diff --git a/validator_client/src/graffiti_file.rs b/validator_client/src/graffiti_file.rs index 5c1f84e10..29da3dca5 100644 --- a/validator_client/src/graffiti_file.rs +++ b/validator_client/src/graffiti_file.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fs::File; use std::io::{prelude::*, BufReader}; diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index 52336afa5..afa5d4fed 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -148,6 +148,8 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, request.suggested_fee_recipient, request.gas_limit, request.builder_proposals, + request.builder_boost_factor, + request.prefer_builder_proposals, ) .await .map_err(|e| { diff --git a/validator_client/src/http_api/graffiti.rs b/validator_client/src/http_api/graffiti.rs new file mode 100644 index 000000000..79d4fd61f --- /dev/null +++ b/validator_client/src/http_api/graffiti.rs @@ -0,0 +1,80 @@ +use crate::validator_store::ValidatorStore; +use bls::PublicKey; +use slot_clock::SlotClock; +use std::sync::Arc; +use types::{graffiti::GraffitiString, EthSpec, Graffiti}; + +pub fn get_graffiti( + validator_pubkey: PublicKey, + validator_store: Arc>, + graffiti_flag: Option, +) -> Result { + let initialized_validators_rw_lock = validator_store.initialized_validators(); + let initialized_validators = initialized_validators_rw_lock.read(); + match initialized_validators.validator(&validator_pubkey.compress()) { + None => Err(warp_utils::reject::custom_not_found( + "The key was not found on the server".to_string(), + )), + Some(_) => { + let Some(graffiti) = initialized_validators.graffiti(&validator_pubkey.into()) else { + return graffiti_flag.ok_or(warp_utils::reject::custom_server_error( + "No graffiti found, unable to return the process-wide default".to_string(), + )); + }; + Ok(graffiti) + } + } +} + +pub fn set_graffiti( + validator_pubkey: PublicKey, + graffiti: GraffitiString, + validator_store: Arc>, +) -> Result<(), warp::Rejection> { + let initialized_validators_rw_lock = validator_store.initialized_validators(); + let mut initialized_validators = initialized_validators_rw_lock.write(); + match initialized_validators.validator(&validator_pubkey.compress()) { + None => Err(warp_utils::reject::custom_not_found( + "The key was not found on the server, nothing to update".to_string(), + )), + Some(initialized_validator) => { + if initialized_validator.get_graffiti() == Some(graffiti.clone().into()) { + Ok(()) + } else { + initialized_validators + .set_graffiti(&validator_pubkey, graffiti) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "A graffiti was found, but failed to be updated.".to_string(), + ) + }) + } + } + } +} + +pub fn delete_graffiti( + validator_pubkey: PublicKey, + validator_store: Arc>, +) -> Result<(), warp::Rejection> { + let initialized_validators_rw_lock = validator_store.initialized_validators(); + let mut initialized_validators = initialized_validators_rw_lock.write(); + match initialized_validators.validator(&validator_pubkey.compress()) { + None => 
Err(warp_utils::reject::custom_not_found( + "The key was not found on the server, nothing to delete".to_string(), + )), + Some(initialized_validator) => { + if initialized_validator.get_graffiti().is_none() { + Ok(()) + } else { + initialized_validators + .delete_graffiti(&validator_pubkey) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "A graffiti was found, but failed to be removed.".to_string(), + ) + }) + } + } + } +} diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index c2d9b4d67..074c57834 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -224,6 +224,8 @@ fn import_single_keystore( None, None, None, + None, + None, )) .map_err(|e| format!("failed to initialize validator: {:?}", e))?; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index f654833cb..dcf66d2fb 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1,12 +1,15 @@ mod api_secret; mod create_signed_voluntary_exit; mod create_validator; +mod graffiti; mod keystores; mod remotekeys; mod tests; pub mod test_utils; +use crate::http_api::graffiti::{delete_graffiti, get_graffiti, set_graffiti}; + use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ @@ -19,7 +22,10 @@ use create_validator::{ }; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, - types::{self as api_types, GenericResponse, Graffiti, PublicKey, PublicKeyBytes}, + types::{ + self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, PublicKey, + PublicKeyBytes, SetGraffitiRequest, + }, }; use lighthouse_version::version_with_platform; use logging::SSELoggingComponents; @@ -559,6 +565,8 @@ pub fn serve( let suggested_fee_recipient = body.suggested_fee_recipient; let gas_limit = body.gas_limit; let builder_proposals = body.builder_proposals; + let builder_boost_factor = body.builder_boost_factor; + let prefer_builder_proposals = body.prefer_builder_proposals; let validator_def = { if let Some(handle) = task_executor.handle() { @@ -571,6 +579,8 @@ pub fn serve( suggested_fee_recipient, gas_limit, builder_proposals, + builder_boost_factor, + prefer_builder_proposals, )) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -619,6 +629,8 @@ pub fn serve( suggested_fee_recipient: web3signer.suggested_fee_recipient, gas_limit: web3signer.gas_limit, builder_proposals: web3signer.builder_proposals, + builder_boost_factor: web3signer.builder_boost_factor, + prefer_builder_proposals: web3signer.prefer_builder_proposals, description: web3signer.description, signing_definition: SigningDefinition::Web3Signer( Web3SignerDefinition { @@ -653,7 +665,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_store_filter.clone()) - .and(graffiti_file_filter) + .and(graffiti_file_filter.clone()) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( @@ -685,8 +697,12 @@ pub fn serve( (Some(is_enabled), Some(initialized_validator)) if Some(is_enabled) == body.enabled && initialized_validator.get_gas_limit() == body.gas_limit + && initialized_validator.get_builder_boost_factor() + == body.builder_boost_factor && initialized_validator.get_builder_proposals() == body.builder_proposals + && initialized_validator.get_prefer_builder_proposals() + == 
body.prefer_builder_proposals && initialized_validator.get_graffiti() == maybe_graffiti => { Ok(()) @@ -700,6 +716,8 @@ pub fn serve( body.enabled, body.gas_limit, body.builder_proposals, + body.builder_boost_factor, + body.prefer_builder_proposals, body.graffiti, ), ) @@ -1028,6 +1046,86 @@ pub fn serve( }, ); + // GET /eth/v1/validator/{pubkey}/graffiti + let get_graffiti = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("graffiti")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(graffiti_flag_filter) + .and(signer.clone()) + .and_then( + |pubkey: PublicKey, + validator_store: Arc>, + graffiti_flag: Option, + signer| { + blocking_signed_json_task(signer, move || { + let graffiti = get_graffiti(pubkey.clone(), validator_store, graffiti_flag)?; + Ok(GenericResponse::from(GetGraffitiResponse { + pubkey: pubkey.into(), + graffiti, + })) + }) + }, + ); + + // POST /eth/v1/validator/{pubkey}/graffiti + let post_graffiti = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("graffiti")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(graffiti_file_filter.clone()) + .and(signer.clone()) + .and_then( + |pubkey: PublicKey, + query: SetGraffitiRequest, + validator_store: Arc>, + graffiti_file: Option, + signer| { + blocking_signed_json_task(signer, move || { + if graffiti_file.is_some() { + return Err(warp_utils::reject::invalid_auth( + "Unable to update graffiti as the \"--graffiti-file\" flag is set" + .to_string(), + )); + } + set_graffiti(pubkey.clone(), query.graffiti, validator_store) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::ACCEPTED)); + + // DELETE /eth/v1/validator/{pubkey}/graffiti + let delete_graffiti = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("graffiti")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(graffiti_file_filter.clone()) + .and(signer.clone()) + .and_then( + |pubkey: PublicKey, + validator_store: Arc>, + graffiti_file: Option, + signer| { + blocking_signed_json_task(signer, move || { + if graffiti_file.is_some() { + return Err(warp_utils::reject::invalid_auth( + "Unable to delete graffiti as the \"--graffiti-file\" flag is set" + .to_string(), + )); + } + delete_graffiti(pubkey.clone(), validator_store) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -1175,8 +1273,10 @@ pub fn serve( .or(get_lighthouse_ui_graffiti) .or(get_fee_recipient) .or(get_gas_limit) + .or(get_graffiti) .or(get_std_keystores) - .or(get_std_remotekeys), + .or(get_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), ) .or(warp::post().and( post_validators @@ -1187,15 +1287,20 @@ pub fn serve( .or(post_fee_recipient) .or(post_gas_limit) .or(post_std_keystores) - .or(post_std_remotekeys), + .or(post_std_remotekeys) + .or(post_graffiti) + .recover(warp_utils::reject::handle_rejection), )) - .or(warp::patch().and(patch_validators)) + .or(warp::patch() + .and(patch_validators.recover(warp_utils::reject::handle_rejection))) .or(warp::delete().and( delete_lighthouse_keystores .or(delete_fee_recipient) .or(delete_gas_limit) .or(delete_std_keystores) - .or(delete_std_remotekeys), + .or(delete_std_remotekeys) + .or(delete_graffiti) + .recover(warp_utils::reject::handle_rejection), )), ) // The auth route 
and logs are the only routes that are allowed to be accessed without the API token. diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/src/http_api/remotekeys.rs index 991dfb8bf..053bbcb4b 100644 --- a/validator_client/src/http_api/remotekeys.rs +++ b/validator_client/src/http_api/remotekeys.rs @@ -125,6 +125,8 @@ fn import_single_remotekey( suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, description: String::from("Added by remotekey API"), signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url, diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/src/http_api/test_utils.rs index c7558dd58..7b0cb51ec 100644 --- a/validator_client/src/http_api/test_utils.rs +++ b/validator_client/src/http_api/test_utils.rs @@ -249,9 +249,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); @@ -315,6 +315,8 @@ impl ApiTester { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, deposit_gwei: E::default_spec().max_effective_balance, }) .collect::>(); @@ -447,6 +449,8 @@ impl ApiTester { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, }; self.client @@ -467,6 +471,8 @@ impl ApiTester { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, }; let response = self @@ -511,6 +517,8 @@ impl ApiTester { request_timeout_ms: None, client_identity_path: None, client_identity_password: None, + builder_boost_factor: None, + prefer_builder_proposals: None, } }) .collect(); @@ -534,7 +542,15 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None, None) + .patch_lighthouse_validators( + &validator.voting_pubkey, + Some(enabled), + None, + None, + None, + None, + None, + ) .await .unwrap(); @@ -582,6 +598,8 @@ impl ApiTester { Some(gas_limit), None, None, + None, + None, ) .await .unwrap(); @@ -610,6 +628,8 @@ impl ApiTester { None, Some(builder_proposals), None, + None, + None, ) .await .unwrap(); diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index db9d0613b..f7db76e4a 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -52,6 +52,12 @@ struct ApiTester { impl ApiTester { pub async fn new() -> Self { + let mut config = Config::default(); + config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); + Self::new_with_config(config).await + } + + pub async fn new_with_config(mut config: Config) -> Self { let log = test_logger(); let validator_dir = tempdir().unwrap(); @@ -70,10 +76,8 @@ impl ApiTester { let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); let api_pubkey = api_secret.api_token(); - let mut config = Config::default(); config.validator_dir = validator_dir.path().into(); config.secrets_dir = secrets_dir.path().into(); - config.fee_recipient = 
Some(TEST_DEFAULT_FEE_RECIPIENT); let spec = E::default_spec(); @@ -205,9 +209,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Capella(res.data)) + .map(|res| ConfigAndPreset::Deneb(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); @@ -271,6 +275,8 @@ impl ApiTester { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, deposit_gwei: E::default_spec().max_effective_balance, }) .collect::>(); @@ -404,6 +410,8 @@ impl ApiTester { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, }; self.client @@ -424,6 +432,8 @@ impl ApiTester { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, }; let response = self @@ -462,6 +472,8 @@ impl ApiTester { suggested_fee_recipient: None, gas_limit: None, builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, voting_public_key: kp.pk, url: format!("http://signer_{}.com/", i), root_certificate_path: None, @@ -518,7 +530,15 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None, None) + .patch_lighthouse_validators( + &validator.voting_pubkey, + Some(enabled), + None, + None, + None, + None, + None, + ) .await .unwrap(); @@ -566,6 +586,8 @@ impl ApiTester { Some(gas_limit), None, None, + None, + None, ) .await .unwrap(); @@ -594,6 +616,50 @@ impl ApiTester { None, Some(builder_proposals), None, + None, + None, + ) + .await + .unwrap(); + + self + } + + pub async fn set_builder_boost_factor(self, index: usize, builder_boost_factor: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + None, + Some(builder_boost_factor), + None, + None, + ) + .await + .unwrap(); + + self + } + + pub async fn set_prefer_builder_proposals( + self, + index: usize, + prefer_builder_proposals: bool, + ) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + None, + None, + Some(prefer_builder_proposals), + None, ) .await .unwrap(); @@ -613,6 +679,64 @@ impl ApiTester { self } + pub async fn assert_builder_boost_factor( + self, + index: usize, + builder_boost_factor: Option, + ) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .get_builder_boost_factor(&validator.voting_pubkey), + builder_boost_factor + ); + + self + } + + pub async fn assert_validator_derived_builder_boost_factor( + self, + index: usize, + builder_boost_factor: Option, + ) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .determine_validator_builder_boost_factor(&validator.voting_pubkey), + builder_boost_factor + ); + + self + } + + pub fn assert_default_builder_boost_factor(self, builder_boost_factor: Option) -> Self { + assert_eq!( + 
self.validator_store + .determine_default_builder_boost_factor(), + builder_boost_factor + ); + + self + } + + pub async fn assert_prefer_builder_proposals( + self, + index: usize, + prefer_builder_proposals: bool, + ) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .get_prefer_builder_proposals(&validator.voting_pubkey), + prefer_builder_proposals + ); + + self + } + pub async fn set_graffiti(self, index: usize, graffiti: &str) -> Self { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; let graffiti_str = GraffitiString::from_str(graffiti).unwrap(); @@ -622,6 +746,8 @@ impl ApiTester { None, None, None, + None, + None, Some(graffiti_str), ) .await @@ -640,6 +766,49 @@ impl ApiTester { self } + + pub async fn test_set_graffiti(self, index: usize, graffiti: &str) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let graffiti_str = GraffitiString::from_str(graffiti).unwrap(); + let resp = self + .client + .set_graffiti(&validator.voting_pubkey, graffiti_str) + .await; + + assert!(resp.is_ok()); + + self + } + + pub async fn test_delete_graffiti(self, index: usize) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let resp = self.client.get_graffiti(&validator.voting_pubkey).await; + + assert!(resp.is_ok()); + let old_graffiti = resp.unwrap().graffiti; + + let resp = self.client.delete_graffiti(&validator.voting_pubkey).await; + + assert!(resp.is_ok()); + + let resp = self.client.get_graffiti(&validator.voting_pubkey).await; + + assert!(resp.is_ok()); + assert_ne!(old_graffiti, resp.unwrap().graffiti); + + self + } + + pub async fn test_get_graffiti(self, index: usize, expected_graffiti: &str) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let expected_graffiti_str = GraffitiString::from_str(expected_graffiti).unwrap(); + let resp = self.client.get_graffiti(&validator.voting_pubkey).await; + + assert!(resp.is_ok()); + assert_eq!(&resp.unwrap().graffiti, &expected_graffiti_str.into()); + + self + } } struct HdValidatorScenario { @@ -698,6 +867,8 @@ async fn routes_with_invalid_auth() { gas_limit: <_>::default(), builder_proposals: <_>::default(), deposit_gwei: <_>::default(), + builder_boost_factor: <_>::default(), + prefer_builder_proposals: <_>::default(), }]) .await }) @@ -728,6 +899,8 @@ async fn routes_with_invalid_auth() { suggested_fee_recipient: <_>::default(), gas_limit: <_>::default(), builder_proposals: <_>::default(), + builder_boost_factor: <_>::default(), + prefer_builder_proposals: <_>::default(), }) .await }) @@ -740,6 +913,8 @@ async fn routes_with_invalid_auth() { None, None, None, + None, + None, ) .await }) @@ -771,6 +946,20 @@ async fn routes_with_invalid_auth() { }) .await }) + .await + .test_with_invalid_auth(|client| async move { + client.delete_graffiti(&PublicKeyBytes::empty()).await + }) + .await + .test_with_invalid_auth(|client| async move { + client.get_graffiti(&PublicKeyBytes::empty()).await + }) + .await + .test_with_invalid_auth(|client| async move { + client + .set_graffiti(&PublicKeyBytes::empty(), GraffitiString::default()) + .await + }) .await; } @@ -923,6 +1112,100 @@ async fn validator_builder_proposals() { .await; } +#[tokio::test] +async fn validator_builder_boost_factor() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: 
false,
+            key_derivation_path_offset: 0,
+            disabled: vec![],
+        })
+        .await
+        .assert_enabled_validators_count(2)
+        .assert_validators_count(2)
+        .set_builder_boost_factor(0, 120)
+        .await
+        // Test setting the builder boost factor while the validator is disabled.
+        .set_validator_enabled(0, false)
+        .await
+        .assert_enabled_validators_count(1)
+        .assert_validators_count(2)
+        .set_builder_boost_factor(0, 80)
+        .await
+        .set_validator_enabled(0, true)
+        .await
+        .assert_enabled_validators_count(2)
+        .assert_builder_boost_factor(0, Some(80))
+        .await;
+}
+
+/// Verifies the builder boost factor derived from the `builder_proposals`,
+/// `prefer_builder_proposals` and `builder_boost_factor` values.
+#[tokio::test]
+async fn validator_derived_builder_boost_factor_with_process_defaults() {
+    let config = Config {
+        builder_proposals: true,
+        prefer_builder_proposals: false,
+        builder_boost_factor: Some(80),
+        ..Config::default()
+    };
+    ApiTester::new_with_config(config)
+        .await
+        .create_hd_validators(HdValidatorScenario {
+            count: 3,
+            specify_mnemonic: false,
+            key_derivation_path_offset: 0,
+            disabled: vec![],
+        })
+        .await
+        .assert_default_builder_boost_factor(Some(80))
+        .assert_validator_derived_builder_boost_factor(0, None)
+        .await
+        .set_builder_proposals(0, false)
+        .await
+        .assert_validator_derived_builder_boost_factor(0, Some(0))
+        .await
+        .set_builder_boost_factor(1, 120)
+        .await
+        .assert_validator_derived_builder_boost_factor(1, Some(120))
+        .await
+        .set_prefer_builder_proposals(2, true)
+        .await
+        .assert_validator_derived_builder_boost_factor(2, Some(u64::MAX))
+        .await;
+}
+
+#[tokio::test]
+async fn prefer_builder_proposals_validator() {
+    ApiTester::new()
+        .await
+        .create_hd_validators(HdValidatorScenario {
+            count: 2,
+            specify_mnemonic: false,
+            key_derivation_path_offset: 0,
+            disabled: vec![],
+        })
+        .await
+        .assert_enabled_validators_count(2)
+        .assert_validators_count(2)
+        .set_prefer_builder_proposals(0, false)
+        .await
+        // Test setting prefer builder proposals while the validator is disabled.
+        .set_validator_enabled(0, false)
+        .await
+        .assert_enabled_validators_count(1)
+        .assert_validators_count(2)
+        .set_prefer_builder_proposals(0, true)
+        .await
+        .set_validator_enabled(0, true)
+        .await
+        .assert_enabled_validators_count(2)
+        .assert_prefer_builder_proposals(0, true)
+        .await;
+}
+
 #[tokio::test]
 async fn validator_graffiti() {
     ApiTester::new()
@@ -954,6 +1237,31 @@ async fn validator_graffiti() {
         .await;
 }
 
+#[tokio::test]
+async fn validator_graffiti_api() {
+    ApiTester::new()
+        .await
+        .create_hd_validators(HdValidatorScenario {
+            count: 2,
+            specify_mnemonic: false,
+            key_derivation_path_offset: 0,
+            disabled: vec![],
+        })
+        .await
+        .assert_enabled_validators_count(2)
+        .assert_validators_count(2)
+        .set_graffiti(0, "Mr F was here")
+        .await
+        .test_get_graffiti(0, "Mr F was here")
+        .await
+        .test_set_graffiti(0, "Uncle Bill was here")
+        .await
+        .test_get_graffiti(0, "Uncle Bill was here")
+        .await
+        .test_delete_graffiti(0)
+        .await;
+}
+
 #[tokio::test]
 async fn keystore_validator_creation() {
     ApiTester::new()
diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs
index d60872e49..fe58393bb 100644
--- a/validator_client/src/http_api/tests/keystores.rs
+++ b/validator_client/src/http_api/tests/keystores.rs
@@ -43,6 +43,8 @@ fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorRequest {
         suggested_fee_recipient: None,
         gas_limit: None,
         builder_proposals: None,
+        builder_boost_factor:
None, + prefer_builder_proposals: None, voting_public_key: pubkey, url: web3_signer_url(), root_certificate_path: None, @@ -468,7 +470,7 @@ async fn import_and_delete_conflicting_web3_signer_keystores() { for pubkey in &pubkeys { tester .client - .patch_lighthouse_validators(pubkey, Some(false), None, None, None) + .patch_lighthouse_validators(pubkey, Some(false), None, None, None, None, None) .await .unwrap(); } @@ -2146,7 +2148,7 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); let vals = tester.initialized_validators.read(); - let web3_vals = vals.validator_definitions().clone(); + let web3_vals = vals.validator_definitions(); // Import remotekeys. let import_res = tester @@ -2164,7 +2166,7 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); let vals = tester.initialized_validators.read(); - let remote_vals = vals.validator_definitions().clone(); + let remote_vals = vals.validator_definitions(); // Web3signer should not be overwritten since it is enabled. assert!(web3_vals == remote_vals); diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index f15ea27c9..7e4331dc8 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -131,6 +131,8 @@ pub struct InitializedValidator { suggested_fee_recipient: Option
, gas_limit: Option, builder_proposals: Option, + builder_boost_factor: Option, + prefer_builder_proposals: Option, /// The validators index in `state.validators`, to be updated by an external service. index: Option, } @@ -159,6 +161,14 @@ impl InitializedValidator { self.gas_limit } + pub fn get_builder_boost_factor(&self) -> Option { + self.builder_boost_factor + } + + pub fn get_prefer_builder_proposals(&self) -> Option { + self.prefer_builder_proposals + } + pub fn get_builder_proposals(&self) -> Option { self.builder_proposals } @@ -335,6 +345,8 @@ impl InitializedValidator { suggested_fee_recipient: def.suggested_fee_recipient, gas_limit: def.gas_limit, builder_proposals: def.builder_proposals, + builder_boost_factor: def.builder_boost_factor, + prefer_builder_proposals: def.prefer_builder_proposals, index: None, }) } @@ -716,6 +728,74 @@ impl InitializedValidators { self.validators.get(public_key).and_then(|v| v.graffiti) } + /// Sets the `InitializedValidator` and `ValidatorDefinition` `graffiti` values. + /// + /// ## Notes + /// + /// Setting a validator `graffiti` will cause `self.definitions` to be updated and saved to + /// disk. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn set_graffiti( + &mut self, + voting_public_key: &PublicKey, + graffiti: GraffitiString, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.graffiti = Some(graffiti.clone()); + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.graffiti = Some(graffiti.into()); + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + Ok(()) + } + + /// Removes the `InitializedValidator` and `ValidatorDefinition` `graffiti` values. + /// + /// ## Notes + /// + /// Removing a validator `graffiti` will cause `self.definitions` to be updated and saved to + /// disk. The graffiti for the validator will then fall back to the process level default if + /// it is set. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn delete_graffiti(&mut self, voting_public_key: &PublicKey) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.graffiti = None; + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.graffiti = None; + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + /// Returns a `HashMap` of `public_key` -> `graffiti` for all initialized validators. pub fn get_all_validators_graffiti(&self) -> HashMap<&PublicKeyBytes, Option> { let mut result = HashMap::new(); @@ -747,6 +827,22 @@ impl InitializedValidators { .and_then(|v| v.builder_proposals) } + /// Returns the `builder_boost_factor` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn builder_boost_factor(&self, public_key: &PublicKeyBytes) -> Option { + self.validators + .get(public_key) + .and_then(|v| v.builder_boost_factor) + } + + /// Returns the `prefer_builder_proposals` for a given public key specified in the + /// `ValidatorDefinitions`. 
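// A minimal sketch (hypothetical call site, not part of this diff) of the lookup
// convention the accessors in this file follow: the per-validator value recorded
// in validator_definitions.yml wins, and callers fall back to the process-level
// flag only when it is absent:
//
//     let boost = initialized_validators
//         .builder_boost_factor(&pubkey)      // Option<u64> from the definitions file
//         .or(process_default_boost_factor);  // process-wide --builder-boost-factor value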
+ pub fn prefer_builder_proposals(&self, public_key: &PublicKeyBytes) -> Option { + self.validators + .get(public_key) + .and_then(|v| v.prefer_builder_proposals) + } + /// Returns an `Option` of a reference to an `InitializedValidator` for a given public key specified in the /// `ValidatorDefinitions`. pub fn validator(&self, public_key: &PublicKeyBytes) -> Option<&InitializedValidator> { @@ -767,12 +863,15 @@ impl InitializedValidators { /// or `InitializedValidator`. The same logic applies to `builder_proposals` and `graffiti`. /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + #[allow(clippy::too_many_arguments)] pub async fn set_validator_definition_fields( &mut self, voting_public_key: &PublicKey, enabled: Option, gas_limit: Option, builder_proposals: Option, + builder_boost_factor: Option, + prefer_builder_proposals: Option, graffiti: Option, ) -> Result<(), Error> { if let Some(def) = self @@ -794,6 +893,12 @@ impl InitializedValidators { if let Some(graffiti) = graffiti.clone() { def.graffiti = Some(graffiti); } + if let Some(builder_boost_factor) = builder_boost_factor { + def.builder_boost_factor = Some(builder_boost_factor); + } + if let Some(prefer_builder_proposals) = prefer_builder_proposals { + def.prefer_builder_proposals = Some(prefer_builder_proposals); + } } self.update_validators().await?; @@ -812,6 +917,12 @@ impl InitializedValidators { if let Some(graffiti) = graffiti { val.graffiti = Some(graffiti.into()); } + if let Some(builder_boost_factor) = builder_boost_factor { + val.builder_boost_factor = Some(builder_boost_factor); + } + if let Some(prefer_builder_proposals) = prefer_builder_proposals { + val.prefer_builder_proposals = Some(prefer_builder_proposals); + } } self.definitions diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 6925e285f..89fc03762 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -19,6 +19,7 @@ pub mod http_api; pub mod initialized_validators; pub mod validator_store; +pub use beacon_node_fallback::ApiTopic; pub use cli::cli_app; pub use config::Config; use initialized_validators::InitializedValidators; @@ -82,7 +83,7 @@ const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; -const HTTP_GET_VALIDATOR_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -310,8 +311,7 @@ impl ProductionValidatorClient { / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, - get_validator_block_ssz: slot_duration - / HTTP_GET_VALIDATOR_BLOCK_SSZ_TIMEOUT_QUOTIENT, + get_validator_block: slot_duration / HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT, } } else { Timeouts::set_all(slot_duration) @@ -369,14 +369,14 @@ impl ProductionValidatorClient { let mut beacon_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new( candidates, - config.disable_run_on_all, + config.broadcast_topics.clone(), context.eth2_config.spec.clone(), log.clone(), ); let mut proposer_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new( proposer_candidates, - config.disable_run_on_all, + config.broadcast_topics.clone(), context.eth2_config.spec.clone(), log.clone(), ); @@ -471,8 +471,7 @@ impl 
ProductionValidatorClient { .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) - .graffiti_file(config.graffiti_file.clone()) - .block_delay(config.block_delay); + .graffiti_file(config.graffiti_file.clone()); // If we have proposer nodes, add them to the block service builder. if proposer_nodes_num > 0 { diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 2d2221680..7aabc7d5a 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,4 +1,4 @@ -use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; use crate::validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; use crate::OfflineOnFailure; use bls::PublicKeyBytes; @@ -342,9 +342,10 @@ impl PreparationService { let preparation_entries = preparation_data.as_slice(); match self .beacon_nodes - .run( + .request( RequireSynced::No, OfflineOnFailure::Yes, + ApiTopic::Subscriptions, |beacon_node| async move { beacon_node .post_validator_prepare_beacon_proposer(preparation_entries) diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 2c1f0cb3f..d7d74c944 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -27,6 +27,7 @@ pub enum ForkName { Altair, Bellatrix, Capella, + Deneb, } #[derive(Debug, PartialEq, Serialize)] @@ -95,6 +96,11 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, T, Pa block: None, block_header: Some(block.block_header()), }), + BeaconBlock::Deneb(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Deneb, + block: None, + block_header: Some(block.block_header()), + }), } } diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index e01bf09cf..90b62cd3b 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -1,4 +1,4 @@ -use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; use crate::{ duties_service::DutiesService, validator_store::{Error as ValidatorStoreError, ValidatorStore}, @@ -158,13 +158,11 @@ impl SyncCommitteeService { .checked_sub(slot_duration / 3) .unwrap_or_else(|| Duration::from_secs(0)); - let slot_duties = if let Some(duties) = self + let Some(slot_duties) = self .duties_service .sync_duties .get_duties_for_slot::(slot, &self.duties_service.spec) - { - duties - } else { + else { debug!(log, "No duties known for slot {}", slot); return Ok(()); }; @@ -301,9 +299,10 @@ impl SyncCommitteeService { .collect::>(); self.beacon_nodes - .first_success( + .request( RequireSynced::No, OfflineOnFailure::Yes, + ApiTopic::SyncCommittee, |beacon_node| async move { beacon_node .post_beacon_pool_sync_committee_signatures(committee_signatures) @@ -596,9 +595,10 @@ impl SyncCommitteeService { if let Err(e) = self .beacon_nodes - .run( + .request( RequireSynced::No, OfflineOnFailure::Yes, + ApiTopic::Subscriptions, |beacon_node| async move { beacon_node .post_validator_sync_committee_subscriptions(subscriptions_slice) diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 365f7f734..c913b9906 100644 --- 
a/validator_client/src/validator_store.rs
+++ b/validator_client/src/validator_store.rs
@@ -20,11 +20,11 @@ use task_executor::TaskExecutor;
 use types::{
     attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address,
     AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof,
-    Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof,
-    Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot,
-    SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData,
-    SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId,
-    ValidatorRegistrationData, VoluntaryExit,
+    Domain, Epoch, EthSpec, Fork, ForkName, Graffiti, Hash256, Keypair, PublicKeyBytes,
+    SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock,
+    SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit,
+    Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage,
+    SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit,
 };
 use validator_dir::ValidatorDir;
@@ -97,6 +97,9 @@ pub struct ValidatorStore<T, E: EthSpec> {
     fee_recipient_process: Option<Address>,
     gas_limit: Option<u64>,
     builder_proposals: bool,
+    produce_block_v3: bool,
+    prefer_builder_proposals: bool,
+    builder_boost_factor: Option<u64>,
     task_executor: TaskExecutor,
     _phantom: PhantomData<E>,
 }
@@ -128,6 +131,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
             fee_recipient_process: config.fee_recipient,
             gas_limit: config.gas_limit,
             builder_proposals: config.builder_proposals,
+            produce_block_v3: config.produce_block_v3,
+            prefer_builder_proposals: config.prefer_builder_proposals,
+            builder_boost_factor: config.builder_boost_factor,
             task_executor,
             _phantom: PhantomData,
         }
@@ -176,6 +182,8 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         suggested_fee_recipient: Option<Address>
, gas_limit: Option, builder_proposals: Option, + builder_boost_factor: Option, + prefer_builder_proposals: Option, ) -> Result { let mut validator_def = ValidatorDefinition::new_keystore_with_password( voting_keystore_path, @@ -184,6 +192,8 @@ impl ValidatorStore { suggested_fee_recipient, gas_limit, builder_proposals, + builder_boost_factor, + prefer_builder_proposals, ) .map_err(|e| format!("failed to create validator definitions: {:?}", e))?; @@ -336,6 +346,10 @@ impl ValidatorStore { self.spec.fork_at_epoch(epoch) } + pub fn produce_block_v3(&self) -> bool { + self.produce_block_v3 + } + /// Returns a `SigningMethod` for `validator_pubkey` *only if* that validator is considered safe /// by doppelganger protection. fn doppelganger_checked_signing_method( @@ -369,11 +383,35 @@ impl ValidatorStore { } fn signing_context(&self, domain: Domain, signing_epoch: Epoch) -> SigningContext { - SigningContext { - domain, - epoch: signing_epoch, - fork: self.fork(signing_epoch), - genesis_validators_root: self.genesis_validators_root, + if domain == Domain::VoluntaryExit { + match self.spec.fork_name_at_epoch(signing_epoch) { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + SigningContext { + domain, + epoch: signing_epoch, + fork: self.fork(signing_epoch), + genesis_validators_root: self.genesis_validators_root, + } + } + // EIP-7044 + ForkName::Deneb => SigningContext { + domain, + epoch: signing_epoch, + fork: Fork { + previous_version: self.spec.capella_fork_version, + current_version: self.spec.capella_fork_version, + epoch: signing_epoch, + }, + genesis_validators_root: self.genesis_validators_root, + }, + } + } else { + SigningContext { + domain, + epoch: signing_epoch, + fork: self.fork(signing_epoch), + genesis_validators_root: self.genesis_validators_root, + } } } @@ -444,7 +482,7 @@ impl ValidatorStore { .unwrap_or(DEFAULT_GAS_LIMIT) } - /// Returns a `bool` for the given public key that denotes whther this validator should use the + /// Returns a `bool` for the given public key that denotes whether this validator should use the /// builder API. The priority order for fetching this value is: /// /// 1. validator_definitions.yml @@ -457,12 +495,91 @@ impl ValidatorStore { ) } + /// Returns a `u64` for the given public key that denotes the builder boost factor. The priority order for fetching this value is: + /// + /// 1. validator_definitions.yml + /// 2. process level flag + pub fn get_builder_boost_factor(&self, validator_pubkey: &PublicKeyBytes) -> Option { + self.validators + .read() + .builder_boost_factor(validator_pubkey) + .or(self.builder_boost_factor) + } + + /// Returns a `bool` for the given public key that denotes whether this validator should prefer a + /// builder payload. The priority order for fetching this value is: + /// + /// 1. validator_definitions.yml + /// 2. process level flag + pub fn get_prefer_builder_proposals(&self, validator_pubkey: &PublicKeyBytes) -> bool { + self.validators + .read() + .prefer_builder_proposals(validator_pubkey) + .unwrap_or(self.prefer_builder_proposals) + } + fn get_builder_proposals_defaulting(&self, builder_proposals: Option) -> bool { builder_proposals // If there's nothing in the file, try the process-level default value. .unwrap_or(self.builder_proposals) } + /// Translate the per validator `builder_proposals`, `builder_boost_factor` and + /// `prefer_builder_proposals` to a boost factor, if available. 
+    /// - If `prefer_builder_proposals` is true, set boost factor to `u64::MAX` to indicate a
+    /// preference for builder payloads.
+    /// - If `builder_boost_factor` is a value other than None, return its value as the boost factor.
+    /// - If `builder_proposals` is set to false, set boost factor to 0 to indicate a preference for
+    /// local payloads.
+    /// - Else return `None` to indicate no preference between builder and local payloads.
+    pub fn determine_validator_builder_boost_factor(
+        &self,
+        validator_pubkey: &PublicKeyBytes,
+    ) -> Option<u64> {
+        let validator_prefer_builder_proposals = self
+            .validators
+            .read()
+            .prefer_builder_proposals(validator_pubkey);
+
+        if matches!(validator_prefer_builder_proposals, Some(true)) {
+            return Some(u64::MAX);
+        }
+
+        self.validators
+            .read()
+            .builder_boost_factor(validator_pubkey)
+            .or_else(|| {
+                if matches!(
+                    self.validators.read().builder_proposals(validator_pubkey),
+                    Some(false)
+                ) {
+                    return Some(0);
+                }
+                None
+            })
+    }
+
+    /// Translate the process-wide `builder_proposals`, `builder_boost_factor` and
+    /// `prefer_builder_proposals` configurations to a boost factor.
+    /// - If `prefer_builder_proposals` is true, set boost factor to `u64::MAX` to indicate a
+    /// preference for builder payloads.
+    /// - If `builder_boost_factor` is a value other than None, return its value as the boost factor.
+    /// - If `builder_proposals` is set to false, set boost factor to 0 to indicate a preference for
+    /// local payloads.
+    /// - Else return `None` to indicate no preference between builder and local payloads.
+    pub fn determine_default_builder_boost_factor(&self) -> Option<u64> {
+        if self.prefer_builder_proposals {
+            return Some(u64::MAX);
+        }
+        self.builder_boost_factor.or({
+            if !self.builder_proposals {
+                Some(0)
+            } else {
+                None
+            }
+        })
+    }
+
     pub async fn sign_block<Payload: AbstractExecPayload<E>>(
         &self,
         validator_pubkey: PublicKeyBytes,
diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs
index 6a3f93a3f..871c53620 100644
--- a/validator_manager/src/common.rs
+++ b/validator_manager/src/common.rs
@@ -46,6 +46,8 @@ pub struct ValidatorSpecification {
     pub fee_recipient: Option<Address>
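// Worked examples for the two translations above (values only; `prefer`, `factor`
// and `proposals` abbreviate the three settings):
//
//     prefer = true,  factor = Some(80), proposals = any   => Some(u64::MAX)
//     prefer = false, factor = Some(80), proposals = any   => Some(80)
//     prefer = false, factor = None,     proposals = false => Some(0)
//     prefer = false, factor = None,     proposals = true  => None
//
// Per the doc comments, the `Some(0)` arm applies only when builder proposals
// are disabled.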
, pub gas_limit: Option, pub builder_proposals: Option, + pub builder_boost_factor: Option, + pub prefer_builder_proposals: Option, pub enabled: Option, } @@ -64,6 +66,8 @@ impl ValidatorSpecification { gas_limit, builder_proposals, enabled, + builder_boost_factor, + prefer_builder_proposals, } = self; let voting_public_key = voting_keystore @@ -136,6 +140,8 @@ impl ValidatorSpecification { enabled, gas_limit, builder_proposals, + builder_boost_factor, + prefer_builder_proposals, None, // Grafitti field is not maintained between validator moves. ) .await diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 8ea740ff5..8ab3303d3 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -25,6 +25,8 @@ pub const ETH1_WITHDRAWAL_ADDRESS_FLAG: &str = "eth1-withdrawal-address"; pub const GAS_LIMIT_FLAG: &str = "gas-limit"; pub const FEE_RECIPIENT_FLAG: &str = "suggested-fee-recipient"; pub const BUILDER_PROPOSALS_FLAG: &str = "builder-proposals"; +pub const BUILDER_BOOST_FACTOR_FLAG: &str = "builder-boost-factor"; +pub const PREFER_BUILDER_PROPOSALS_FLAG: &str = "prefer-builder-proposals"; pub const BEACON_NODE_FLAG: &str = "beacon-node"; pub const FORCE_BLS_WITHDRAWAL_CREDENTIALS: &str = "force-bls-withdrawal-credentials"; @@ -183,6 +185,30 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address. This is not recommended.", ), ) + .arg( + Arg::with_name(BUILDER_BOOST_FACTOR_FLAG) + .long(BUILDER_BOOST_FACTOR_FLAG) + .takes_value(true) + .value_name("UINT64") + .required(false) + .help( + "Defines the boost factor, \ + a percentage multiplier to apply to the builder's payload value \ + when choosing between a builder payload header and payload from \ + the local execution node.", + ), + ) + .arg( + Arg::with_name(PREFER_BUILDER_PROPOSALS_FLAG) + .long(PREFER_BUILDER_PROPOSALS_FLAG) + .help( + "If this flag is set, Lighthouse will always prefer blocks \ + constructed by builders, regardless of payload value.", + ) + .required(false) + .possible_values(&["true", "false"]) + .takes_value(true), + ) } /// The CLI arguments are parsed into this struct before running the application. This step of @@ -199,6 +225,8 @@ pub struct CreateConfig { pub specify_voting_keystore_password: bool, pub eth1_withdrawal_address: Option
<Address>,
     pub builder_proposals: Option<bool>,
+    pub builder_boost_factor: Option<u64>,
+    pub prefer_builder_proposals: Option<bool>,
     pub fee_recipient: Option<Address>
, pub gas_limit: Option, pub bn_url: Option, @@ -223,6 +251,11 @@ impl CreateConfig { ETH1_WITHDRAWAL_ADDRESS_FLAG, )?, builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS_FLAG)?, + builder_boost_factor: clap_utils::parse_optional(matches, BUILDER_BOOST_FACTOR_FLAG)?, + prefer_builder_proposals: clap_utils::parse_optional( + matches, + PREFER_BUILDER_PROPOSALS_FLAG, + )?, fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, bn_url: clap_utils::parse_optional(matches, BEACON_NODE_FLAG)?, @@ -254,6 +287,8 @@ impl ValidatorsAndDeposits { gas_limit, bn_url, force_bls_withdrawal_credentials, + builder_boost_factor, + prefer_builder_proposals, } = config; // Since Capella, it really doesn't make much sense to use BLS @@ -456,6 +491,8 @@ impl ValidatorsAndDeposits { fee_recipient, gas_limit, builder_proposals, + builder_boost_factor, + prefer_builder_proposals, // Allow the VC to choose a default "enabled" state. Since "enabled" is not part of // the standard API, leaving this as `None` means we are not forced to use the // non-standard API. @@ -585,6 +622,8 @@ pub mod tests { specify_voting_keystore_password: false, eth1_withdrawal_address: junk_execution_address(), builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, fee_recipient: None, gas_limit: None, bn_url: None, diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index fa886e8f9..5826f2756 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -32,6 +32,8 @@ pub const VALIDATORS_FLAG: &str = "validators"; pub const GAS_LIMIT_FLAG: &str = "gas-limit"; pub const FEE_RECIPIENT_FLAG: &str = "suggested-fee-recipient"; pub const BUILDER_PROPOSALS_FLAG: &str = "builder-proposals"; +pub const BUILDER_BOOST_FACTOR_FLAG: &str = "builder-boost-factor"; +pub const PREFER_BUILDER_PROPOSALS_FLAG: &str = "prefer-builder-proposals"; const NO_VALIDATORS_MSG: &str = "No validators present on source validator client"; @@ -170,6 +172,30 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long(STDIN_INPUTS_FLAG) .help("If present, read all user inputs from stdin instead of tty."), ) + .arg( + Arg::with_name(BUILDER_BOOST_FACTOR_FLAG) + .long(BUILDER_BOOST_FACTOR_FLAG) + .takes_value(true) + .value_name("UINT64") + .required(false) + .help( + "Defines the boost factor, \ + a percentage multiplier to apply to the builder's payload value \ + when choosing between a builder payload header and payload from \ + the local execution node.", + ), + ) + .arg( + Arg::with_name(PREFER_BUILDER_PROPOSALS_FLAG) + .long(PREFER_BUILDER_PROPOSALS_FLAG) + .help( + "If this flag is set, Lighthouse will always prefer blocks \ + constructed by builders, regardless of payload value.", + ) + .required(false) + .possible_values(&["true", "false"]) + .takes_value(true), + ) } #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] @@ -187,6 +213,8 @@ pub struct MoveConfig { pub dest_vc_token_path: PathBuf, pub validators: Validators, pub builder_proposals: Option, + pub builder_boost_factor: Option, + pub prefer_builder_proposals: Option, pub fee_recipient: Option
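// A hedged usage sketch for the two flags registered above (the
// `lighthouse validator-manager` entry point and the surrounding required
// arguments are assumed, not spelled out in this diff):
//
//     lighthouse validator-manager move \
//         --builder-boost-factor 90 \
//         --prefer-builder-proposals false \
//         ... (source/destination VC URLs, tokens and --validators as usual)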
, pub gas_limit: Option, pub password_source: PasswordSource, @@ -221,6 +249,11 @@ impl MoveConfig { dest_vc_token_path: clap_utils::parse_required(matches, DEST_VC_TOKEN_FLAG)?, validators, builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS_FLAG)?, + builder_boost_factor: clap_utils::parse_optional(matches, BUILDER_BOOST_FACTOR_FLAG)?, + prefer_builder_proposals: clap_utils::parse_optional( + matches, + PREFER_BUILDER_PROPOSALS_FLAG, + )?, fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, password_source: PasswordSource::Interactive { @@ -253,6 +286,8 @@ async fn run<'a>(config: MoveConfig) -> Result<(), String> { fee_recipient, gas_limit, mut password_source, + builder_boost_factor, + prefer_builder_proposals, } = config; // Moving validators between the same VC is unlikely to be useful and probably indicates a user @@ -488,13 +523,15 @@ async fn run<'a>(config: MoveConfig) -> Result<(), String> { let keystore_derivation_path = voting_keystore.0.path(); - let validator_specification = ValidatorSpecification { + let validator_specification: ValidatorSpecification = ValidatorSpecification { voting_keystore, voting_keystore_password, slashing_protection: Some(InterchangeJsonStr(slashing_protection)), fee_recipient, gas_limit, builder_proposals, + builder_boost_factor, + prefer_builder_proposals, // Allow the VC to choose a default "enabled" state. Since "enabled" is not part of // the standard API, leaving this as `None` means we are not forced to use the // non-standard API. @@ -758,6 +795,8 @@ mod test { dest_vc_token_path: dest_vc_token_path.clone(), validators: validators.clone(), builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, fee_recipient: None, gas_limit: None, password_source: PasswordSource::Testing(self.passwords.clone()), diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 67cbc3cc2..aaaf50aa4 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -21,7 +21,7 @@ types = { workspace = true } eth2 = { workspace = true } beacon_node = { workspace = true } tokio = { workspace = true } -axum = "0.6.18" +axum = "0.7" hyper = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } @@ -41,8 +41,7 @@ tokio-postgres = "0.7.5" http_api = { workspace = true } beacon_chain = { workspace = true } network = { workspace = true } -# TODO: update to 0.15 when released: https://github.com/testcontainers/testcontainers-rs/issues/497 -testcontainers = { git = "https://github.com/testcontainers/testcontainers-rs/", rev = "0f2c9851" } +testcontainers = "0.15" unused_port = { workspace = true } task_executor = { workspace = true } logging = { workspace = true } diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs index b8107e5bf..532776f42 100644 --- a/watch/src/blockprint/mod.rs +++ b/watch/src/blockprint/mod.rs @@ -17,7 +17,7 @@ pub use config::Config; pub use database::{ get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, - list_consensus_clients, WatchBlockprint, + WatchBlockprint, }; pub use server::blockprint_routes; diff --git a/watch/src/cli.rs b/watch/src/cli.rs index a8e5f3716..97dc21729 100644 --- a/watch/src/cli.rs +++ b/watch/src/cli.rs @@ -1,6 +1,5 @@ use crate::{config::Config, logger, server, updater}; use clap::{App, Arg}; -use tokio::sync::oneshot; pub const SERVE: 
&str = "serve"; pub const RUN_UPDATER: &str = "run-updater"; @@ -44,12 +43,9 @@ pub async fn run() -> Result<(), String> { (RUN_UPDATER, Some(_)) => updater::run_updater(config) .await .map_err(|e| format!("Failure: {:?}", e)), - (SERVE, Some(_)) => { - let (_shutdown_tx, shutdown_rx) = oneshot::channel(); - server::serve(config, shutdown_rx) - .await - .map_err(|e| format!("Failure: {:?}", e)) - } + (SERVE, Some(_)) => server::serve(config) + .await + .map_err(|e| format!("Failure: {:?}", e)), _ => Err("Unsupported subcommand. See --help".into()), } } diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs index b9a7a900a..841ebe5ee 100644 --- a/watch/src/database/mod.rs +++ b/watch/src/database/mod.rs @@ -26,24 +26,29 @@ pub use self::error::Error; pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator}; pub use self::watch_types::{WatchHash, WatchPK, WatchSlot}; +// Clippy has false positives on these re-exports from Rust 1.75.0-beta.1. +#[allow(unused_imports)] pub use crate::block_rewards::{ get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, WatchBlockRewards, }; +#[allow(unused_imports)] pub use crate::block_packing::{ get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, WatchBlockPacking, }; +#[allow(unused_imports)] pub use crate::suboptimal_attestations::{ get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, WatchAttestation, WatchSuboptimalAttestation, }; +#[allow(unused_imports)] pub use crate::blockprint::{ get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs index d1542f784..0db3df2a0 100644 --- a/watch/src/server/error.rs +++ b/watch/src/server/error.rs @@ -3,12 +3,14 @@ use axum::Error as AxumError; use axum::{http::StatusCode, response::IntoResponse, Json}; use hyper::Error as HyperError; use serde_json::json; +use std::io::Error as IoError; #[derive(Debug)] pub enum Error { Axum(AxumError), Hyper(HyperError), Database(DbError), + IoError(IoError), BadRequest, NotFound, Other(String), @@ -43,6 +45,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: IoError) -> Self { + Error::IoError(e) + } +} + impl From for Error { fn from(e: String) -> Self { Error::Other(e) diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs index d8ae0eb6c..25dd242aa 100644 --- a/watch/src/server/mod.rs +++ b/watch/src/server/mod.rs @@ -11,9 +11,8 @@ use axum::{ }; use eth2::types::ErrorMessage; use log::info; -use std::future::Future; -use std::net::SocketAddr; -use tokio::sync::oneshot; +use std::future::{Future, IntoFuture}; +use std::net::{SocketAddr, TcpListener}; pub use config::Config; pub use error::Error; @@ -22,7 +21,7 @@ mod config; mod error; mod handler; -pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> { +pub async fn serve(config: FullConfig) -> Result<(), Error> { let db = database::build_connection_pool(&config.database)?; let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? 
.ok_or_else(|| { @@ -32,9 +31,7 @@ pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Resul ) })?; - let server = start_server(&config, slots_per_epoch as u64, db, async { - let _ = shutdown.await; - })?; + let server = start_server(&config, slots_per_epoch as u64, db)?; server.await?; @@ -61,8 +58,7 @@ pub fn start_server( config: &FullConfig, slots_per_epoch: u64, pool: PgPool, - shutdown: impl Future + Send + Sync + 'static, -) -> Result> + 'static, Error> { +) -> Result> + 'static, Error> { let mut routes = Router::new() .route("/v1/slots", get(handler::get_slots_by_range)) .route("/v1/slots/:slot", get(handler::get_slot)) @@ -108,16 +104,13 @@ pub fn start_server( .layer(Extension(slots_per_epoch)); let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); - - let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service()); - - let server = server.with_graceful_shutdown(async { - shutdown.await; - }); + let listener = TcpListener::bind(addr)?; + listener.set_nonblocking(true)?; + let serve = axum::serve(tokio::net::TcpListener::from_std(listener)?, app); info!("HTTP server listening on {}", addr); - Ok(server) + Ok(serve.into_future()) } // The default route indicating that no available routes matched the request. diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index dc0b8af6e..0e29e7f0c 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -17,7 +17,6 @@ use std::env; use std::net::SocketAddr; use std::time::Duration; use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; -use tokio::sync::oneshot; use tokio::{runtime, task::JoinHandle}; use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; use types::{Hash256, MainnetEthSpec, Slot}; @@ -188,11 +187,7 @@ impl TesterBuilder { /* * Spawn a Watch HTTP API. */ - let (_watch_shutdown_tx, watch_shutdown_rx) = oneshot::channel(); - let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool, async { - let _ = watch_shutdown_rx.await; - }) - .unwrap(); + let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool).unwrap(); tokio::spawn(watch_server); let addr = SocketAddr::new( @@ -228,7 +223,6 @@ impl TesterBuilder { config: self.config, updater, _bn_network_rx: self._bn_network_rx, - _watch_shutdown_tx, } } async fn initialize_database(&self) -> PgPool { @@ -245,7 +239,6 @@ struct Tester { pub config: Config, pub updater: UpdateHandler, _bn_network_rx: NetworkReceivers, - _watch_shutdown_tx: oneshot::Sender<()>, } impl Tester {
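// The watch server changes above follow the axum 0.6 -> 0.7 migration: the
// hyper-based `axum::Server` builder (and its `with_graceful_shutdown` hook) is
// gone, and the future is now obtained from `axum::serve` via `IntoFuture`. A
// condensed sketch of the pattern `start_server` uses, with the same calls as
// the diff shown in isolation:
//
//     let listener = std::net::TcpListener::bind(addr)?; // bind synchronously
//     listener.set_nonblocking(true)?;                   // required before tokio takes over
//     let serve = axum::serve(tokio::net::TcpListener::from_std(listener)?, app);
//     Ok(serve.into_future())                            // caller drives it with `.await`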