diff --git a/.config/nextest.toml b/.config/nextest.toml new file mode 100644 index 000000000..b701259fc --- /dev/null +++ b/.config/nextest.toml @@ -0,0 +1,113 @@ +# This is the default config used by nextest. It is embedded in the binary at +# build time. It may be used as a template for .config/nextest.toml. + +[store] +# The directory under the workspace root at which nextest-related files are +# written. Profile-specific storage is currently written to dir/. +dir = "target/nextest" + +# This section defines the default nextest profile. Custom profiles are layered +# on top of the default profile. +[profile.default] +# "retries" defines the number of times a test should be retried. If set to a +# non-zero value, tests that succeed on a subsequent attempt will be marked as +# non-flaky. Can be overridden through the `--retries` option. +# Examples +# * retries = 3 +# * retries = { backoff = "fixed", count = 2, delay = "1s" } +# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" } +retries = 0 + +# The number of threads to run tests with. Supported values are either an integer or +# the string "num-cpus". Can be overridden through the `--test-threads` option. +test-threads = "num-cpus" + +# The number of threads required for each test. This is generally used in overrides to +# mark certain tests as heavier than others. However, it can also be set as a global parameter. +threads-required = 1 + +# Show these test statuses in the output. +# +# The possible values this can take are: +# * none: no output +# * fail: show failed (including exec-failed) tests +# * retry: show flaky and retried tests +# * slow: show slow tests +# * pass: show passed tests +# * skip: show skipped tests (most useful for CI) +# * all: all of the above +# +# Each value includes all the values above it; for example, "slow" includes +# failed and retried tests. +# +# Can be overridden through the `--status-level` flag. +status-level = "pass" + +# Similar to status-level, show these test statuses at the end of the run. +final-status-level = "flaky" + +# "failure-output" defines when standard output and standard error for failing tests are produced. +# Accepted values are +# * "immediate": output failures as soon as they happen +# * "final": output failures at the end of the test run +# * "immediate-final": output failures as soon as they happen and at the end of +# the test run; combination of "immediate" and "final" +# * "never": don't output failures at all +# +# For large test suites and CI it is generally useful to use "immediate-final". +# +# Can be overridden through the `--failure-output` option. +failure-output = "immediate" + +# "success-output" controls production of standard output and standard error on success. This should +# generally be set to "never". +success-output = "never" + +# Cancel the test run on the first failure. For CI runs, consider setting this +# to false. +fail-fast = true + +# Treat a test that takes longer than the configured 'period' as slow, and print a message. +# See for more information. +# +# Optional: specify the parameter 'terminate-after' with a non-zero integer, +# which will cause slow tests to be terminated after the specified number of +# periods have passed. +# Example: slow-timeout = { period = "60s", terminate-after = 2 } +slow-timeout = { period = "120s" } + +# Treat a test as leaky if after the process is shut down, standard output and standard error +# aren't closed within this duration. 
+# +# This usually happens in case of a test that creates a child process and lets it inherit those +# handles, but doesn't clean the child process up (especially when it fails). +# +# See for more information. +leak-timeout = "100ms" + +[profile.default.junit] +# Output a JUnit report into the given file inside 'store.dir/'. +# If unspecified, JUnit is not written out. + +# path = "junit.xml" + +# The name of the top-level "report" element in JUnit report. If aggregating +# reports across different test runs, it may be useful to provide separate names +# for each report. +report-name = "lighthouse-run" + +# Whether standard output and standard error for passing tests should be stored in the JUnit report. +# Output is stored in the and elements of the element. +store-success-output = false + +# Whether standard output and standard error for failing tests should be stored in the JUnit report. +# Output is stored in the and elements of the element. +# +# Note that if a description can be extracted from the output, it is always stored in the +# element. +store-failure-output = true + +# This profile is activated if MIRI_SYSROOT is set. +[profile.default-miri] +# Miri tests take up a lot of memory, so only run 1 test at a time by default. +test-threads = 4 diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 1b8a3b2c0..1d80feadd 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -18,14 +18,14 @@ env: # Deny warnings in CI # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) RUSTFLAGS: "-D warnings -C debuginfo=0" - # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2023-04-16 # Prevent Github API rate limiting. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} # Self-hosted runners need to reference a different host for `./watch` tests. WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }} + # Disable incremental compilation + CARGO_INCREMENTAL: 0 jobs: target-branch-check: name: target-branch-check @@ -34,155 +34,191 @@ jobs: steps: - name: Check that the pull request is not targeting the stable branch run: test ${{ github.base_ref }} != "stable" - extract-msrv: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Extract Minimum Supported Rust Version (MSRV) - run: | - metadata=$(cargo metadata --no-deps --format-version 1) - msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - echo "MSRV=$msrv" >> $GITHUB_OUTPUT - id: extract_msrv - outputs: - MSRV: ${{ steps.extract_msrv.outputs.MSRV }} - cargo-fmt: - name: cargo-fmt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Check formatting with cargo fmt - run: make cargo-fmt release-tests-ubuntu: name: release-tests-ubuntu # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in release - run: make test-release + run: make nextest-release + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats release-tests-windows: name: release-tests-windows runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Install make + if: env.SELF_HOSTED_RUNNERS == 'false' run: choco install -y make - - uses: KyleMayes/install-llvm-action@v1 - if: env.SELF_HOSTED_RUNNERS == false - with: - version: "15.0" - directory: ${{ runner.temp }}/llvm +# - uses: KyleMayes/install-llvm-action@v1 +# if: env.SELF_HOSTED_RUNNERS == 'false' +# with: +# version: "15.0" +# directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: Run tests in release - run: make test-release + run: make nextest-release + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats beacon-chain-tests: name: beacon-chain-tests # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run beacon_chain tests for all known forks run: make test-beacon-chain + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats op-pool-tests: name: op-pool-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run operation_pool tests for all known forks run: make test-op-pool network-tests: name: network-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable - - name: Run network tests for all known forks + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run network tests for all known forks run: make test-network slasher-tests: name: slasher-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run slasher tests for all supported backends run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + bins: cargo-nextest - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in debug - run: make test-debug + run: make nextest-debug + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats state-transition-vectors-ubuntu: name: state-transition-vectors-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Run state_transition_vectors in release. run: make run-state-transition-tests ef-tests-ubuntu: name: ef-tests-ubuntu # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run consensus-spec-tests with blst, milagro and fake_crypto - run: make test-ef + run: make nextest-ef + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats dockerfile-ubuntu: name: dockerfile-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - name: Build the root Dockerfile run: docker build --build-arg FEATURES=portable -t lighthouse:local . - name: Test the built image @@ -190,11 +226,13 @@ jobs: eth1-simulator-ubuntu: name: eth1-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -204,11 +242,13 @@ jobs: merge-transition-ubuntu: name: merge-transition-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -218,21 +258,25 @@ jobs: no-eth1-simulator-ubuntu: name: no-eth1-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Run the beacon chain sim without an eth1 connection run: cargo run --release --bin simulator no-eth1-sim syncing-simulator-ubuntu: name: syncing-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -241,21 +285,28 @@ jobs: run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: name: doppelganger-protection-test - runs-on: ubuntu-latest - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install geth + if: env.SELF_HOSTED_RUNNERS == 'false' run: | sudo add-apt-repository -y ppa:ethereum/ethereum sudo apt-get update sudo apt-get install ethereum - - name: Install lighthouse and lcli + - name: Install lighthouse run: | make - make install-lcli + - name: Install lcli +# TODO(jimmy): re-enable this once we merge deneb into unstable +# if: env.SELF_HOSTED_RUNNERS == 'false' + run: make install-lcli - name: 
Run the doppelganger protection failure test script run: | cd scripts/tests @@ -267,91 +318,73 @@ jobs: execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: '1.20' - - uses: actions/setup-dotnet@v3 - with: - dotnet-version: '6.0.201' - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + cache: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release run: make test-exec-engine - check-benchmarks: - name: check-benchmarks + check-code: + name: check-code runs-on: ubuntu-latest - needs: cargo-fmt + env: + CARGO_INCREMENTAL: 1 steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable - - name: Typecheck benchmark code without running it - run: make check-benches - clippy: - name: clippy - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + components: rustfmt,clippy + bins: cargo-audit + - name: Check formatting with cargo fmt + run: make cargo-fmt - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock + - name: Typecheck benchmark code without running it + run: make check-benches + - name: Validate state_processing feature arbitrary-fuzz + run: make arbitrary-fuzz + - name: Run cargo audit + run: make audit-CI +# TODO(sean): re-enable this when we can figure it out with c-kzg +# Issue: https://github.com/sigp/lighthouse/issues/4440 +# - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose +# run: CARGO_HOME=$(readlink -f $HOME) make vendor check-msrv: name: check-msrv runs-on: ubuntu-latest - needs: [cargo-fmt, extract-msrv] steps: - uses: actions/checkout@v3 - - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) - run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} + - name: Install Rust at Minimum Supported Rust Version (MSRV) + run: | + metadata=$(cargo metadata --no-deps --format-version 1) + msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') + rustup override set $msrv - name: Run cargo check run: cargo check --workspace - arbitrary-check: - name: arbitrary-check - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Validate state_processing feature arbitrary-fuzz - run: make arbitrary-fuzz - cargo-audit: - name: cargo-audit - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database - run: make audit -# TODO(sean): re-enable this when we can figure it out with c-kzg -# Issue: https://github.com/sigp/lighthouse/issues/4440 -# cargo-vendor: -# name: cargo-vendor -# runs-on: ubuntu-latest -# needs: cargo-fmt -# steps: -# - uses: actions/checkout@v3 -# - name: Run cargo vendor to make sure dependencies can be vendored for 
packaging, reproducibility and archival purpose -# run: CARGO_HOME=$(readlink -f $HOME) make vendor cargo-udeps: name: cargo-udeps runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - name: Install Rust (${{ env.PINNED_NIGHTLY }}) - run: rustup toolchain install $PINNED_NIGHTLY - - name: Install cargo-udeps - run: cargo install cargo-udeps --locked --force + - name: Get latest version of nightly Rust + uses: moonrepo/setup-rust@v1 + with: + channel: nightly + bins: cargo-udeps + cache: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config diff --git a/Cargo.lock b/Cargo.lock index 447b4de49..83d4cd74e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ version = "0.3.5" dependencies = [ "account_utils", "bls", - "clap 2.34.0", + "clap", "clap_utils", "directory", "environment", @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2135563fb5c609d2b2b87c1e8ce7bc41b0b45430fa9661f457981503dd5bf0" +checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" dependencies = [ "memchr", ] @@ -195,63 +195,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "anstream" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" - -[[package]] -name = "anstyle-parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" -dependencies = [ - "windows-sys 0.48.0", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" -dependencies = [ - "anstyle", - "windows-sys 0.48.0", -] - -[[package]] -name = "anvil-rpc" -version = "0.1.0" -source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0" -dependencies = [ - "serde", - "serde_json", -] - [[package]] name = "anyhow" version = "1.0.75" @@ -273,15 +216,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" -[[package]] -name = "array-init" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72" -dependencies = [ - "nodrop", -] - [[package]] name = "arrayref" version = "0.3.7" @@ -368,28 +302,6 @@ dependencies = [ "event-listener", ] -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.37", -] - [[package]] name = "async-trait" version = "0.1.73" @@ -566,25 +478,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "beacon-api-client" -version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7f28993615fde52d563dd601a0511c34fe9b7c38#7f28993615fde52d563dd601a0511c34fe9b7c38" -dependencies = [ - "clap 4.4.6", - "ethereum-consensus", - "http", - "itertools", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-subscriber", - "url", -] - [[package]] name = "beacon_chain" version = "0.2.0" @@ -631,7 +524,7 @@ dependencies = [ "slog-term", "sloggers", "slot_clock", - "smallvec 1.11.0", + "smallvec", "ssz_types", "state_processing", "store", @@ -651,7 +544,7 @@ name = "beacon_node" version = "4.5.0" dependencies = [ "beacon_chain", - "clap 2.34.0", + "clap", "clap_utils", "client", "directory", @@ -874,7 +767,7 @@ name = "boot_node" version = "4.5.0" dependencies = [ "beacon_node", - "clap 2.34.0", + "clap", "clap_utils", "eth2_network_config", "ethereum_ssz", @@ -1008,7 +901,7 @@ dependencies = [ "ethereum_ssz_derive", "quickcheck", "quickcheck_macros", - "smallvec 1.11.0", + "smallvec", "ssz_types", "tree_hash", ] @@ -1158,51 +1051,11 @@ dependencies = [ "vec_map", ] -[[package]] -name = "clap" -version = "4.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim 0.10.0", -] - -[[package]] -name = "clap_derive" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.37", -] - -[[package]] -name = "clap_lex" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" - [[package]] name = "clap_utils" version = "0.1.0" dependencies = [ - "clap 2.34.0", + "clap", "dirs", "eth2_network_config", "ethereum-types 0.14.1", @@ -1266,12 +1119,6 @@ dependencies = [ "cc", ] -[[package]] -name = "colorchoice" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" - [[package]] name = "compare_fields" version = "0.2.0" @@ -1289,9 +1136,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = 
"f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] @@ -1365,7 +1212,7 @@ checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", - "clap 2.34.0", + "clap", "criterion-plot", "csv", "itertools", @@ -1551,9 +1398,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.0" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ "cfg-if", "cpufeatures", @@ -1664,7 +1511,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_node", - "clap 2.34.0", + "clap", "clap_utils", "environment", "logging", @@ -1689,7 +1536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.7.8", + "tokio-util 0.7.9", ] [[package]] @@ -1784,9 +1631,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d98235fdc2f355d330a8244184ab6b4b33c28679c0b4158f63138e51d6cf7e88" +checksum = "53c8a2cb22327206568569e5a45bb5a2c946455efdd76e24d15b7e82171af95e" dependencies = [ "bitflags 2.4.0", "byteorder", @@ -1798,9 +1645,9 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e054665eaf6d97d1e7125512bb2d35d07c73ac86cc6920174cb42d1ab697a554" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", @@ -1853,7 +1700,7 @@ dependencies = [ name = "directory" version = "0.1.0" dependencies = [ - "clap 2.34.0", + "clap", "clap_utils", "eth2_network_config", ] @@ -1909,7 +1756,7 @@ dependencies = [ "aes-gcm", "arrayvec", "delay_map", - "enr 0.9.0", + "enr", "fnv", "futures", "hashlink 0.7.0", @@ -1923,7 +1770,7 @@ dependencies = [ "parking_lot 0.11.2", "rand", "rlp", - "smallvec 1.11.0", + "smallvec", "socket2 0.4.9", "tokio", "tracing", @@ -1997,7 +1844,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 4.1.0", + "curve25519-dalek 4.1.1", "ed25519", "rand_core 0.6.4", "serde", @@ -2059,7 +1906,6 @@ dependencies = [ "ff 0.12.1", "generic-array", "group 0.12.1", - "pkcs8 0.9.0", "rand_core 0.6.4", "sec1 0.3.0", "subtle", @@ -2097,28 +1943,9 @@ dependencies = [ [[package]] name = "enr" -version = "0.6.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" -dependencies = [ - "base64 0.13.1", - "bs58 0.4.0", - "bytes", - "hex", - "k256 0.11.6", - "log", - "rand", - "rlp", - "serde", - "sha3 0.10.8", - "zeroize", -] - -[[package]] -name = "enr" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" +checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" dependencies = [ "base64 0.21.4", "bytes", @@ -2129,7 +1956,6 @@ dependencies = [ "rand", "rlp", "serde", - "serde-hex", "sha3 
0.10.8", "zeroize", ] @@ -2482,30 +2308,6 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "ethereum-consensus" -version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=12508c1f9b0c8f4bf4c5e9b6d441e840c1b37fd9#12508c1f9b0c8f4bf4c5e9b6d441e840c1b37fd9" -dependencies = [ - "async-stream", - "blst", - "bs58 0.4.0", - "enr 0.6.2", - "hex", - "integer-sqrt", - "multiaddr 0.14.0", - "multihash 0.16.3", - "rand", - "serde", - "serde_json", - "serde_yaml", - "sha2 0.9.9", - "ssz_rs", - "thiserror", - "tokio", - "tokio-stream", -] - [[package]] name = "ethereum-types" version = "0.12.1" @@ -2569,7 +2371,7 @@ checksum = "e61ffea29f26e8249d35128a82ec8d3bd4fbc80179ea5f5e5e3daafef6a80fcb" dependencies = [ "ethereum-types 0.14.1", "itertools", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -2743,12 +2545,10 @@ version = "0.1.0" dependencies = [ "arc-swap", "async-trait", - "axum", "builder_client", "bytes", "environment", "eth2", - "ethereum-consensus", "ethereum_serde_utils", "ethereum_ssz", "ethers-core", @@ -2758,14 +2558,12 @@ dependencies = [ "hash-db", "hash256-std-hasher", "hex", - "hyper", "jsonwebtoken", "keccak-hash", "kzg", "lazy_static", "lighthouse_metrics", "lru 0.7.8", - "mev-rs", "parking_lot 0.12.1", "pretty_reqwest_error", "rand", @@ -2775,7 +2573,6 @@ dependencies = [ "serde_json", "slog", "slot_clock", - "ssz_rs", "ssz_types", "state_processing", "strum", @@ -2834,9 +2631,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "ff" @@ -3275,7 +3072,7 @@ dependencies = [ "indexmap 1.9.3", "slab", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "tracing", ] @@ -3405,9 +3202,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -3889,7 +3686,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.3", "libc", "windows-sys 0.48.0", ] @@ -4076,7 +3873,7 @@ dependencies = [ "account_utils", "beacon_chain", "bls", - "clap 2.34.0", + "clap", "clap_utils", "deposit_contract", "directory", @@ -4214,7 +4011,7 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-yamux", - "multiaddr 0.18.0", + "multiaddr", "pin-project", ] @@ -4244,9 +4041,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef7dd7b09e71aac9271c60031d0e558966cdb3253ba0308ab369bb2de80630d0" +checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713" dependencies = [ "either", "fnv", @@ -4255,8 +4052,8 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr 0.18.0", - "multihash 0.19.1", + "multiaddr", + "multihash", "multistream-select", "once_cell", "parking_lot 0.12.1", @@ -4264,7 +4061,7 @@ dependencies = [ "quick-protobuf", "rand", "rw-stream-sink", - "smallvec 1.11.0", + "smallvec", "thiserror", 
"unsigned-varint 0.7.2", "void", @@ -4281,7 +4078,7 @@ dependencies = [ "libp2p-identity", "log", "parking_lot 0.12.1", - "smallvec 1.11.0", + "smallvec", "trust-dns-resolver", ] @@ -4312,7 +4109,7 @@ dependencies = [ "rand", "regex", "sha2 0.10.7", - "smallvec 1.11.0", + "smallvec", "unsigned-varint 0.7.2", "void", ] @@ -4334,7 +4131,7 @@ dependencies = [ "lru 0.10.1", "quick-protobuf", "quick-protobuf-codec", - "smallvec 1.11.0", + "smallvec", "thiserror", "void", ] @@ -4350,7 +4147,7 @@ dependencies = [ "ed25519-dalek", "libsecp256k1", "log", - "multihash 0.19.1", + "multihash", "p256", "quick-protobuf", "rand", @@ -4375,7 +4172,7 @@ dependencies = [ "libp2p-swarm", "log", "rand", - "smallvec 1.11.0", + "smallvec", "socket2 0.5.4", "tokio", "trust-dns-proto", @@ -4413,7 +4210,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.1", "rand", - "smallvec 1.11.0", + "smallvec", "unsigned-varint 0.7.2", ] @@ -4424,13 +4221,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71ce70757f2c0d82e9a3ef738fb10ea0723d16cec37f078f719e2c247704c1bb" dependencies = [ "bytes", - "curve25519-dalek 4.1.0", + "curve25519-dalek 4.1.1", "futures", "libp2p-core", "libp2p-identity", "log", - "multiaddr 0.18.0", - "multihash 0.19.1", + "multiaddr", + "multihash", "once_cell", "quick-protobuf", "rand", @@ -4483,9 +4280,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.43.3" +version = "0.43.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28016944851bd73526d3c146aabf0fa9bbe27c558f080f9e5447da3a1772c01a" +checksum = "f0cf749abdc5ca1dce6296dc8ea0f012464dfcfd3ddd67ffc0cabd8241c4e1da" dependencies = [ "either", "fnv", @@ -4499,7 +4296,7 @@ dependencies = [ "multistream-select", "once_cell", "rand", - "smallvec 1.11.0", + "smallvec", "tokio", "void", ] @@ -4646,7 +4443,7 @@ dependencies = [ "beacon_processor", "bls", "boot_node", - "clap 2.34.0", + "clap", "clap_utils", "database_manager", "directory", @@ -4722,7 +4519,7 @@ dependencies = [ "slog", "slog-async", "slog-term", - "smallvec 1.11.0", + "smallvec", "snap", "ssz_types", "strum", @@ -4869,6 +4666,7 @@ name = "lru_cache" version = "0.1.0" dependencies = [ "fnv", + "mock_instant", ] [[package]] @@ -4921,22 +4719,17 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "md-5" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ + "cfg-if", "digest 0.10.7", ] @@ -5012,31 +4805,10 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "smallvec 1.11.0", + "smallvec", "syn 1.0.109", ] -[[package]] -name = "mev-rs" -version = "0.3.0" -source = "git+https://github.com/jimmygchen/mev-rs?rev=dedc77a#dedc77a796986603fb3376c5f353863d09e0dbf2" -dependencies = [ - "anvil-rpc", - "async-trait", - "axum", - "beacon-api-client", - 
"ethereum-consensus", - "hyper", - "parking_lot 0.12.1", - "reqwest", - "serde", - "serde_json", - "ssz_rs", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "migrations_internals" version = "2.1.0" @@ -5112,6 +4884,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mock_instant" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c1a54de846c4006b88b1516731cc1f6026eb5dc4bcb186aa071ef66d40524ec" + [[package]] name = "monitoring_api" version = "0.1.0" @@ -5138,24 +4916,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" -[[package]] -name = "multiaddr" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c580bfdd8803cce319b047d239559a22f809094aaea4ac13902a1fdcfcd4261" -dependencies = [ - "arrayref", - "bs58 0.4.0", - "byteorder", - "data-encoding", - "multihash 0.16.3", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.2", - "url", -] - [[package]] name = "multiaddr" version = "0.18.0" @@ -5167,7 +4927,7 @@ dependencies = [ "data-encoding", "libp2p-identity", "multibase", - "multihash 0.19.1", + "multihash", "percent-encoding", "serde", "static_assertions", @@ -5186,19 +4946,6 @@ dependencies = [ "data-encoding-macro", ] -[[package]] -name = "multihash" -version = "0.16.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" -dependencies = [ - "core2", - "digest 0.10.7", - "multihash-derive", - "sha2 0.10.7", - "unsigned-varint 0.7.2", -] - [[package]] name = "multihash" version = "0.19.1" @@ -5209,20 +4956,6 @@ dependencies = [ "unsigned-varint 0.7.2", ] -[[package]] -name = "multihash-derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "multistream-select" version = "0.13.0" @@ -5233,7 +4966,7 @@ dependencies = [ "futures", "log", "pin-project", - "smallvec 1.11.0", + "smallvec", "unsigned-varint 0.7.2", ] @@ -5360,7 +5093,7 @@ dependencies = [ "slog-term", "sloggers", "slot_clock", - "smallvec 1.11.0", + "smallvec", "ssz_types", "store", "strum", @@ -5422,12 +5155,6 @@ dependencies = [ "validator_dir", ] -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - [[package]] name = "nohash-hasher" version = "0.2.0" @@ -5488,7 +5215,7 @@ dependencies = [ "num-traits", "rand", "serde", - "smallvec 1.11.0", + "smallvec", "zeroize", ] @@ -5528,7 +5255,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.3", "libc", ] @@ -5643,9 +5370,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.1.3+3.1.2" +version = "300.1.5+3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd2c101a165fff9935e34def4669595ab1c7847943c42be86e21503e482be107" +checksum = 
"559068e4c12950d7dcaa1857a61725c0d38d4fc03ff8e070ab31a75d6e316491" dependencies = [ "cc", ] @@ -5759,9 +5486,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "e52c774a4c39359c1d1c52e43f73dd91a75a614652c825408eec30c95a9b2067" [[package]] name = "parking_lot" @@ -5794,7 +5521,7 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.11.0", + "smallvec", "winapi", ] @@ -5807,7 +5534,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall 0.3.5", - "smallvec 1.11.0", + "smallvec", "windows-targets 0.48.5", ] @@ -6484,9 +6211,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -6494,14 +6221,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] @@ -6622,7 +6347,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "tower-service", "url", "wasm-bindgen", @@ -6742,7 +6467,7 @@ dependencies = [ "fallible-streaming-iterator", "hashlink 0.8.4", "libsqlite3-sys", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -6811,9 +6536,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.13" +version = "0.38.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" +checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" dependencies = [ "bitflags 2.4.0", "errno", @@ -6845,9 +6570,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.5" +version = "0.101.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" +checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" dependencies = [ "ring", "untrusted", @@ -7027,9 +6752,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" dependencies = [ "serde", ] @@ -7057,17 +6782,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-hex" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca37e3e4d1b39afd7ff11ee4e947efae85adfddf4841787bfa47c470e96dc26d" -dependencies = [ - "array-init", - "serde", - "smallvec 0.6.14", -] - [[package]] name = "serde_array_query" version = "0.1.0" @@ -7188,9 +6902,9 @@ dependencies = [ [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -7303,7 +7017,7 @@ dependencies = [ name = "simulator" version = "0.2.0" dependencies = [ - "clap 2.34.0", + "clap", "env_logger 0.9.3", "eth1", "eth1_test_rig", @@ -7515,18 +7229,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "0.6.14" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "snap" @@ -7543,7 +7248,7 @@ dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.1.0", + "curve25519-dalek 4.1.1", "rand_core 0.6.4", "ring", "rustc_version", @@ -7597,31 +7302,6 @@ dependencies = [ "der 0.7.8", ] -[[package]] -name = "ssz_rs" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" -dependencies = [ - "bitvec 1.0.1", - "hex", - "num-bigint", - "serde", - "sha2 0.9.9", - "ssz_rs_derive", -] - -[[package]] -name = "ssz_rs_derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "ssz_types" version = "0.5.4" @@ -7635,7 +7315,7 @@ dependencies = [ "itertools", "serde", "serde_derive", - "smallvec 1.11.0", + "smallvec", "tree_hash", "typenum", ] @@ -7660,7 +7340,7 @@ dependencies = [ "merkle_proof", "rayon", "safe_arith", - "smallvec 1.11.0", + "smallvec", "ssz_types", "tokio", "tree_hash", @@ -7771,7 +7451,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "smallvec 1.11.0", + "smallvec", "syn 1.0.109", ] @@ -7918,9 +7598,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", - "fastrand 2.0.0", + "fastrand 2.0.1", "redox_syscall 0.3.5", - "rustix 0.38.13", + "rustix 0.38.14", "windows-sys 0.48.0", ] @@ -7937,9 +7617,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" dependencies = [ "winapi-util", ] @@ -8026,9 +7706,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" dependencies = [ "deranged", "itoa", @@ -8041,15 +7721,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = 
"ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] @@ -8129,7 +7809,6 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2 0.5.4", @@ -8190,7 +7869,7 @@ dependencies = [ "rand", "socket2 0.5.4", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "whoami", ] @@ -8213,7 +7892,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.9", ] [[package]] @@ -8234,9 +7913,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", @@ -8384,7 +8063,7 @@ dependencies = [ "once_cell", "regex", "sharded-slab", - "smallvec 1.11.0", + "smallvec", "thread_local", "tracing", "tracing-core", @@ -8418,7 +8097,7 @@ checksum = "5c998ac5fe2b07c025444bdd522e6258110b63861c6698eedc610c071980238d" dependencies = [ "ethereum-types 0.14.1", "ethereum_hashing", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -8459,7 +8138,7 @@ dependencies = [ "ipnet", "lazy_static", "rand", - "smallvec 1.11.0", + "smallvec", "socket2 0.4.9", "thiserror", "tinyvec", @@ -8481,7 +8160,7 @@ dependencies = [ "lru-cache", "parking_lot 0.12.1", "resolv-conf", - "smallvec 1.11.0", + "smallvec", "thiserror", "tokio", "tracing", @@ -8540,7 +8219,7 @@ dependencies = [ "serde_json", "serde_yaml", "slog", - "smallvec 1.11.0", + "smallvec", "ssz_types", "state_processing", "strum", @@ -8604,9 +8283,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" @@ -8670,12 +8349,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf8parse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" - [[package]] name = "uuid" version = "0.8.2" @@ -8693,7 +8366,7 @@ dependencies = [ "account_utils", "bincode", "bls", - "clap 2.34.0", + "clap", "clap_utils", "deposit_contract", "directory", @@ -8766,7 +8439,7 @@ version = "0.1.0" dependencies = [ "account_utils", "bls", - "clap 2.34.0", + "clap", "clap_utils", "environment", "eth2", @@ -8817,9 +8490,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" @@ -8864,7 +8537,7 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-stream", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "tower-service", "tracing", ] 
@@ -9002,7 +8675,7 @@ dependencies = [ "beacon_node", "bls", "byteorder", - "clap 2.34.0", + "clap", "diesel", "diesel_migrations", "env_logger 0.9.3", @@ -9079,7 +8752,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.13", + "rustix 0.38.14", ] [[package]] @@ -9128,9 +8801,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -9420,9 +9093,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.18" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab77e97b50aee93da431f2cee7cd0f43b4d1da3c408042f2d7d164187774f0a" +checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" [[package]] name = "xmltree" diff --git a/Cargo.toml b/Cargo.toml index 7c00d175d..9adb913ff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -137,7 +137,7 @@ r2d2 = "0.8" rand = "0.8" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls"] } +reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } ring = "0.16" rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } @@ -157,7 +157,7 @@ superstruct = "0.6" syn = "1" sysinfo = "0.26" tempfile = "3" -tokio = { version = "1", features = ["rt-multi-thread", "sync"] } +tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.6", features = ["codec", "compat", "time"] } tree_hash = "0.5" diff --git a/Makefile b/Makefile index 0236b4452..05aafa8b3 100644 --- a/Makefile +++ b/Makefile @@ -108,11 +108,21 @@ build-release-tarballs: test-release: cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network +# Runs the full workspace tests in **release**, without downloading any additional +# test vectors, using nextest. +nextest-release: + cargo nextest run --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network + # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. test-debug: cargo test --workspace --exclude ef_tests --exclude beacon_chain --exclude network +# Runs the full workspace tests in **debug**, without downloading any additional test +# vectors, using nextest. +nextest-debug: + cargo nextest run --workspace --exclude ef_tests --exclude beacon_chain --exclude network + # Runs cargo-fmt (linter). 
cargo-fmt: cargo fmt --all -- --check @@ -129,17 +139,25 @@ run-ef-tests: cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests +# Runs EF test vectors with nextest +nextest-run-ef-tests: + rm -rf $(EF_TESTS)/.accessed_file_log.txt + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" + ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests + # Run the tests in the `beacon_chain` crate for all known forks. test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain + env FORK_NAME=$* cargo nextest run --release --features fork_from_env,slasher/lmdb -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) test-op-pool-%: - env FORK_NAME=$* cargo test --release \ + env FORK_NAME=$* cargo nextest run --release \ --features 'beacon_chain/fork_from_env'\ -p operation_pool @@ -147,15 +165,15 @@ test-op-pool-%: test-network: $(patsubst %,test-network-%,$(FORKS)) test-network-%: - env FORK_NAME=$* cargo test --release \ + env FORK_NAME=$* cargo nextest run --release \ --features 'fork_from_env' \ -p network # Run the tests in the `slasher` crate for all supported database backends. test-slasher: - cargo test --release -p slasher --features lmdb - cargo test --release -p slasher --no-default-features --features mdbx - cargo test --release -p slasher --features lmdb,mdbx # both backends enabled + cargo nextest run --release -p slasher --features lmdb + cargo nextest run --release -p slasher --no-default-features --features mdbx + cargo nextest run --release -p slasher --features lmdb,mdbx # both backends enabled # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: @@ -164,6 +182,9 @@ run-state-transition-tests: # Downloads and runs the EF test vectors. test-ef: make-ef-tests run-ef-tests +# Downloads and runs the EF test vectors with nextest. +nextest-ef: make-ef-tests nextest-run-ef-tests + # Runs tests checking interop between Lighthouse and execution clients. test-exec-engine: make -C $(EXECUTION_ENGINE_INTEGRATION) test @@ -213,8 +234,12 @@ arbitrary-fuzz: cargo check -p slashing_protection --features arbitrary-fuzz # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) -audit: +audit: install-audit audit-CI + +install-audit: cargo install --force cargo-audit + +audit-CI: cargo audit # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 4d2271a0a..a4abb9010 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -20,8 +20,8 @@ use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockBuilder, MockBuilderServer, MockExecutionLayer, - DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, + ExecutionBlockGenerator, MockBuilder, MockExecutionLayer, DEFAULT_JWT_SECRET, + DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, }; @@ -663,7 +663,10 @@ where .execution_block_generator() } - pub fn set_mock_builder(&mut self, beacon_url: SensitiveUrl) -> MockBuilderServer { + pub fn set_mock_builder( + &mut self, + beacon_url: SensitiveUrl, + ) -> impl futures::Future { let mock_el = self .mock_execution_layer .as_ref() @@ -672,7 +675,7 @@ where let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); // Create the builder, listening on a free port. - let (mock_builder, mock_builder_server) = MockBuilder::new_for_testing( + let (mock_builder, (addr, mock_builder_server)) = MockBuilder::new_for_testing( mock_el_url, beacon_url, self.spec.clone(), @@ -680,8 +683,7 @@ where ); // Set the builder URL in the execution layer now that its port is known. - let builder_listen_addr = mock_builder_server.local_addr(); - let port = builder_listen_addr.port(); + let port = addr.port(); mock_el .el .set_builder_url( diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 60c97b028..46c585bb0 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -42,12 +42,6 @@ lazy_static = { workspace = true } ethers-core = { workspace = true } builder_client = { path = "../builder_client" } fork_choice = { workspace = true } -#PR: https://github.com/ralexstokes/mev-rs/pull/124 -mev-rs = { git = "https://github.com/jimmygchen/mev-rs", rev = "dedc77a" } -axum = "0.6" -hyper = "0.14" -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "12508c1f9b0c8f4bf4c5e9b6d441e840c1b37fd9" } -ssz_rs = "0.9.0" tokio-stream = { workspace = true } strum = { workspace = true } keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index d06c9be4f..1e45ef272 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,51 +1,30 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes}; -use async_trait::async_trait; use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; -pub use ethereum_consensus::state_transition::Context; -use ethereum_consensus::{ - crypto::{SecretKey, Signature}, - primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256}, - state_transition::Error, -}; use fork_choice::ForkchoiceUpdateParameters; -use mev_rs::{ - blinded_block_provider::Server as BlindedBlockProviderServer, - signing::{sign_builder_message, verify_signed_builder_message}, - types::{ - bellatrix::{ - BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, - }, - capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, - deneb::{BuilderBid as 
BuilderBidDeneb, SignedBuilderBid as SignedBuilderBidDeneb}, - BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, - SignedBuilderBid, SignedValidatorRegistration, - }, - Error as MevError, -}; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; -use ssz::{Decode, Encode}; -use ssz_rs::{Merkleized, SimpleSerialize}; use std::collections::HashMap; use std::fmt::Debug; -use std::net::Ipv4Addr; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; -use types::builder_bid::BlindedBlobsBundle; +use types::builder_bid::{ + BuilderBid, BuilderBidCapella, BuilderBidDeneb, BuilderBidMerge, SignedBuilderBid, +}; use types::{ Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeader, ForkName, ForkVersionedResponse, Hash256, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ForkName, ForkVersionedResponse, Hash256, PublicKeyBytes, + Signature, SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot, + Uint256, }; - -pub type MockBuilderServer = axum::Server< - hyper::server::conn::AddrIncoming, - axum::routing::IntoMakeService, ->; +use types::{ExecutionBlockHash, SecretKey}; +use warp::{Filter, Rejection}; #[derive(Clone)] pub enum Operation { @@ -60,115 +39,156 @@ pub enum Operation { } impl Operation { - fn apply(self, bid: &mut B) -> Result<(), MevError> { + fn apply>(self, bid: &mut B) { match self { - Operation::FeeRecipient(fee_recipient) => { - *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? - } - Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64, - Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?, - Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?, - Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?, - Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64, - Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64, - Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?, + Operation::FeeRecipient(fee_recipient) => bid.set_fee_recipient(fee_recipient), + Operation::GasLimit(gas_limit) => bid.set_gas_limit(gas_limit as u64), + Operation::Value(value) => bid.set_value(value), + Operation::ParentHash(parent_hash) => bid.set_parent_hash(parent_hash), + Operation::PrevRandao(prev_randao) => bid.set_prev_randao(prev_randao), + Operation::BlockNumber(block_number) => bid.set_block_number(block_number as u64), + Operation::Timestamp(timestamp) => bid.set_timestamp(timestamp as u64), + Operation::WithdrawalsRoot(root) => bid.set_withdrawals_root(root), } - Ok(()) } } +#[derive(Debug)] +struct Custom(String); + +impl warp::reject::Reject for Custom {} + // contains functions we need for BuilderBids.. 
not sure what to call this -pub trait BidStuff { - fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress; - fn gas_limit_mut(&mut self) -> &mut u64; - fn value_mut(&mut self) -> &mut U256; - fn parent_hash_mut(&mut self) -> &mut Hash32; - fn prev_randao_mut(&mut self) -> &mut Hash32; - fn block_number_mut(&mut self) -> &mut u64; - fn timestamp_mut(&mut self) -> &mut u64; - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; +pub trait BidStuff { + fn set_fee_recipient(&mut self, fee_recipient_address: Address); + fn set_gas_limit(&mut self, gas_limit: u64); + fn set_value(&mut self, value: Uint256); + fn set_parent_hash(&mut self, parent_hash: Hash256); + fn set_prev_randao(&mut self, randao: Hash256); + fn set_block_number(&mut self, block_number: u64); + fn set_timestamp(&mut self, timestamp: u64); + fn set_withdrawals_root(&mut self, withdrawals_root: Hash256); - fn sign_builder_message( - &mut self, - signing_key: &SecretKey, - context: &Context, - ) -> Result; + fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature; - fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid; + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid; } -macro_rules! map_builder_bid { - ($self_ident:ident, |$var:ident| $expr:expr) => { - match $self_ident { - BuilderBid::Bellatrix($var) => $expr, - BuilderBid::Capella($var) => $expr, - BuilderBid::Deneb($var) => $expr, - } - }; -} - -impl BidStuff for BuilderBid { - fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress { - map_builder_bid!(self, |bid| &mut bid.header.fee_recipient) - } - - fn gas_limit_mut(&mut self) -> &mut u64 { - map_builder_bid!(self, |bid| &mut bid.header.gas_limit) - } - - fn value_mut(&mut self) -> &mut U256 { - map_builder_bid!(self, |bid| &mut bid.value) - } - - fn parent_hash_mut(&mut self) -> &mut Hash32 { - map_builder_bid!(self, |bid| &mut bid.header.parent_hash) - } - - fn prev_randao_mut(&mut self) -> &mut Hash32 { - map_builder_bid!(self, |bid| &mut bid.header.prev_randao) - } - - fn block_number_mut(&mut self) -> &mut u64 { - map_builder_bid!(self, |bid| &mut bid.header.block_number) - } - - fn timestamp_mut(&mut self) -> &mut u64 { - map_builder_bid!(self, |bid| &mut bid.header.timestamp) - } - - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { - match self { - Self::Bellatrix(_) => Err(MevError::InvalidFork), - Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), - Self::Deneb(bid) => Ok(&mut bid.header.withdrawals_root), +impl BidStuff for BuilderBid { + fn set_fee_recipient(&mut self, fee_recipient: Address) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.fee_recipient = fee_recipient; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.fee_recipient = fee_recipient; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.fee_recipient = fee_recipient; + } } } - fn sign_builder_message( - &mut self, - signing_key: &SecretKey, - context: &Context, - ) -> Result { - map_builder_bid!(self, |message| sign_builder_message( - message, - signing_key, - context - )) + fn set_gas_limit(&mut self, gas_limit: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.gas_limit = gas_limit; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.gas_limit = gas_limit; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.gas_limit = gas_limit; + } + } } - fn to_signed_bid(self, signature: Signature) 
-> SignedBuilderBid { - match self { - Self::Bellatrix(message) => { - SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature }) + fn set_value(&mut self, value: Uint256) { + *self.value_mut() = value; + } + + fn set_parent_hash(&mut self, parent_hash: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); } - Self::Capella(message) => { - SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature }) + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); } - Self::Deneb(message) => { - SignedBuilderBid::Deneb(SignedBuilderBidDeneb { message, signature }) + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); } } } + + fn set_prev_randao(&mut self, prev_randao: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.prev_randao = prev_randao; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.prev_randao = prev_randao; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.prev_randao = prev_randao; + } + } + } + + fn set_block_number(&mut self, block_number: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.block_number = block_number; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.block_number = block_number; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.block_number = block_number; + } + } + } + + fn set_timestamp(&mut self, timestamp: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.timestamp = timestamp; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.timestamp = timestamp; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.timestamp = timestamp; + } + } + } + + fn set_withdrawals_root(&mut self, withdrawals_root: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(_) => { + panic!("no withdrawals before capella") + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.withdrawals_root = withdrawals_root; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.withdrawals_root = withdrawals_root; + } + } + } + + fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature { + let domain = spec.get_builder_domain(); + let message = self.signing_root(domain); + sk.sign(message) + } + + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { + SignedBuilderBid { + message: self, + signature, + } + } } #[derive(Clone)] @@ -176,8 +196,7 @@ pub struct MockBuilder { el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, spec: ChainSpec, - context: Arc, - val_registration_cache: Arc>>, + val_registration_cache: Arc>>, builder_sk: SecretKey, operations: Arc>>, invalidate_signatures: Arc>, @@ -189,7 +208,7 @@ impl MockBuilder { beacon_url: SensitiveUrl, spec: ChainSpec, executor: TaskExecutor, - ) -> (Self, MockBuilderServer) { + ) -> (Self, (SocketAddr, impl Future)) { let file = NamedTempFile::new().unwrap(); let path = file.path().into(); std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); @@ -205,23 +224,14 @@ impl MockBuilder { let el = ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); - // This should probably be done for all fields, we only 
update ones we are testing with so far. - let mut context = Context::for_mainnet(); - context.terminal_total_difficulty = to_ssz_rs(&spec.terminal_total_difficulty).unwrap(); - context.terminal_block_hash = to_ssz_rs(&spec.terminal_block_hash).unwrap(); - context.terminal_block_hash_activation_epoch = - to_ssz_rs(&spec.terminal_block_hash_activation_epoch).unwrap(); - let builder = MockBuilder::new( el, BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), spec, - context, ); let host: Ipv4Addr = Ipv4Addr::LOCALHOST; let port = 0; - let provider = BlindedBlockProviderServer::new(host, port, builder.clone()); - let server = provider.serve(); + let server = serve(host, port, builder.clone()).expect("mock builder server should start"); (builder, server) } @@ -229,15 +239,13 @@ impl MockBuilder { el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, spec: ChainSpec, - context: Context, ) -> Self { - let sk = SecretKey::random(&mut rand::thread_rng()).unwrap(); + let sk = SecretKey::random(); Self { el, beacon_client, // Should keep spec and context consistent somehow spec, - context: Arc::new(context), val_registration_cache: Arc::new(RwLock::new(HashMap::new())), builder_sk: sk, operations: Arc::new(RwLock::new(vec![])), @@ -259,296 +267,335 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut B) -> Result<(), MevError> { + fn apply_operations>(&self, bid: &mut B) { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { - op.apply(bid)?; + op.apply(bid); } - Ok(()) - } - - pub fn pubkey(&self) -> ethereum_consensus::crypto::PublicKey { - self.builder_sk.public_key() } } -#[async_trait] -impl mev_rs::BlindedBlockProvider for MockBuilder { - async fn register_validators( - &self, - registrations: &mut [SignedValidatorRegistration], - ) -> Result<(), MevError> { - for registration in registrations { - let pubkey = registration.message.public_key.clone(); - let message = &mut registration.message; - verify_signed_builder_message( - message, - ®istration.signature, - &pubkey, - &self.context, - )?; - self.val_registration_cache.write().insert( - registration.message.public_key.clone(), - registration.clone(), - ); - } +pub fn serve( + listen_addr: Ipv4Addr, + listen_port: u16, + builder: MockBuilder, +) -> Result<(SocketAddr, impl Future), crate::test_utils::Error> { + let inner_ctx = builder.clone(); + let ctx_filter = warp::any().map(move || inner_ctx.clone()); - Ok(()) - } + let prefix = warp::path("eth") + .and(warp::path("v1")) + .and(warp::path("builder")); - async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result { - let slot = Slot::new(bid_request.slot); - let fork = self.spec.fork_name_at_slot::(slot); - let signed_cached_data = self - .val_registration_cache - .read() - .get(&bid_request.public_key) - .ok_or_else(|| convert_err("missing registration"))? 
- .clone(); - let cached_data = signed_cached_data.message; + let validators = prefix + .and(warp::path("validators")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |registrations: Vec, builder: MockBuilder| async move { + for registration in registrations { + if !registration.verify_signature(&builder.spec) { + return Err(reject("invalid signature")); + } + builder + .val_registration_cache + .write() + .insert(registration.message.pubkey, registration); + } + Ok(warp::reply()) + }, + ); - let head = self - .beacon_client - .get_beacon_blocks::(BlockId::Head) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing head block"))?; + let blinded_block = prefix + .and(warp::path("blinded_blocks")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |block: SignedBlindedBeaconBlock, builder: MockBuilder| async move { + let slot = block.slot(); + let root = match block { + SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => { + return Err(reject("invalid fork")); + } + SignedBlindedBeaconBlock::Merge(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Deneb(block) => { + block.message.body.execution_payload.tree_hash_root() + } + }; - let block = head.data.message(); - let head_block_root = block.tree_hash_root(); - let head_execution_hash = block - .body() - .execution_payload() - .map_err(convert_err)? - .block_hash(); - if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { - return Err(custom_err(format!( - "head mismatch: {} {}", - head_execution_hash, bid_request.parent_hash - ))); - } + let fork_name = builder.spec.fork_name_at_slot::(slot); + let payload = builder + .el + .get_payload_by_root(&root) + .ok_or_else(|| reject("missing payload for tx root"))?; + let resp = ForkVersionedResponse { + version: Some(fork_name), + data: payload, + }; - let finalized_execution_hash = self - .beacon_client - .get_beacon_blocks::(BlockId::Finalized) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing finalized block"))? - .data - .message() - .body() - .execution_payload() - .map_err(convert_err)? - .block_hash(); + let json_payload = serde_json::to_string(&resp) + .map_err(|_| reject("coudn't serialize response"))?; + Ok::<_, warp::reject::Rejection>( + warp::http::Response::builder() + .status(200) + .body( + serde_json::to_string(&json_payload) + .map_err(|_| reject("nvalid JSON"))?, + ) + .unwrap(), + ) + }, + ); - let justified_execution_hash = self - .beacon_client - .get_beacon_blocks::(BlockId::Justified) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing finalized block"))? - .data - .message() - .body() - .execution_payload() - .map_err(convert_err)? - .block_hash(); + let status = prefix + .and(warp::path("status")) + .then(|| async { warp::reply() }); - let val_index = self - .beacon_client - .get_beacon_states_validator_id( - StateId::Head, - &ValidatorId::PublicKey(from_ssz_rs(&cached_data.public_key)?), - ) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing validator from state"))? 
- .data - .index; - let fee_recipient = from_ssz_rs(&cached_data.fee_recipient)?; - let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + let header = prefix + .and(warp::path("header")) + .and(warp::path::param::().or_else(|_| async { Err(reject("Invalid slot")) })) + .and( + warp::path::param::() + .or_else(|_| async { Err(reject("Invalid parent hash")) }), + ) + .and( + warp::path::param::() + .or_else(|_| async { Err(reject("Invalid pubkey")) }), + ) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: PublicKeyBytes, + builder: MockBuilder| async move { + let fork = builder.spec.fork_name_at_slot::(slot); + let signed_cached_data = builder + .val_registration_cache + .read() + .get(&pubkey) + .ok_or_else(|| reject("missing registration"))? + .clone(); + let cached_data = signed_cached_data.message; - let genesis_time = self - .beacon_client - .get_beacon_genesis() - .await - .map_err(convert_err)? - .data - .genesis_time; - let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; - - let head_state: BeaconState = self - .beacon_client - .get_debug_beacon_states(StateId::Head) - .await - .map_err(convert_err)? - .ok_or_else(|| custom_err("missing head state".to_string()))? - .data; - let prev_randao = head_state - .get_randao_mix(head_state.current_epoch()) - .map_err(convert_err)?; - let expected_withdrawals = match fork { - ForkName::Base | ForkName::Altair | ForkName::Merge => None, - ForkName::Capella | ForkName::Deneb => Some( - self.beacon_client - .get_expected_withdrawals(&StateId::Head) + let head = builder + .beacon_client + .get_beacon_blocks::(BlockId::Head) .await - .unwrap() - .data, - ), - }; + .map_err(|_| reject("couldn't get head"))? + .ok_or_else(|| reject("missing head block"))?; - let payload_attributes = match fork { - // the withdrawals root is filled in by operations, but we supply the valid withdrawals - // first to avoid polluting the execution block generator with invalid payload attributes - // NOTE: this was part of an effort to add payload attribute uniqueness checks, - // which was abandoned because it broke too many tests in subtle ways. - ForkName::Merge | ForkName::Capella => PayloadAttributes::new( - timestamp, - *prev_randao, - fee_recipient, - expected_withdrawals, - None, - ), - ForkName::Deneb => PayloadAttributes::new( - timestamp, - *prev_randao, - fee_recipient, - expected_withdrawals, - Some(head_block_root), - ), - ForkName::Base | ForkName::Altair => { - return Err(MevError::InvalidFork); - } - }; + let block = head.data.message(); + let head_block_root = block.tree_hash_root(); + let head_execution_hash = block + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); + if head_execution_hash != parent_hash { + return Err(reject("head mismatch")); + } - self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) - .await; + let finalized_execution_hash = builder + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(|_| reject("couldn't get finalized block"))? + .ok_or_else(|| reject("missing finalized block"))? + .data + .message() + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? 
+ .block_hash(); - let forkchoice_update_params = ForkchoiceUpdateParameters { - head_root: Hash256::zero(), - head_hash: None, - justified_hash: Some(justified_execution_hash), - finalized_hash: Some(finalized_execution_hash), - }; + let justified_execution_hash = builder + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(|_| reject("couldn't get justified block"))? + .ok_or_else(|| reject("missing justified block"))? + .data + .message() + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); - let (payload, _block_value, maybe_blobs_bundle): ( - ExecutionPayload, - Uint256, - Option>, - ) = self - .el - .get_full_payload_caching( - head_execution_hash, - &payload_attributes, - forkchoice_update_params, - fork, - ) - .await - .map_err(convert_err)? - .into(); + let val_index = builder + .beacon_client + .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(pubkey)) + .await + .map_err(|_| reject("couldn't get validator"))? + .ok_or_else(|| reject("missing validator"))? + .data + .index; + let fee_recipient = cached_data.fee_recipient; + let slots_since_genesis = slot.as_u64() - builder.spec.genesis_slot.as_u64(); - let header = match payload { - ExecutionPayload::Merge(payload) => ExecutionPayloadHeader::Merge((&payload).into()), - ExecutionPayload::Capella(payload) => { - ExecutionPayloadHeader::Capella((&payload).into()) - } - ExecutionPayload::Deneb(payload) => ExecutionPayloadHeader::Deneb((&payload).into()), - }; + let genesis_data = builder + .beacon_client + .get_beacon_genesis() + .await + .map_err(|_| reject("couldn't get beacon genesis"))? + .data; + let genesis_time = genesis_data.genesis_time; + let timestamp = + (slots_since_genesis * builder.spec.seconds_per_slot) + genesis_time; - let mut message = match fork { - ForkName::Deneb => { - let blinded_blobs: BlindedBlobsBundle = - maybe_blobs_bundle.map(Into::into).unwrap_or_default(); - BuilderBid::Deneb(BuilderBidDeneb { - header: to_ssz_rs(&header)?, - blinded_blobs_bundle: to_ssz_rs(&blinded_blobs)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }) - } - ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { - header: to_ssz_rs(&header)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }), - ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix { - header: to_ssz_rs(&header)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }), - ForkName::Base | ForkName::Altair => return Err(MevError::InvalidFork), - }; - *message.gas_limit_mut() = cached_data.gas_limit; + let head_state: BeaconState = builder + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(|_| reject("couldn't get state"))? + .ok_or_else(|| reject("missing state"))? 
+ .data; + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(|_| reject("couldn't get prev randao"))?; + let expected_withdrawals = match fork { + ForkName::Base | ForkName::Altair | ForkName::Merge => None, + ForkName::Capella | ForkName::Deneb => Some( + builder + .beacon_client + .get_expected_withdrawals(&StateId::Head) + .await + .unwrap() + .data, + ), + }; - self.apply_operations(&mut message)?; - let mut signature = - message.sign_builder_message(&self.builder_sk, self.context.as_ref())?; + let payload_attributes = match fork { + // the withdrawals root is filled in by operations, but we supply the valid withdrawals + // first to avoid polluting the execution block generator with invalid payload attributes + // NOTE: this was part of an effort to add payload attribute uniqueness checks, + // which was abandoned because it broke too many tests in subtle ways. + ForkName::Merge | ForkName::Capella => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + None, + ), + ForkName::Deneb => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + ), + ForkName::Base | ForkName::Altair => { + return Err(reject("invalid fork")); + } + }; - if *self.invalidate_signatures.read() { - signature = Signature::default(); - } + builder + .el + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) + .await; - Ok(message.to_signed_bid(signature)) - } + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: Hash256::zero(), + head_hash: None, + justified_hash: Some(justified_execution_hash), + finalized_hash: Some(finalized_execution_hash), + }; - async fn open_bid( - &self, - signed_block: &mut SignedBlindedBeaconBlock, - ) -> Result { - let node = match signed_block { - SignedBlindedBeaconBlock::Bellatrix(block) => { - block.message.body.execution_payload_header.hash_tree_root() - } - SignedBlindedBeaconBlock::Capella(block) => { - block.message.body.execution_payload_header.hash_tree_root() - } - SignedBlindedBeaconBlock::Deneb(block_and_blobs) => block_and_blobs - .signed_blinded_block - .message - .body - .execution_payload_header - .hash_tree_root(), - } - .map_err(convert_err)?; + let (payload, _block_value, maybe_blobs_bundle): ( + ExecutionPayload, + Uint256, + Option>, + ) = builder + .el + .get_full_payload_caching( + head_execution_hash, + &payload_attributes, + forkchoice_update_params, + fork, + ) + .await + .map_err(|_| reject("couldn't get payload"))? + .into(); - let payload = self - .el - .get_payload_by_root(&from_ssz_rs(&node)?) - .ok_or_else(|| convert_err("missing payload for tx root"))?; + let mut message = match fork { + ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { + header: payload + .as_deneb() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + blinded_blobs_bundle: maybe_blobs_bundle + .map(Into::into) + .unwrap_or_default(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: payload + .as_capella() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Merge => BuilderBid::Merge(BuilderBidMerge { + header: payload + .as_merge() + .map_err(|_| reject("incorrect payload variant"))? 
+ .into(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Base | ForkName::Altair => return Err(reject("invalid fork")), + }; - let fork = payload.payload_ref().fork_name(); - let resp = ForkVersionedResponse { - version: Some(fork), - data: payload, - }; + message.set_gas_limit(cached_data.gas_limit); - let json_payload = serde_json::to_string(&resp).map_err(convert_err)?; - serde_json::from_str(json_payload.as_str()).map_err(convert_err) - } + builder.apply_operations(&mut message); + + let mut signature = + message.sign_builder_message(&builder.builder_sk, &builder.spec); + + if *builder.invalidate_signatures.read() { + signature = Signature::empty(); + } + + let fork_name = builder + .spec + .fork_name_at_epoch(slot.epoch(E::slots_per_epoch())); + let signed_bid = SignedBuilderBid { message, signature }; + let resp = ForkVersionedResponse { + version: Some(fork_name), + data: signed_bid, + }; + let json_bid = serde_json::to_string(&resp) + .map_err(|_| reject("coudn't serialize signed bid"))?; + Ok::<_, Rejection>( + warp::http::Response::builder() + .status(200) + .body(json_bid) + .unwrap(), + ) + }, + ); + + let routes = warp::post() + .and(validators.or(blinded_block)) + .or(warp::get().and(status).or(header)) + .map(|reply| warp::reply::with_header(reply, "Server", "lighthouse-mock-builder-server")); + + let (listening_socket, server) = warp::serve(routes) + .try_bind_ephemeral(SocketAddrV4::new(listen_addr, listen_port)) + .expect("mock builder server should start"); + Ok((listening_socket, server)) } -pub fn from_ssz_rs(ssz_rs_data: &T) -> Result { - U::from_ssz_bytes( - ssz_rs::serialize(ssz_rs_data) - .map_err(convert_err)? - .as_ref(), - ) - .map_err(convert_err) -} - -pub fn to_ssz_rs(ssz_data: &T) -> Result { - ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) -} - -pub fn convert_err(e: E) -> MevError { - custom_err(format!("{e:?}")) -} - -// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`. 
-pub fn custom_err(s: String) -> MevError { - MevError::Consensus(ethereum_consensus::state_transition::Error::Io( - std::io::Error::new(std::io::ErrorKind::Other, s), - )) +fn reject(msg: &'static str) -> Rejection { + warp::reject::custom(Custom(msg.to_string())) } diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index b322eb221..f56a04b07 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -29,10 +29,7 @@ pub use execution_block_generator::{ Block, ExecutionBlockGenerator, }; pub use hook::Hook; -pub use mock_builder::{ - convert_err, custom_err, from_ssz_rs, to_ssz_rs, Context as MockBuilderContext, MockBuilder, - MockBuilderServer, Operation, -}; +pub use mock_builder::{MockBuilder, Operation}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 005604391..ca81ac508 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1632,13 +1632,19 @@ pub fn serve( |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (block, execution_optimistic, finalized) = - block_id.blinded_block(&chain)?; - Ok(api_types::GenericResponse::from(api_types::RootData::from( - block.canonical_root(), - )) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + // Prioritise requests for the head block root, as it is used by some VCs (including + // the Lighthouse VC) to create sync committee messages. + let priority = if let BlockId(eth2::types::BlockId::Head) = block_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; + Ok( + api_types::GenericResponse::from(api_types::RootData::from(block_root)) + .add_execution_optimistic_finalized(execution_optimistic, finalized), + ) }) }, ); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index a1576d33d..82a3fe6ee 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -267,11 +267,7 @@ impl ApiTester { // Start the mock builder service prior to building the chain out. harness.runtime.task_executor.spawn( - async move { - if let Err(e) = mock_builder_server.await { - panic!("error in mock builder server: {e:?}"); - } - }, + async move { mock_builder_server.await }, "mock_builder_server", ); diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 30c949407..472f3ef75 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -11,11 +11,17 @@ use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use std::net::{Ipv4Addr, Ipv6Addr}; +use std::num::NonZeroU16; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use types::{ForkContext, ForkName}; +pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; +pub const DEFAULT_TCP_PORT: u16 = 9000u16; +pub const DEFAULT_DISC_PORT: u16 = 9000u16; +pub const DEFAULT_QUIC_PORT: u16 = 9001u16; + /// The cache time is set to accommodate the circulation time of an attestation. 
/// /// The p2p spec declares that we accept attestations within the following range: @@ -59,22 +65,22 @@ pub struct Config { pub enr_address: (Option, Option), /// The udp ipv4 port to broadcast to peers in order to reach back for discovery. - pub enr_udp4_port: Option, + pub enr_udp4_port: Option, /// The quic ipv4 port to broadcast to peers in order to reach back for libp2p services. - pub enr_quic4_port: Option, + pub enr_quic4_port: Option, /// The tcp ipv4 port to broadcast to peers in order to reach back for libp2p services. - pub enr_tcp4_port: Option, + pub enr_tcp4_port: Option, /// The udp ipv6 port to broadcast to peers in order to reach back for discovery. - pub enr_udp6_port: Option, + pub enr_udp6_port: Option, /// The tcp ipv6 port to broadcast to peers in order to reach back for libp2p services. - pub enr_tcp6_port: Option, + pub enr_tcp6_port: Option, /// The quic ipv6 port to broadcast to peers in order to reach back for libp2p services. - pub enr_quic6_port: Option, + pub enr_quic6_port: Option, /// Target number of connected peers. pub target_peers: usize, @@ -304,10 +310,10 @@ impl Default for Config { .expect("The total rate limit has been specified"), ); let listen_addresses = ListenAddress::V4(ListenAddr { - addr: Ipv4Addr::UNSPECIFIED, - disc_port: 9000, - quic_port: 9001, - tcp_port: 9000, + addr: DEFAULT_IPV4_ADDRESS, + disc_port: DEFAULT_DISC_PORT, + quic_port: DEFAULT_QUIC_PORT, + tcp_port: DEFAULT_TCP_PORT, }); let discv5_listen_config = diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 3f46285a8..8eacabb4d 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -158,11 +158,11 @@ pub fn create_enr_builder_from_config( } if let Some(udp4_port) = config.enr_udp4_port { - builder.udp4(udp4_port); + builder.udp4(udp4_port.get()); } if let Some(udp6_port) = config.enr_udp6_port { - builder.udp6(udp6_port); + builder.udp6(udp6_port.get()); } if enable_libp2p { @@ -171,35 +171,45 @@ pub fn create_enr_builder_from_config( // the related fields should only be added when both QUIC and libp2p are enabled if !config.disable_quic_support { // If we are listening on ipv4, add the quic ipv4 port. - if let Some(quic4_port) = config - .enr_quic4_port - .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.quic_port)) - { - builder.add_value(QUIC_ENR_KEY, &quic4_port); + if let Some(quic4_port) = config.enr_quic4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC_ENR_KEY, &quic4_port.get()); } // If we are listening on ipv6, add the quic ipv6 port. - if let Some(quic6_port) = config - .enr_quic6_port - .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.quic_port)) - { - builder.add_value(QUIC6_ENR_KEY, &quic6_port); + if let Some(quic6_port) = config.enr_quic6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC6_ENR_KEY, &quic6_port.get()); } } // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. 
- let tcp4_port = config - .enr_tcp4_port - .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port)); + let tcp4_port = config.enr_tcp4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.tcp_port.try_into().ok()) + }); if let Some(tcp4_port) = tcp4_port { - builder.tcp4(tcp4_port); + builder.tcp4(tcp4_port.get()); } - let tcp6_port = config - .enr_tcp6_port - .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port)); + let tcp6_port = config.enr_tcp6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.tcp_port.try_into().ok()) + }); if let Some(tcp6_port) = tcp6_port { - builder.tcp6(tcp6_port); + builder.tcp6(tcp6_port.get()); } } builder diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 4d8807336..388790568 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -21,10 +21,11 @@ pub use libp2p::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; +use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; use libp2p::swarm::THandlerInEvent; pub use libp2p::{ - core::{ConnectedPoint, Multiaddr}, + core::{transport::ListenerId, ConnectedPoint, Multiaddr}, identity::PeerId, swarm::{ dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, @@ -77,6 +78,19 @@ pub struct DiscoveredPeers { pub peers: HashMap>, } +/// Specifies which port numbers should be modified after the start of the discovery service +#[derive(Debug)] +pub struct UpdatePorts { + /// TCP port associated with IPv4 address (if present) + pub tcp4: bool, + /// TCP port associated with IPv6 address (if present) + pub tcp6: bool, + /// QUIC port associated with IPv4 address (if present) + pub quic4: bool, + /// QUIC port associated with IPv6 address (if present) + pub quic6: bool, +} + #[derive(Clone, PartialEq)] struct SubnetQuery { subnet: Subnet, @@ -177,12 +191,8 @@ pub struct Discovery { /// always false. pub started: bool, - /// This keeps track of whether an external UDP port change should also indicate an internal - /// TCP port change. As we cannot detect our external TCP port, we assume that the external UDP - /// port is also our external TCP port. This assumption only holds if the user has not - /// explicitly set their ENR TCP port via the CLI config. The first indicates tcp4 and the - /// second indicates tcp6. - update_tcp_port: (bool, bool), + /// Specifies whether various port numbers should be updated after the discovery service has been started + update_ports: UpdatePorts, /// Logger for the discovery behaviour.
log: slog::Logger, @@ -300,10 +310,12 @@ impl Discovery { } } - let update_tcp_port = ( - config.enr_tcp4_port.is_none(), - config.enr_tcp6_port.is_none(), - ); + let update_ports = UpdatePorts { + tcp4: config.enr_tcp4_port.is_none(), + tcp6: config.enr_tcp6_port.is_none(), + quic4: config.enr_quic4_port.is_none(), + quic6: config.enr_quic6_port.is_none(), + }; Ok(Self { cached_enrs: LruCache::new(50), @@ -314,7 +326,7 @@ impl Discovery { discv5, event_stream, started: !config.disable_discovery, - update_tcp_port, + update_ports, log, enr_dir, }) @@ -555,8 +567,6 @@ impl Discovery { if let Ok(node_id) = peer_id_to_node_id(peer_id) { // If we could convert this peer id, remove it from the DHT and ban it from discovery. self.discv5.ban_node(&node_id, None); - // Remove the node from the routing table. - self.discv5.remove_node(&node_id); } for ip_address in ip_addresses { @@ -1006,8 +1016,8 @@ impl NetworkBehaviour for Discovery { // Discv5 will have updated our local ENR. We save the updated version // to disk. - if (self.update_tcp_port.0 && socket_addr.is_ipv4()) - || (self.update_tcp_port.1 && socket_addr.is_ipv6()) + if (self.update_ports.tcp4 && socket_addr.is_ipv4()) + || (self.update_ports.tcp6 && socket_addr.is_ipv6()) { // Update the TCP port in the ENR self.discv5.update_local_enr_socket(socket_addr, true); @@ -1036,12 +1046,79 @@ impl NetworkBehaviour for Discovery { FromSwarm::DialFailure(DialFailure { peer_id, error, .. }) => { self.on_dial_failure(peer_id, error) } + FromSwarm::NewListenAddr(ev) => { + let addr = ev.addr; + let listener_id = ev.listener_id; + + trace!(self.log, "Received NewListenAddr event from swarm"; "listener_id" => ?listener_id, "addr" => ?addr); + + let mut addr_iter = addr.iter(); + + let attempt_enr_update = match addr_iter.next() { + Some(Protocol::Ip4(_)) => match (addr_iter.next(), addr_iter.next()) { + (Some(Protocol::Tcp(port)), None) => { + if !self.update_ports.tcp4 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_tcp_port(port) + } + (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { + if !self.update_ports.quic4 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_quic_port(port) + } + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + return; + } + }, + Some(Protocol::Ip6(_)) => match (addr_iter.next(), addr_iter.next()) { + (Some(Protocol::Tcp(port)), None) => { + if !self.update_ports.tcp6 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_tcp_port(port) + } + (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { + if !self.update_ports.quic6 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_quic_port(port) + } + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + return; + } + }, + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (no IP)"; "addr" => ?addr); + return; + } + }; + + let local_enr: Enr = self.discv5.local_enr(); + + match attempt_enr_update { + Ok(_) => { + info!(self.log, "Updated local ENR"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()) + } + Err(e) => warn!(self.log, 
"Failed to update ENR"; "error" => ?e), + } + } FromSwarm::ConnectionEstablished(_) | FromSwarm::ConnectionClosed(_) | FromSwarm::AddressChange(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 9006d6486..93e0f77f8 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -415,7 +415,7 @@ impl PeerManager { /// Reports if a peer is banned or not. /// /// This is used to determine if we should accept incoming connections. - pub fn ban_status(&self, peer_id: &PeerId) -> BanResult { + pub fn ban_status(&self, peer_id: &PeerId) -> Option { self.network_globals.peers.read().ban_status(peer_id) } @@ -815,7 +815,7 @@ impl PeerManager { ) -> bool { { let mut peerdb = self.network_globals.peers.write(); - if !matches!(peerdb.ban_status(peer_id), BanResult::NotBanned) { + if peerdb.ban_status(peer_id).is_some() { // don't connect if the peer is banned error!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index fedb876bb..0617c8fa3 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -1,5 +1,6 @@ //! Implementation of [`NetworkBehaviour`] for the [`PeerManager`]. +use std::net::IpAddr; use std::task::{Context, Poll}; use futures::StreamExt; @@ -8,17 +9,17 @@ use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; -use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; -use slog::{debug, error}; +use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; +use slog::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; +use crate::peer_manager::peerdb::BanResult; use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; -use super::peerdb::BanResult; -use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; +use super::{ConnectingType, PeerManager, PeerManagerEvent}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = ConnectionHandler; @@ -169,26 +170,64 @@ impl NetworkBehaviour for PeerManager { } } + fn handle_pending_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _local_addr: &libp2p::Multiaddr, + remote_addr: &libp2p::Multiaddr, + ) -> Result<(), ConnectionDenied> { + // get the IP address to verify it's not banned. 
+ let ip = match remote_addr.iter().next() { + Some(libp2p::multiaddr::Protocol::Ip6(ip)) => IpAddr::V6(ip), + Some(libp2p::multiaddr::Protocol::Ip4(ip)) => IpAddr::V4(ip), + _ => { + return Err(ConnectionDenied::new(format!( + "Connection to peer rejected: invalid multiaddr: {remote_addr}" + ))) + } + }; + + if self.network_globals.peers.read().is_ip_banned(&ip) { + return Err(ConnectionDenied::new(format!( + "Connection to peer rejected: peer {ip} is banned" + ))); + } + + Ok(()) + } + fn handle_established_inbound_connection( &mut self, _connection_id: ConnectionId, - _peer: PeerId, + peer_id: PeerId, _local_addr: &libp2p::Multiaddr, - _remote_addr: &libp2p::Multiaddr, - ) -> Result, libp2p::swarm::ConnectionDenied> { - // TODO: we might want to check if we accept this peer or not in the future. + remote_addr: &libp2p::Multiaddr, + ) -> Result, ConnectionDenied> { + trace!(self.log, "Inbound connection"; "peer_id" => %peer_id, "multiaddr" => %remote_addr); + // We already checked if the peer was banned on `handle_pending_inbound_connection`. + if let Some(BanResult::BadScore) = self.ban_status(&peer_id) { + return Err(ConnectionDenied::new( + "Connection to peer rejected: peer has a bad score", + )); + } Ok(ConnectionHandler) } fn handle_established_outbound_connection( &mut self, _connection_id: ConnectionId, - _peer: PeerId, - _addr: &libp2p::Multiaddr, + peer_id: PeerId, + addr: &libp2p::Multiaddr, _role_override: libp2p::core::Endpoint, ) -> Result, libp2p::swarm::ConnectionDenied> { - // TODO: we might want to check if we accept this peer or not in the future. - Ok(ConnectionHandler) + trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); + match self.ban_status(&peer_id) { + Some(cause) => { + error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); + Err(ConnectionDenied::new(cause)) + } + None => Ok(ConnectionHandler), + } } } @@ -215,10 +254,7 @@ impl PeerManager { // increment prometheus metrics if self.metrics_enabled { - let remote_addr = match endpoint { - ConnectedPoint::Dialer { address, .. } => address, - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - }; + let remote_addr = endpoint.get_remote_address(); match remote_addr.iter().find(|proto| { matches!( proto, @@ -241,28 +277,6 @@ impl PeerManager { metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); } - // Check to make sure the peer is not supposed to be banned - match self.ban_status(&peer_id) { - // TODO: directly emit the ban event? - BanResult::BadScore => { - // This is a faulty state - error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); - // Disconnect the peer. - self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); - // Re-ban the peer to prevent repeated errors. - self.events.push(PeerManagerEvent::Banned(peer_id, vec![])); - return; - } - BanResult::BannedIp(ip_addr) => { - // A good peer has connected to us via a banned IP address. We ban the peer and - // prevent future connections. - debug!(self.log, "Peer connected via banned IP. Banning"; "peer_id" => %peer_id, "banned_ip" => %ip_addr); - self.goodbye_peer(&peer_id, GoodbyeReason::BannedIP, ReportSource::PeerManager); - return; - } - BanResult::NotBanned => {} - } - // Count dialing peers in the limit if the peer dialed us. let count_dialing = endpoint.is_listener(); // Check the connection limits @@ -326,11 +340,7 @@ impl PeerManager { // reference so that peer manager can track this peer. 
self.inject_disconnect(&peer_id); - let remote_addr = match endpoint { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - ConnectedPoint::Dialer { address, .. } => address, - }; - + let remote_addr = endpoint.get_remote_address(); // Update the prometheus metrics if self.metrics_enabled { match remote_addr.iter().find(|proto| { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 4a1efe8f2..7157a6272 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -3,10 +3,13 @@ use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use rand::seq::SliceRandom; use score::{PeerAction, ReportSource, Score, ScoreState}; use slog::{crit, debug, error, trace, warn}; -use std::cmp::Ordering; -use std::collections::{HashMap, HashSet}; use std::net::IpAddr; use std::time::Instant; +use std::{cmp::Ordering, fmt::Display}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Formatter, +}; use sync_status::SyncStatus; use types::EthSpec; @@ -136,26 +139,18 @@ impl PeerDB { } } - /// Returns the current [`BanResult`] of the peer. This doesn't check the connection state, rather the + /// Returns the current [`BanResult`] of the peer if banned. This doesn't check the connection state, rather the /// underlying score of the peer. A peer may be banned but still in the connected state /// temporarily. /// /// This is used to determine if we should accept incoming connections or not. - pub fn ban_status(&self, peer_id: &PeerId) -> BanResult { - if let Some(peer) = self.peers.get(peer_id) { - match peer.score_state() { - ScoreState::Banned => BanResult::BadScore, - _ => { - if let Some(ip) = self.ip_is_banned(peer) { - BanResult::BannedIp(ip) - } else { - BanResult::NotBanned - } - } - } - } else { - BanResult::NotBanned - } + pub fn ban_status(&self, peer_id: &PeerId) -> Option { + self.peers + .get(peer_id) + .and_then(|peer| match peer.score_state() { + ScoreState::Banned => Some(BanResult::BadScore), + _ => self.ip_is_banned(peer).map(BanResult::BannedIp), + }) } /// Checks if the peer's known addresses are currently banned. @@ -1183,23 +1178,25 @@ pub enum BanOperation { } /// When checking if a peer is banned, it can be banned for multiple reasons. +#[derive(Copy, Clone, Debug)] pub enum BanResult { /// The peer's score is too low causing it to be banned. BadScore, /// The peer should be banned because it is connecting from a banned IP address. BannedIp(IpAddr), - /// The peer is not banned. - NotBanned, } -// Helper function for unit tests -#[cfg(test)] -impl BanResult { - pub fn is_banned(&self) -> bool { - !matches!(self, BanResult::NotBanned) +impl Display for BanResult { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BanResult::BadScore => write!(f, "Peer has a bad score"), + BanResult::BannedIp(addr) => write!(f, "Peer address: {} is banned", addr), + } } } +impl std::error::Error for BanResult {} + #[derive(Default)] pub struct BannedPeersCount { /// The number of banned peers in the database. 
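The `peerdb` changes above drop the `NotBanned` variant, so `ban_status` now returns `Option<BanResult>`, and `BanResult` gains `Display` and `Error` impls that let callers forward the ban reason directly (e.g. into `ConnectionDenied::new` in the peer manager). A small self-contained sketch of the resulting caller-side pattern follows; the `accept_connection` helper is illustrative only and not part of the patch.

```rust
use std::fmt::{self, Display, Formatter};
use std::net::{IpAddr, Ipv4Addr};

// Simplified stand-in mirroring the patched enum: "not banned" is no longer a
// variant and is instead expressed as `None` at the call site.
#[derive(Copy, Clone, Debug)]
enum BanResult {
    BadScore,
    BannedIp(IpAddr),
}

impl Display for BanResult {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            BanResult::BadScore => write!(f, "Peer has a bad score"),
            BanResult::BannedIp(addr) => write!(f, "Peer address: {} is banned", addr),
        }
    }
}

impl std::error::Error for BanResult {}

// Caller-side pattern: banned-ness checks collapse to `is_some()` / `is_none()`
// (as in the updated tests below), and the reason can be forwarded as an error.
fn accept_connection(ban_status: Option<BanResult>) -> Result<(), String> {
    match ban_status {
        None => Ok(()),
        Some(cause) => Err(cause.to_string()),
    }
}

fn main() {
    assert!(accept_connection(None).is_ok());
    let banned = Some(BanResult::BannedIp(IpAddr::V4(Ipv4Addr::LOCALHOST)));
    assert!(accept_connection(banned).is_err());
}
```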
@@ -1852,11 +1849,11 @@ mod tests { } //check that ip1 and ip2 are banned but ip3-5 not - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(!pdb.ban_status(&p3).is_banned()); - assert!(!pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_none()); + assert!(pdb.ban_status(&p4).is_none()); + assert!(pdb.ban_status(&p5).is_none()); //ban also the last peer in peers let _ = pdb.report_peer( @@ -1868,11 +1865,11 @@ mod tests { pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); //check that ip1-ip4 are banned but ip5 not - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(pdb.ban_status(&p3).is_banned()); - assert!(pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_some()); + assert!(pdb.ban_status(&p4).is_some()); + assert!(pdb.ban_status(&p5).is_none()); //peers[0] gets unbanned reset_score(&mut pdb, &peers[0]); @@ -1880,11 +1877,11 @@ mod tests { let _ = pdb.shrink_to_fit(); //nothing changed - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(pdb.ban_status(&p3).is_banned()); - assert!(pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_some()); + assert!(pdb.ban_status(&p4).is_some()); + assert!(pdb.ban_status(&p5).is_none()); //peers[1] gets unbanned reset_score(&mut pdb, &peers[1]); @@ -1892,11 +1889,11 @@ mod tests { let _ = pdb.shrink_to_fit(); //all ips are unbanned - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); - assert!(!pdb.ban_status(&p3).is_banned()); - assert!(!pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); + assert!(pdb.ban_status(&p3).is_none()); + assert!(pdb.ban_status(&p4).is_none()); + assert!(pdb.ban_status(&p5).is_none()); } #[test] @@ -1921,8 +1918,8 @@ mod tests { } // check ip is banned - assert!(pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_none()); // unban a peer reset_score(&mut pdb, &peers[0]); @@ -1930,8 +1927,8 @@ mod tests { let _ = pdb.shrink_to_fit(); // check not banned anymore - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); // unban all peers for p in &peers { @@ -1950,8 +1947,8 @@ mod tests { } // both IP's are now banned - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); // unban all peers for p in &peers { @@ -1967,16 +1964,16 @@ mod tests { } // nothing is banned - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); // reban last peer let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&peers[0]); 
//Ip's are banned again - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); } #[test] diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 6c52a07c1..8dd750429 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -20,8 +20,6 @@ where AppReqId: ReqId, TSpec: EthSpec, { - /// Peers banned. - pub banned_peers: libp2p::allow_block_list::Behaviour, /// Keep track of active and pending connections to enforce hard limits. pub connection_limits: libp2p::connection_limits::Behaviour, /// The routing pub-sub mechanism for eth2. diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 8e97487bd..53b6bcab3 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -27,6 +27,7 @@ use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettin use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::{ self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, + TopicScoreParams, }; use libp2p::identify; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; @@ -353,11 +354,8 @@ impl Network { libp2p::connection_limits::Behaviour::new(limits) }; - let banned_peers = libp2p::allow_block_list::Behaviour::default(); - let behaviour = { Behaviour { - banned_peers, gossipsub, eth2_rpc, discovery, @@ -637,6 +635,38 @@ impl Network { } } + /// Remove topic weight from all topics that don't have the given fork digest. + pub fn remove_topic_weight_except(&mut self, except: [u8; 4]) { + let new_param = TopicScoreParams { + topic_weight: 0.0, + ..Default::default() + }; + let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); + for topic in subscriptions + .iter() + .filter(|topic| topic.fork_digest != except) + { + let libp2p_topic: Topic = topic.clone().into(); + match self + .gossipsub_mut() + .set_topic_params(libp2p_topic, new_param.clone()) + { + Ok(_) => debug!(self.log, "Removed topic weight"; "topic" => %topic), + Err(e) => { + warn!(self.log, "Failed to remove topic weight"; "topic" => %topic, "error" => e) + } + } + } + } + + /// Returns the scoring parameters for a topic if set. + pub fn get_topic_params(&self, topic: GossipTopic) -> Option<&TopicScoreParams> { + self.swarm + .behaviour() + .gossipsub + .get_topic_params(&topic.into()) + } + /// Subscribes to a gossipsub topic. /// /// Returns `true` if the subscription was successful and `false` otherwise. @@ -1445,15 +1475,10 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.swarm.behaviour_mut().banned_peers.block_peer(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); None } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.swarm - .behaviour_mut() - .banned_peers - .unblock_peer(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); None } @@ -1502,7 +1527,6 @@ impl Network { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { // Handle sub-behaviour events. 
- BehaviourEvent::BannedPeers(void) => void::unreachable(void), BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), // Inform the peer manager about discovered peers. diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index fbcbf0141..fe70f3c1b 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -978,9 +978,17 @@ impl NetworkBeaconProcessor { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } + Err(BlockError::BlockIsAlreadyKnown) => { + debug!( + self.log, + "Gossip block is already known"; + "block_root" => %block_root, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) - | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 381829f93..7d0dc4b77 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -215,15 +215,18 @@ pub struct NetworkService { } impl NetworkService { - #[allow(clippy::type_complexity)] - pub async fn start( + async fn build( beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, gossipsub_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, - ) -> error::Result<(Arc>, NetworkSenders)> { + ) -> error::Result<( + NetworkService, + Arc>, + NetworkSenders, + )> { let network_log = executor.log().clone(); // build the channels for external comms let (network_senders, network_recievers) = NetworkSenders::new(); @@ -369,6 +372,28 @@ impl NetworkService { enable_light_client_server: config.enable_light_client_server, }; + Ok((network_service, network_globals, network_senders)) + } + + #[allow(clippy::type_complexity)] + pub async fn start( + beacon_chain: Arc>, + config: &NetworkConfig, + executor: task_executor::TaskExecutor, + gossipsub_registry: Option<&'_ mut Registry>, + beacon_processor_send: BeaconProcessorSend, + beacon_processor_reprocess_tx: mpsc::Sender, + ) -> error::Result<(Arc>, NetworkSenders)> { + let (network_service, network_globals, network_senders) = Self::build( + beacon_chain, + config, + executor.clone(), + gossipsub_registry, + beacon_processor_send, + beacon_processor_reprocess_tx, + ) + .await?; + network_service.spawn_service(executor); Ok((network_globals, network_senders)) @@ -885,9 +910,10 @@ impl NetworkService { fn update_next_fork(&mut self) { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + let new_fork_digest = new_enr_fork_id.fork_digest; let fork_context = &self.fork_context; - if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { info!( self.log, "Transitioned to new fork"; @@ -910,6 +936,10 @@ impl NetworkService { Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); info!(self.log, "Network will 
unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + + // Remove topic weight from old fork topics to prevent peers that left on the mesh on + // old topics from being penalized for not sending us messages. + self.libp2p.remove_topic_weight_except(new_fork_digest); } else { crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); } diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 548dee45b..35a7f1eab 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -4,14 +4,26 @@ mod tests { use crate::persisted_dht::load_dht; use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; - use beacon_processor::BeaconProcessorChannels; - use lighthouse_network::Enr; + use beacon_chain::BeaconChainTypes; + use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; + use futures::StreamExt; + use lighthouse_network::types::{GossipEncoding, GossipKind}; + use lighthouse_network::{Enr, GossipTopic}; use slog::{o, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use std::sync::Arc; use tokio::runtime::Runtime; - use types::MinimalEthSpec as E; + use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; + + impl NetworkService { + fn get_topic_params( + &self, + topic: GossipTopic, + ) -> Option<&lighthouse_network::libp2p::gossipsub::TopicScoreParams> { + self.libp2p.get_topic_params(topic) + } + } fn get_logger(actual_log: bool) -> Logger { if actual_log { @@ -35,7 +47,7 @@ mod tests { fn test_dht_persistence() { let log = get_logger(false); - let beacon_chain = BeaconChainHarness::builder(E) + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .deterministic_keypairs(8) .fresh_ephemeral_store() @@ -102,4 +114,126 @@ mod tests { "should have persisted the second ENR to store" ); } + + // Test removing topic weight on old topics when a fork happens. + #[test] + fn test_removing_topic_weight_on_old_topics() { + let runtime = Arc::new(Runtime::new().unwrap()); + + // Capella spec + let mut spec = MinimalEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(1)); + + // Build beacon chain. + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build() + .chain; + let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); + assert_eq!(next_fork_name, ForkName::Capella); + + // Build network service. 
+        let (mut network_service, network_globals, _network_senders) = runtime.block_on(async {
+            let (_, exit) = exit_future::signal();
+            let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
+            let executor = task_executor::TaskExecutor::new(
+                Arc::downgrade(&runtime),
+                exit,
+                get_logger(false),
+                shutdown_tx,
+            );
+
+            let mut config = NetworkConfig::default();
+            config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215);
+            config.discv5_config.table_filter = |_| true; // Do not ignore local IPs
+            config.upnp_enabled = false;
+
+            let beacon_processor_channels =
+                BeaconProcessorChannels::new(&BeaconProcessorConfig::default());
+            NetworkService::build(
+                beacon_chain.clone(),
+                &config,
+                executor.clone(),
+                None,
+                beacon_processor_channels.beacon_processor_tx,
+                beacon_processor_channels.work_reprocessing_tx,
+            )
+            .await
+            .unwrap()
+        });
+
+        // Subscribe to the topics.
+        runtime.block_on(async {
+            while network_globals.gossipsub_subscriptions.read().len() < 2 {
+                if let Some(msg) = network_service.attestation_service.next().await {
+                    network_service.on_attestation_service_msg(msg);
+                }
+            }
+        });
+
+        // Make sure the service is subscribed to the topics.
+        let (old_topic1, old_topic2) = {
+            let mut subnets = SubnetId::compute_subnets_for_epoch::<MinimalEthSpec>(
+                network_globals.local_enr().node_id().raw().into(),
+                beacon_chain.epoch().unwrap(),
+                &spec,
+            )
+            .unwrap()
+            .0
+            .collect::<Vec<_>>();
+            assert_eq!(2, subnets.len());
+
+            let old_fork_digest = beacon_chain.enr_fork_id().fork_digest;
+            let old_topic1 = GossipTopic::new(
+                GossipKind::Attestation(subnets.pop().unwrap()),
+                GossipEncoding::SSZSnappy,
+                old_fork_digest,
+            );
+            let old_topic2 = GossipTopic::new(
+                GossipKind::Attestation(subnets.pop().unwrap()),
+                GossipEncoding::SSZSnappy,
+                old_fork_digest,
+            );
+
+            (old_topic1, old_topic2)
+        };
+        let subscriptions = network_globals.gossipsub_subscriptions.read().clone();
+        assert_eq!(2, subscriptions.len());
+        assert!(subscriptions.contains(&old_topic1));
+        assert!(subscriptions.contains(&old_topic2));
+        let old_topic_params1 = network_service
+            .get_topic_params(old_topic1.clone())
+            .expect("topic score params");
+        assert!(old_topic_params1.topic_weight > 0.0);
+        let old_topic_params2 = network_service
+            .get_topic_params(old_topic2.clone())
+            .expect("topic score params");
+        assert!(old_topic_params2.topic_weight > 0.0);
+
+        // Advance slot to the next fork
+        for _ in 0..MinimalEthSpec::slots_per_epoch() {
+            beacon_chain.slot_clock.advance_slot();
+        }
+
+        // Run `NetworkService::update_next_fork()`.
+        runtime.block_on(async {
+            network_service.update_next_fork();
+        });
+
+        // Check that topic_weight on the old topics has been zeroed.
+        let old_topic_params1 = network_service
+            .get_topic_params(old_topic1)
+            .expect("topic score params");
+        assert_eq!(0.0, old_topic_params1.topic_weight);
+
+        let old_topic_params2 = network_service
+            .get_topic_params(old_topic2)
+            .expect("topic score params");
+        assert_eq!(0.0, old_topic_params2.topic_weight);
+    }
 }
diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs
index a624d8ffb..39bc7f8a2 100644
--- a/beacon_node/src/config.rs
+++ b/beacon_node/src/config.rs
@@ -22,6 +22,7 @@ use std::fmt::Debug;
 use std::fs;
 use std::net::Ipv6Addr;
 use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs};
+use std::num::NonZeroU16;
 use std::path::{Path, PathBuf};
 use std::str::FromStr;
 use std::time::Duration;
@@ -1216,23 +1217,23 @@ pub fn set_network_config(
     if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") {
         config.enr_udp4_port = Some(
             enr_udp_port_str
-                .parse::<u16>()
-                .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?,
+                .parse::<NonZeroU16>()
+                .map_err(|_| format!("Invalid ENR discovery port: {}", enr_udp_port_str))?,
         );
     }

     if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic-port") {
         config.enr_quic4_port = Some(
             enr_quic_port_str
-                .parse::<u16>()
-                .map_err(|_| format!("Invalid quic port: {}", enr_quic_port_str))?,
+                .parse::<NonZeroU16>()
+                .map_err(|_| format!("Invalid ENR quic port: {}", enr_quic_port_str))?,
         );
     }

     if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") {
         config.enr_tcp4_port = Some(
             enr_tcp_port_str
-                .parse::<u16>()
+                .parse::<NonZeroU16>()
                 .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?,
         );
     }
@@ -1240,23 +1241,23 @@ pub fn set_network_config(
     if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") {
         config.enr_udp6_port = Some(
             enr_udp_port_str
-                .parse::<u16>()
-                .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?,
+                .parse::<NonZeroU16>()
+                .map_err(|_| format!("Invalid ENR discovery port: {}", enr_udp_port_str))?,
         );
     }

     if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic6-port") {
         config.enr_quic6_port = Some(
             enr_quic_port_str
-                .parse::<u16>()
-                .map_err(|_| format!("Invalid quic port: {}", enr_quic_port_str))?,
+                .parse::<NonZeroU16>()
+                .map_err(|_| format!("Invalid ENR quic port: {}", enr_quic_port_str))?,
         );
     }

     if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") {
         config.enr_tcp6_port = Some(
             enr_tcp_port_str
-                .parse::<u16>()
+                .parse::<NonZeroU16>()
                 .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?,
         );
     }
@@ -1264,25 +1265,38 @@ pub fn set_network_config(
     if cli_args.is_present("enr-match") {
         // Match the IP and UDP port in the ENR.

-        // Set the ENR address to localhost if the address is unspecified.
         if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() {
+            // ensure the port is valid to be advertised
+            let disc_port = ipv4_addr
+                .disc_port
+                .try_into()
+                .map_err(|_| "enr-match can only be used with non-zero listening ports")?;
+
+            // Set the ENR address to localhost if the address is unspecified.
             let ipv4_enr_addr = if ipv4_addr.addr == Ipv4Addr::UNSPECIFIED {
                 Ipv4Addr::LOCALHOST
             } else {
                 ipv4_addr.addr
             };
             config.enr_address.0 = Some(ipv4_enr_addr);
-            config.enr_udp4_port = Some(ipv4_addr.disc_port);
+            config.enr_udp4_port = Some(disc_port);
         }

         if let Some(ipv6_addr) = config.listen_addrs().v6().cloned() {
+            // ensure the port is valid to be advertised
+            let disc_port = ipv6_addr
+                .disc_port
+                .try_into()
+                .map_err(|_| "enr-match can only be used with non-zero listening ports")?;
+
+            // Set the ENR address to localhost if the address is unspecified.
let ipv6_enr_addr = if ipv6_addr.addr == Ipv6Addr::UNSPECIFIED { Ipv6Addr::LOCALHOST } else { ipv6_addr.addr }; config.enr_address.1 = Some(ipv6_enr_addr); - config.enr_udp6_port = Some(ipv6_addr.disc_port); + config.enr_udp6_port = Some(disc_port); } } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 5d7853bd2..d435efc6f 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -60,19 +60,25 @@ impl BootNodeConfig { // Set the Enr Discovery ports to the listening ports if not present. if let Some(listening_addr_v4) = network_config.listen_addrs().v4() { - network_config.enr_udp4_port = Some( - network_config - .enr_udp4_port - .unwrap_or(listening_addr_v4.disc_port), - ) + if network_config.enr_udp4_port.is_none() { + network_config.enr_udp4_port = + Some(network_config.enr_udp4_port.unwrap_or( + listening_addr_v4.disc_port.try_into().map_err(|_| { + "boot node enr-udp-port not set and listening port is zero" + })?, + )) + } }; if let Some(listening_addr_v6) = network_config.listen_addrs().v6() { - network_config.enr_udp6_port = Some( - network_config - .enr_udp6_port - .unwrap_or(listening_addr_v6.disc_port), - ) + if network_config.enr_udp6_port.is_none() { + network_config.enr_udp6_port = + Some(network_config.enr_udp6_port.unwrap_or( + listening_addr_v6.disc_port.try_into().map_err(|_| { + "boot node enr-udp-port not set and listening port is zero" + })?, + )) + } }; // By default this is enabled. If it is not set, revert to false. diff --git a/bors.toml b/bors.toml index 9e633d63f..e821b89a8 100644 --- a/bors.toml +++ b/bors.toml @@ -1,5 +1,4 @@ status = [ - "cargo-fmt", "release-tests-ubuntu", "release-tests-windows", "debug-tests-ubuntu", @@ -9,20 +8,16 @@ status = [ "eth1-simulator-ubuntu", "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", - "check-benchmarks", - "clippy", - "arbitrary-check", - "cargo-audit", + "check-code", "cargo-udeps", "beacon-chain-tests", "op-pool-tests", "doppelganger-protection-test", "execution-engine-integration-ubuntu", - "cargo-vendor", "check-msrv", "slasher-tests", "syncing-simulator-ubuntu", - "compile-with-beta-compiler" + "compile-with-beta-compiler", ] use_squash_merge = true timeout_sec = 10800 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml index 7442f6576..3c6e1bad8 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml @@ -9,8 +9,6 @@ - enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg - enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg # Teku team (Consensys) -- enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA -- enr:-KG4QL-eqFoHy0cI31THvtZjpYUu_Jdw_MO7skQRJxY1g5HTN1A0epPCU6vi0gLGUgrzpU-ygeMSS8ewVxDpKfYmxMMGhGV0aDKQtTA_KgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaED8GJ2vzUqgL6-KD1xalo1CsmY4X1HaDnyl6Y_WayCo9GDdGNwgiMog3VkcIIjKA - 
enr:-KG4QMOEswP62yzDjSwWS4YEjtTZ5PO6r65CPqYBkgTTkrpaedQ8uEUo1uMALtJIvb2w_WWEVmg5yt1UAuK1ftxUU7QDhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQEnfA2iXNlY3AyNTZrMaEDfol8oLr6XJ7FsdAYE7lpJhKMls4G_v6qQOGKJUWGb_uDdGNwgiMog3VkcIIjKA - enr:-KG4QF4B5WrlFcRhUU6dZETwY5ZzAXnA0vGC__L1Kdw602nDZwXSTs5RFXFIFUnbQJmhNGVU6OIX7KVrCSTODsz1tK4DhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQExNYEiXNlY3AyNTZrMaECQmM9vp7KhaXhI-nqL_R0ovULLCFSFTa9CPPSdb1zPX6DdGNwgiMog3VkcIIjKA # Prysm team (Prysmatic Labs) diff --git a/common/lru_cache/Cargo.toml b/common/lru_cache/Cargo.toml index 73c623ed4..c1bd15f9f 100644 --- a/common/lru_cache/Cargo.toml +++ b/common/lru_cache/Cargo.toml @@ -6,3 +6,6 @@ edition = { workspace = true } [dependencies] fnv = { workspace = true } + +[dev-dependencies] +mock_instant = "0.3" diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 966741ca4..0b2fd8356 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -1,7 +1,13 @@ //! This implements a time-based LRU cache for fast checking of duplicates use fnv::FnvHashSet; +#[cfg(test)] +use mock_instant::Instant; use std::collections::VecDeque; -use std::time::{Duration, Instant}; + +#[cfg(not(test))] +use std::time::Instant; + +use std::time::Duration; struct Element { /// The key being inserted. @@ -222,16 +228,16 @@ mod test { cache.insert("a"); cache.insert("b"); - std::thread::sleep(Duration::from_millis(20)); + mock_instant::MockClock::advance(Duration::from_millis(20)); cache.insert("a"); // a is newer now - std::thread::sleep(Duration::from_millis(85)); + mock_instant::MockClock::advance(Duration::from_millis(85)); assert!(cache.contains(&"a"),); // b was inserted first but was not as recent it should have been removed assert!(!cache.contains(&"b")); - std::thread::sleep(Duration::from_millis(16)); + mock_instant::MockClock::advance(Duration::from_millis(16)); assert!(!cache.contains(&"a")); } } diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index ed5e64294..f502d7f69 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -3,6 +3,8 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, VerifyBlockRoot, }; +use itertools::Itertools; +use std::iter::Peekable; use std::marker::PhantomData; use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -25,7 +27,7 @@ pub struct BlockReplayer< 'a, Spec: EthSpec, Error = BlockReplayError, - StateRootIter = StateRootIterDefault, + StateRootIter: Iterator> = StateRootIterDefault, > { state: BeaconState, spec: &'a ChainSpec, @@ -36,7 +38,7 @@ pub struct BlockReplayer< post_block_hook: Option>, pre_slot_hook: Option>, post_slot_hook: Option>, - state_root_iter: Option, + pub(crate) state_root_iter: Option>, state_root_miss: bool, _phantom: PhantomData, } @@ -138,7 +140,7 @@ where /// `self.state.slot` to the `target_slot` supplied to `apply_blocks` (inclusive of both /// endpoints). pub fn state_root_iter(mut self, iter: StateRootIter) -> Self { - self.state_root_iter = Some(iter); + self.state_root_iter = Some(iter.peekable()); self } @@ -192,7 +194,7 @@ where // If a state root iterator is configured, use it to find the root. 
if let Some(ref mut state_root_iter) = self.state_root_iter { let opt_root = state_root_iter - .take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) + .peeking_take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) .find(|res| res.as_ref().map_or(true, |(_, s)| *s == slot)) .transpose()?; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 590514def..75eb43896 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1,11 +1,11 @@ -#![cfg(all(test, not(feature = "fake_crypto")))] +#![cfg(all(test, not(feature = "fake_crypto"), not(debug_assertions)))] use crate::per_block_processing::errors::{ AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError, DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; -use crate::{per_block_processing, StateProcessingStrategy}; +use crate::{per_block_processing, BlockReplayError, BlockReplayer, StateProcessingStrategy}; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, @@ -1035,3 +1035,51 @@ async fn fork_spanning_exit() { ) .expect_err("phase0 exit does not verify against bellatrix state"); } + +/// Check that the block replayer does not consume state roots unnecessarily. +#[tokio::test] +async fn block_replayer_peeking_state_roots() { + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + + let target_state = harness.get_current_state(); + let target_block_root = harness.head_block_root(); + let target_block = harness + .chain + .get_blinded_block(&target_block_root) + .unwrap() + .unwrap(); + + let parent_block_root = target_block.parent_root(); + let parent_block = harness + .chain + .get_blinded_block(&parent_block_root) + .unwrap() + .unwrap(); + let parent_state = harness + .chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .unwrap() + .unwrap(); + + // Omit the state root for `target_state` but provide a dummy state root at the *next* slot. + // If the block replayer is peeking at the state roots rather than consuming them, then the + // dummy state should still be there after block replay completes. 
+ let dummy_state_root = Hash256::repeat_byte(0xff); + let dummy_slot = target_state.slot() + 1; + let state_root_iter = vec![Ok::<_, BlockReplayError>((dummy_state_root, dummy_slot))]; + let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) + .state_root_iter(state_root_iter.into_iter()) + .no_signature_verification() + .apply_blocks(vec![target_block], None) + .unwrap(); + + assert_eq!( + block_replayer + .state_root_iter + .unwrap() + .next() + .unwrap() + .unwrap(), + (dummy_state_root, dummy_slot) + ); +} diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 4c266a9a3..d621c3891 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,8 +1,8 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ BlobRootsList, ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ForkName, ForkVersionDeserialize, - KzgProofs, SignedRoot, Uint256, + ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, + ForkVersionDeserialize, KzgProofs, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; @@ -26,7 +26,8 @@ pub struct BlindedBlobsBundle { derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone), serde(bound = "E: EthSpec", deny_unknown_fields) ), - map_ref_into(ExecutionPayloadHeaderRef) + map_ref_into(ExecutionPayloadHeaderRef), + map_ref_mut_into(ExecutionPayloadHeaderRefMut) )] #[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] #[serde(bound = "E: EthSpec", deny_unknown_fields, untagged)] @@ -59,6 +60,14 @@ impl<'a, E: EthSpec> BuilderBidRef<'a, E> { } } +impl<'a, E: EthSpec> BuilderBidRefMut<'a, E> { + pub fn header_mut(self) -> ExecutionPayloadHeaderRefMut<'a, E> { + map_builder_bid_ref_mut_into_execution_payload_header_ref_mut!(&'a _, self, |bid, cons| { + cons(&mut bid.header) + }) + } +} + impl SignedRoot for BuilderBid {} /// Validator registration, for use in interacting with servers implementing the builder API. 
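A note on the helper introduced in the next file: `SignedValidatorRegistrationData::verify_signature` decompresses the registration's public key, computes the signing root over the builder domain, and checks the BLS signature, returning `false` on any failure. Below is a minimal caller-side sketch (not part of this patch), assuming `types` re-exports `SignedValidatorRegistrationData` and `ChainSpec`; the surrounding function name is hypothetical.

// Hypothetical usage sketch, not part of the diff that follows.
// Keeps only registrations whose builder-domain signature verifies.
use types::{ChainSpec, SignedValidatorRegistrationData};

fn filter_valid_registrations(
    registrations: Vec<SignedValidatorRegistrationData>,
    spec: &ChainSpec,
) -> Vec<SignedValidatorRegistrationData> {
    registrations
        .into_iter()
        .filter(|registration| registration.verify_signature(spec))
        .collect()
}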
diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs index de7f26cc6..174014df8 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator_registration_data.rs @@ -21,3 +21,17 @@ pub struct ValidatorRegistrationData { } impl SignedRoot for ValidatorRegistrationData {} + +impl SignedValidatorRegistrationData { + pub fn verify_signature(&self, spec: &ChainSpec) -> bool { + self.message + .pubkey + .decompress() + .map(|pubkey| { + let domain = spec.get_builder_domain(); + let message = self.message.signing_root(domain); + self.signature.verify(&pubkey, message) + }) + .unwrap_or(false) + } +} diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 0584cd654..1d41bedc8 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -4,16 +4,16 @@ use lighthouse_network::{ libp2p::identity::secp256k1, NetworkConfig, NETWORK_KEY_FILENAME, }; -use std::fs::File; use std::io::Write; use std::path::PathBuf; use std::{fs, net::Ipv4Addr}; +use std::{fs::File, num::NonZeroU16}; use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; - let udp_port: u16 = clap_utils::parse_required(matches, "udp-port")?; - let tcp_port: u16 = clap_utils::parse_required(matches, "tcp-port")?; + let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?; + let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?; let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; let genesis_fork_version: [u8; 4] = clap_utils::parse_ssz_required(matches, "genesis-fork-version")?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4dca5cb1c..95172980f 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -22,9 +22,14 @@ use types::{ Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, ProgressiveBalancesMode, }; -use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; +const DUMMY_ENR_TCP_PORT: u16 = 7777; +const DUMMY_ENR_UDP_PORT: u16 = 8888; +const DUMMY_ENR_QUIC_PORT: u16 = 9999; + +const _: () = + assert!(DUMMY_ENR_QUIC_PORT != 0 && DUMMY_ENR_TCP_PORT != 0 && DUMMY_ENR_UDP_PORT != 0); /// Returns the `lighthouse beacon_node` command. 
fn base_cmd() -> Command { @@ -1036,7 +1041,7 @@ fn network_listen_address_flag_wrong_double_v6_value_config() { } #[test] fn network_port_flag_over_ipv4() { - let port = unused_tcp4_port().expect("Unable to find unused port."); + let port = 0; CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run() @@ -1053,7 +1058,7 @@ fn network_port_flag_over_ipv4() { } #[test] fn network_port_flag_over_ipv6() { - let port = unused_tcp6_port().expect("Unable to find unused port."); + let port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) @@ -1071,8 +1076,8 @@ fn network_port_flag_over_ipv6() { } #[test] fn network_port_and_discovery_port_flags_over_ipv4() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; CommandLineTest::new() .flag("port", Some(tcp4_port.to_string().as_str())) .flag("discovery-port", Some(disc4_port.to_string().as_str())) @@ -1090,8 +1095,8 @@ fn network_port_and_discovery_port_flags_over_ipv4() { } #[test] fn network_port_and_discovery_port_flags_over_ipv6() { - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = 0; + let disc6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(tcp6_port.to_string().as_str())) @@ -1110,10 +1115,10 @@ fn network_port_and_discovery_port_flags_over_ipv6() { } #[test] fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; + let tcp6_port = 0; + let disc6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("listen-address", Some("127.0.0.1")) @@ -1145,12 +1150,12 @@ fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { #[test] fn network_port_discovery_quic_port_flags_over_ipv4_and_ipv6() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); - let quic4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); - let quic6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; + let quic4_port = 0; + let tcp6_port = 0; + let disc6_port = 0; + let quic6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("listen-address", Some("127.0.0.1")) @@ -1226,7 +1231,7 @@ fn default_backfill_rate_limiting_flag() { } #[test] fn default_boot_nodes() { - let number_of_boot_nodes = 17; + let number_of_boot_nodes = 15; CommandLineTest::new() .run_with_zero_port() @@ -1296,57 +1301,91 @@ fn network_load_flag() { // Tests for ENR flags. 
#[test] fn enr_udp_port_flag() { - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; + assert!(port != 0); CommandLineTest::new() .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_quic_port_flag() { - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_QUIC_PORT; CommandLineTest::new() .flag("enr-quic-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_quic4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_quic4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_tcp_port_flag() { - let port = unused_tcp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_TCP_PORT; CommandLineTest::new() .flag("enr-tcp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_tcp4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_tcp4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_udp6_port_flag() { - let port = unused_udp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-udp6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_quic6_port_flag() { - let port = unused_udp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_QUIC_PORT; CommandLineTest::new() .flag("enr-quic6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_quic6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_quic6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_tcp6_port_flag() { - let port = unused_tcp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_TCP_PORT; CommandLineTest::new() .flag("enr-tcp6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_tcp6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_tcp6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_match_flag_over_ipv4() { let addr = "127.0.0.2".parse::().unwrap(); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). 
+ let udp4_port = DUMMY_ENR_UDP_PORT; + let tcp4_port = DUMMY_ENR_TCP_PORT; + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) @@ -1363,15 +1402,21 @@ fn enr_match_flag_over_ipv4() { Some((addr, udp4_port, tcp4_port)) ); assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(udp4_port) + ); }); } #[test] fn enr_match_flag_over_ipv6() { const ADDR: &str = "::1"; let addr = ADDR.parse::().unwrap(); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). + let udp6_port = DUMMY_ENR_UDP_PORT; + let tcp6_port = DUMMY_ENR_TCP_PORT; + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(ADDR)) @@ -1388,19 +1433,27 @@ fn enr_match_flag_over_ipv6() { Some((addr, udp6_port, tcp6_port)) ); assert_eq!(config.network.enr_address, (None, Some(addr))); - assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(udp6_port) + ); }); } #[test] fn enr_match_flag_over_ipv4_and_ipv6() { const IPV6_ADDR: &str = "::1"; + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). + let udp6_port = DUMMY_ENR_UDP_PORT; + let tcp6_port = DUMMY_ENR_TCP_PORT; let ipv6_addr = IPV6_ADDR.parse::().unwrap(); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + const IPV4_ADDR: &str = "127.0.0.1"; + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). 
+ let udp4_port = DUMMY_ENR_UDP_PORT; + let tcp4_port = DUMMY_ENR_TCP_PORT; let ipv4_addr = IPV4_ADDR.parse::().unwrap(); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(IPV4_ADDR)) @@ -1431,41 +1484,53 @@ fn enr_match_flag_over_ipv4_and_ipv6() { config.network.enr_address, (Some(ipv4_addr), Some(ipv6_addr)) ); - assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); - assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(udp6_port) + ); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(udp4_port) + ); }); } #[test] fn enr_address_flag_with_ipv4() { let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] fn enr_address_flag_with_ipv6() { let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] fn enr_address_dns_flag() { let addr = Ipv4Addr::LOCALHOST; let ipv6addr = Ipv6Addr::LOCALHOST; - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("localhost")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -1475,7 +1540,10 @@ fn enr_address_dns_flag() { config.network.enr_address.0 == Some(addr) || config.network.enr_address.1 == Some(ipv6addr) ); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] @@ -1514,8 +1582,8 @@ fn http_address_ipv6_flag() { } #[test] fn http_port_flag() { - let port1 = unused_tcp4_port().expect("Unable to find unused port."); - let port2 = unused_tcp4_port().expect("Unable to find unused port."); + let port1 = 0; + let port2 = 0; CommandLineTest::new() .flag("http", None) .flag("http-port", Some(port1.to_string().as_str())) @@ -1671,8 +1739,8 @@ fn metrics_address_ipv6_flag() { } #[test] fn metrics_port_flag() { - let port1 = unused_tcp4_port().expect("Unable to find unused port."); - let port2 = unused_tcp4_port().expect("Unable to find unused port."); + let port1 = 0; + let port2 = 0; CommandLineTest::new() .flag("metrics", None) .flag("metrics-port", Some(port1.to_string().as_str())) diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 69fa8ded0..1024c46e4 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -66,8 +66,8 @@ impl LocalNetwork { 
BOOTNODE_PORT, QUIC_PORT, ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT); + beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); + beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); beacon_config.network.discv5_config.table_filter = |_| true; let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { @@ -152,14 +152,16 @@ impl LocalNetwork { .expect("bootnode must have a network"), ); let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; + let libp2p_tcp_port = BOOTNODE_PORT + count; + let discv5_port = BOOTNODE_PORT + count; beacon_config.network.set_ipv4_listening_address( std::net::Ipv4Addr::UNSPECIFIED, - BOOTNODE_PORT + count, - BOOTNODE_PORT + count, + libp2p_tcp_port, + discv5_port, QUIC_PORT + count, ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT + count); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT + count); + beacon_config.network.enr_udp4_port = Some(discv5_port.try_into().unwrap()); + beacon_config.network.enr_tcp4_port = Some(libp2p_tcp_port.try_into().unwrap()); beacon_config.network.discv5_config.table_filter = |_| true; beacon_config.network.proposer_only = is_proposer; } diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index f00e7b7e3..f918e87cf 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,7 +1,7 @@ #!/bin/bash openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && -openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && +openssl pkcs12 -export -aes256 -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && cp web3signer/cert.pem lighthouse/web3signer.pem && openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && -openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl pkcs12 -export -aes256 -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem index 5746d19a1..24b0a2e5c 100644 --- a/testing/web3signer_tests/tls/lighthouse/cert.pem +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUELASgYwStCn/u/8tPByRADyCwLEwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUXZijYo8W4/9dAq58ocFEbZDxohwwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMjAzMDA1N1oYDzIxMjMwODI5MDMwMDU3 +VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMDAyNTYzNloYDzIxMjMwODI3MDI1NjM2 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV 
-BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCc -i30cib5B/B5QNd8grzi4LxmlyfZFi3VfpukwdwOD1Xk3ODk1OtjAzhK46YhDclvc -u98m1Dnib1Z+eTjRuEEoekIxz2+BbOle7G52LNvuDZpD+HKucqIU3TnEKPPuTYPp -lZ1n/9EyxXUwD5uTkn7xXzK8UFXUt73j6I6VFMdHlNcwLcx8KSwBDzvnGT4ew/UL -+ThON3j5rIT+nFHDcC2zoM+6ANdVkL6GHid4/cOcYW6GxB9TRZtEasqze41bC+kX -ZtPlV5V2nilAzVj8z9ynwBpHkLH+E6sMUhSEwA++QfI1gGf0FmSBgSIZ3RdPo/dp -hkLG8fZXKMkMzKkRm5hcstDP6DnTIYl+CfuVez5gZ0/yelAqXNvTqMKuDhHTTRRY -aOXZX4BAiQO2Q6a6WYLe87E2ka5AF2T2y/BPeXjUwDS/1mFIB3FUGlMLVJt8/RLz -nXVGoSsYapttiiPucQbMPEysCJ4/LZ9zxe3EDWWjpurLHGi/Y/dVziEvg1Eoycix -dZogKz0QVCz4++QI0kPDDX7So7CWni2JJuYguF/8CX8QbCT2L8jXf0uQrq76FLKj -88A7lS8DzXBt/pRryiIlDyLenJwHmrv6p+P/FYvgnJHvAEtTynxYm5GA16YWy+Dj -c5XVgNHjV4TdX3GueAp+NhBBaHDFvYCbP/oXkRvNRQIDAQABo1QwUjALBgNVHQ8E +BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC1 +R1M9NnRwUsqFvJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52 +aHSA2fs2KyeA61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDE +jf0ogUVM9TCEt6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAw +Oz1d8/fxYJvIpT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5Fe +V0fPth+e9XMAH7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI +0vps1zF9Bo8QewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWM +VcR//EtbOZGqzGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr +67Vyi9SWSM6rdRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91f +kpT6kjc6d2h4bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa +3zLeqd89dS7HNLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcf +TPFe8xuDYsi155veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABo1QwUjALBgNVHQ8E BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQUoeeF4G1qTRzLvO583qitbNDzr10wDQYJKoZIhvcNAQELBQADggIBAA9Y -YZP0pZLyovSnjyyuTR4KE9B+TSwqHe/LvH+7EAXLH+cwhyS7ADfJyt3mOCbKHZSo -dmJ5KWQ6M2Xn9Wq40BPk8mQPmAxy0nHg5beG03HYXOIsK8zgXTMad1+D1jnHPAda -ldXJ2Y+ljx4TDXKCWpTaq1+flqgRD3t98tOLuiULZ5jsTFX8Xbun7matcjziU5Lo -GWVQPWkb8Vx+3QyfbfiYJ7hggfYTxQsVJOXKuD8k2FMtKn5oTp3VwD2kY1q2X2Yk -HsDZJdYrvjWi2LcZDKoSNeusuLrv1XoUnwsAa3ng6drvoEU16vfILLYqH820UJ61 -/fFm3a9BFHRvPVd/WcSeIVc9jx9+32RIVxlppwCINnGMGE20kUZxu0TiMjTX9bCp -AouDuhwMt7z5jiQIi/CMxN6IlHBeVLqyK8ayWvH40xYgZTXlePpmLcQhcieNk7oJ -ard9jMfj4JhH5GbLXVptMBVJ0f9Ql4rW3EyNipvVKdkgTUNIeVm7LyUK220aT7ty -a0pGWHHViiF1MjGExo0P3gjZIML32TjZWlG3Nts5NAiyXDo4f78VeLyZQ7efVkub -GpjMf89vrmPdQhssoFr8fRFQObDe7hgxkgeiw9jgHItJl2/MWAxfsHV18HwiBqGW -QzaZR995YhU480jvA5XR8+EB6QUZeCEKunW8WK/F +HQ4EFgQU6r7QHkcEsWhEZHpcMpGxwKXQL9swDQYJKoZIhvcNAQELBQADggIBACyO +8xzqotye1J6xhDQCQnQF3dXaPTqfT31Ypg8UeU25V9N+bZO04CJKlOblukuvkedE +x1RDeqG3A81D4JOgTGFmFVoEF4iTk3NBrsHuMzph6ImHTd3TD+5iG5a3GL0i9PAI +dHTT6z6t2wlayjmHotqQ+N4A4msx8IPBRULcCmId319gpSDHsvt2wYbLdh+d9E2h +vI0VleJpJ7eoy05842VTkFJebriSpi75yFphKUnyAKlONiMN3o6eg90wpWdI+1rQ +js5lfm+pxYw8H6eSf+rl30m+amrxUlooqrSCHNVSO2c4+W5m/r3JfOiRqVUTxaO8 +0f/xYXo6SdRxdvJV18LEzOHURvkbqBjLoEfHbCC2EApevWAeCdjhvCBPl1IJZtFP +sYDpYtHhw69JmZ7Nj75cQyRtJMQ5S4GsJ/haYXNZPgRL1XBo1ntuc8K1cLZ2MucQ +1170+2pi3IvwmST+/+7+2fyms1AwF7rj2dVxNfPIvOxi6E9lHmPVxvpbuOYOEhex +XqTum/MjI17Qf6eoipk81ppCFtO9s3qNe9SBSjzYEYnsytaMdZSSjsOhE/IyYPHI +SICMjWE13du03Z5xWwK9i3UiFq+hIPhBHFPGkNFMmkQtcyS9lj9R0tKUmWdFPNa8 +nuhxn5kLUMriv3zsdhMPUC4NwM5XsopdWcuSxfnt -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key index 91bee6a78..d00b6c212 100644 --- a/testing/web3signer_tests/tls/lighthouse/key.key +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- 
-MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCci30cib5B/B5Q -Nd8grzi4LxmlyfZFi3VfpukwdwOD1Xk3ODk1OtjAzhK46YhDclvcu98m1Dnib1Z+ -eTjRuEEoekIxz2+BbOle7G52LNvuDZpD+HKucqIU3TnEKPPuTYPplZ1n/9EyxXUw -D5uTkn7xXzK8UFXUt73j6I6VFMdHlNcwLcx8KSwBDzvnGT4ew/UL+ThON3j5rIT+ -nFHDcC2zoM+6ANdVkL6GHid4/cOcYW6GxB9TRZtEasqze41bC+kXZtPlV5V2nilA -zVj8z9ynwBpHkLH+E6sMUhSEwA++QfI1gGf0FmSBgSIZ3RdPo/dphkLG8fZXKMkM -zKkRm5hcstDP6DnTIYl+CfuVez5gZ0/yelAqXNvTqMKuDhHTTRRYaOXZX4BAiQO2 -Q6a6WYLe87E2ka5AF2T2y/BPeXjUwDS/1mFIB3FUGlMLVJt8/RLznXVGoSsYaptt -iiPucQbMPEysCJ4/LZ9zxe3EDWWjpurLHGi/Y/dVziEvg1EoycixdZogKz0QVCz4 -++QI0kPDDX7So7CWni2JJuYguF/8CX8QbCT2L8jXf0uQrq76FLKj88A7lS8DzXBt -/pRryiIlDyLenJwHmrv6p+P/FYvgnJHvAEtTynxYm5GA16YWy+Djc5XVgNHjV4Td -X3GueAp+NhBBaHDFvYCbP/oXkRvNRQIDAQABAoICACCSBxxeblblQVtX8g4nVso/ -hnsPi61JiEi3/hGG2ZTe4AMEsCZqkXmABrYxZJf/3awN7K5z/n0lxB25VACScQAe -e9JIQf9wLRgCYjM1PycG7n9Q3G9+S0nDA4dUK/h7aUQ6zE68k4aYPbsbrDdmhgHr -WC+FGW6SMjCOjMfo1FOI3MLZ7I8ys8Seqkx5XIrjI4NzvWrMsN9lrSAaXwqmNuQG -Q+ID1cmoPXPDJ1xNlBrfzLK+cHQPafAwte7k+HKmhj9HtjOj5uWQn62ra+Xhy5ud -ZPpZ2Savaem81CcQnNXte5r1Fevbktq9Bt7RuM1ppIrwk8k3w5S72CTRGiYfNPJV -M1RMp46GrXVJdmx3k9LQfKdT6Gv9xTJXYQl7jN0+4uZ7QrVQHpcMpxPsATl+cQQH -wzCTbj2Oqn/30KqkZLyueN2MalRP8mVSe5nD+vvGb/sWLs52kp6QvHdlXER2RBFk -tJ5cGi+vgueoukb+qatiAE2y5MxYCqD02ShGcLos/SUQThRhL+iD8t0h+FoPTD5y -eTNZ85hF1HdypH1If8/YGETg55+fHYUAtYGT6R8lYeFMvBC05suorLBciXShOGuY -4zBbt32fPlsXlLneAtAAFv2BiJMt0TQavWHITLInFW1/aMHDV4/Pq69sRZuHdRaW -XFXD8CjnPUS5tBMQOqYhAoIBAQDLqUo7v3SpIstXmyU7BfUBuTYGS7MzjMhDxFUl -HvmbVZlOXhnPb3p4mW/XHrah9CjFBLJt3CF+PP/njwMw0YtPxCQpQwj0pI8CuveE -4Puq2wEfxVg+JKh1xidNj8230/WINzwfLCVfco7KKmjQX0MgMGaANQ0sGnt/r1eB -MwpY5uID+D5PORXUcHxBWlsVLyzZ9ZqKhAgewr3i7BLX2y7nwqEGlWTt1lxzZGCR -a8NZIAIs3qGzAgtm7O3hMz6XZulVyVSrMmmT8qXT4Lo1nW/9J6slV7Wdp9f++mr9 -m2vwrpJtmdPcA+YKPVgoFlKmZpZZbVvd+4uy8ksoxs1/cF7VAoIBAQDExnLQplq2 -BPoxEDAnlS+8Jju5en5Pk70HOfpQGUa4/6vY60x/N5sJqc6CcDySkkxRI8jLzMTe -AE9jqM+Z39MvGCH+SF9EPRopbAJIrcChXfvk2Imp7PLFRGrEBju63nQfaHdcefFy -Ia7RA8SCHLCReRdqPjSXbPAYPZK84vVNSfhrkytA4FJnaojvaqJqLQH9vB7CXv18 -Fu6w5fnrgARIoBhy2mb0QWzgd9JMsVDgS5XyX/4HBUejjXDdmLosOZ4PJ0GM2+tr -ilO/9KKhV9lqH7DcFgJBNkgVKRD1Ijr21yyOkttB5PULzaTQhzqkorGkWgyTzLWn -ksqOr2cWt0yxAoIBAElIazvAkpvht0WYv/bTF+w81uHBD4R3HgC0fubSlIJ+dKGs -XqEzvd/zZjkEBjeUga8TF5lMYojoLjUGUuGYJQbYzyJBytEs/YDAAhzAUA6Uq3zh -J/WEf1GRscbI/f8tt+YB6hJVckU8FYFNbVW9UYwdnmR3snuyM8ooL9Z/pTOEMMO4 -6cLcCazdpPhnKOsghIURSUCabcmTzXv/8m/VoLUoZYTW8PBb9/xVnCH3ot1JFT9M -BOdCzxOEIbytEdKs5z1FKsBHbZIc9+qbrKVqN0fipETVoLZQFPrc5O7IpDiAuJPT -jFZY2MfKdxRFpAvYUjVvkmT4BLapVL4hewRmTNkCggEBAKuJP8/KJSulvSEGNqRa -9kjzn376XKAsb02caixnTHK7Vuh7fq0sIThUUhT9mKBBbswRANtAv6Gz7YE4SPmf -1+6nAAM2ve2zwlm3sWoAJfvF/W+qoJ+EUsJK+TO3J1yozdwPanYwS52t5UKKIUU3 -k2jNge75GUmkCs1m58NHqoXc5PWKTtt4cf17LrJfaARdBe5Wjw3sVtdU+nE1mh+E -8rcI8Sc2Yyes3Sf07Fw0+wb8fVPUAJPIM4JNK8XRfQJOnA4jr44GrPyLkqS0sw0p -kvtjcv75JLAKjN39da3sUDCctVf4h7Cy0jee5n1uVV3uAiP+6BX0D6tsWK34FEsG -MZECggEBAIi/sjZNQjplD5zOULEWL8W6b+3CZymR5Qqa0brlx1Lz8h/daIITIFvm -bue/CjIht/oRGLVE8yzw2ojLf424h3h5PjmXMBNHlVkWQXfn6xCI8MjfZ71uA39O -RVCXAYwcghOWZL4Fkz+XQmIOdJ1OPXfU0py943joYZbgXXAYOc/zNylo9j7+bqDK -vLtFd4IIQoRzjsY//FoAuAditf4xDRqLwOh4amboZw1Qmn6bwDnCaKsFmA3o5BYR -4aRUm1dEbZgPtm2tuHQpEKuOPhWHroi3NsEdbhoyy3IUe0c3w4YGgnuvVy616wkV -GlPvUaKC1KX0CX1qT1anVZq9bSMTG+M= +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC1R1M9NnRwUsqF +vJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52aHSA2fs2KyeA +61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDEjf0ogUVM9TCE +t6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAwOz1d8/fxYJvI 
+pT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5FeV0fPth+e9XMA +H7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI0vps1zF9Bo8Q +ewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWMVcR//EtbOZGq +zGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr67Vyi9SWSM6r +dRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91fkpT6kjc6d2h4 +bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa3zLeqd89dS7H +NLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcfTPFe8xuDYsi1 +55veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABAoICAEP5a1KMPUwzF0Lfr1Jm1JUk +pLb26C2rkf3B56XIFZgddeJwHHMEkQ9Z6JYM5Bd0KJ6Y23rHgiXVN7plRvOiznMs +MAbgblroC8GbAUZ0eCJr5nxyOXQdS1jHufbA21x7FGbvsSqDkrdhR2C0uPLMyMvp +VHP7dey1mEyCkHrP+KFRU5kVxOG1WnBMqdY1Ws/uuMBdLk0xItttdOzfXhH4dHQD +wc5aAJrtusyNDFLC25Og49yIgpPMWe+gAYCm5jFz9PgRtVlDOwcxlX5J5+GSm7+U +XM1bPSmU1TSEH233JbQcqo4HkynB71ftbVUtMhEFhLBYoFO4u5Ncpr+wys0xJY4f +3aJRV5+gtlmAmsKN66GoMA10KNlLp2z7XMlx1EXegOHthcKfgf5D6LKRz8qZhknm +FFgAOg9Bak1mt1DighhPUJ0vLYU6K+u0ZXwysYygOkBJ/yj63ApuPCSTQb7U0JlL +JMgesy1om3rVdN0Oc7hNaxq7VwswkzUTUKS2ZvGozF3MmdPHNm5weJTb3NsWv8Qo +HiK1I88tY9oZ5r91SC82hMErmG4ElXFLxic1B29h3fsIe/l+WjmZRXixD9ugV0gj +CvNa8QD9K3hljlNrR6eSXeO2QOyxAEUr2N1MBlxrnAWZCzXKiTvTx1aKDYhJT0DY +zae/etTLHVjzgdH6GS33AoIBAQDaaWYHa9wkJIJPX4siVCatwWKGTjVfDb5Q9upf +twkxCf58pmbzUOXW3dbaz6S0npR0V6Wqh3S8HW7xaHgDZDMLJ1WxLJrgqDKU3Pqc +k7xnA/krWqoRVSOOGkPnSrnZo6AVc6FR+iwJjfuUu0rFDwiyuqvuXpwNsVwvAOoL +xIbaEbGUHiFsZamm2YkoxrEjXGFkZxQX9+n9f+IAiMxMQc0wezRREc8e61/mTovJ +QJ7ZDd7zLUR7Yeqciy59NOsD57cGtnp1K28I2eKLA4taghgd5bJjPkUaHg9j5Xf6 +nsxU2QCp9kpwXvtMxN7pERKWFsnmu8tfJOiUWCpp8SLbIl6nAoIBAQDUefKKjRLa +6quNW0rOGn2kx0K6sG7T45OhwvWXVjnPAjX3/2mAMALT1wc3t0iKDvpIEfMadW2S +O8x2FwyifdJXmkz943EZ/J5Tq1H0wr4NeClX4UlPIAx3CdFlCphqH6QfKtrpQ+Hf ++e8XzjVvdg8Y/RcbWgPgBtOh2oKT5QHDh13/994nH7GhVM7PjLUVvZVmNWaC77zr +bXcvJFF/81PAPWC2JoV6TL/CXvda2tG2clxbSfykfUBPBpeyEijMoxC4UMuCHhbp +NpLfKJQp9XNqbBG2K4jgLQ8Ipk6Vtia/hktLgORf/pbQ4PxEv7OP5e1AOreDg/CW +RnQtBb+/8czbAoIBABfDA8Cm8WpVNoAgKujvMs4QjgGCnLfcrOnuEw2awjs9lRxG +lki+cmLv+6IOmSK1Zf1KU9G7ru2QXjORZA0qZ4s9GkuOSMNMSUR8zh8ey46Bligr +UvlTw+x/2wdcz99nt9DdpZ1flE7tzYMe5UGPIykeufnS/TNYKmlKtivVk75B0ooE +xSof3Vczr4JqK3dnY4ki1cLNy/0yXookV+Wr+wDdRpHTWC9K+EH8JaUdjKqcobbf +I+Ywfu/NDJ++lBr2qKjoTWZV9VyHJ+hr2Etef/Uwujml2qq+vnnlyynPAPfyK+pR +y0NycfCmMoI0w0rk685YfAW75DnPZb3k6B/jG10CggEBAMxf2DoI5EAKRaUcUOHa +fUxIFhl4p8HMPy7zVkORPt2tZLf8xz/z7mRRirG+7FlPetJj4ZBrr09fkZVtKkwJ +9o8o7jGv2hSC9s/IFHb38tMF586N9nPTgenmWbF09ZHuiXEpSZPiJZvIzn/5a1Ch +IHiKyPUYKm4MYvhmM/+J4Z5v0KzrgJXlWHi0GJFu6KfWyaOcbdQ4QWG6009XAcWv +Cbn5z9KlTvKKbFDMA+UyYVG6wrdUfVzC1V6uGq+/49qiZuzDWlz4EFWWlsNsRsft +Pmz5Mjglu+zVqoZJYYGDydWjmT0w53qmae7U2hJOyqr5ILINSIOKH5qMfiboRr6c +GM0CggEAJTQD/jWjHDIZFRO4SmurNLoyY7bSXJsYAhl77j9Cw/G4vcE+erZYAhp3 +LYu2nrnA8498T9F3H1oKWnK7u4YXO8ViyQd73ql7iKrMjE98CjfGcTPCXwOcPAts +ZpM8ykgFTsJpXEFvIR5cyZ6XFSw2m/Z7CRDpmwQ8es4LpNnYA7V5Yu/zDE4h2/2T +NmftCiZvkxwgj6VyKumOxXBnGK6lB+b6YMTltRrgD/35zmJoKRdqyLb1szPJtQuh +HjRTa/BVPgA66xYFWhifRUiYKpc0bARTYofHeoDgu6yPzcHMuM70NQQGF+WWJySg +vc3Za4ClKSLmb3ZA9giTswYMev+3BQ== -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index d96ab4775..73468fa08 100644 Binary files a/testing/web3signer_tests/tls/lighthouse/key.p12 and b/testing/web3signer_tests/tls/lighthouse/key.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/web3signer.pem b/testing/web3signer_tests/tls/lighthouse/web3signer.pem index 455021447..6266cadf9 100644 --- a/testing/web3signer_tests/tls/lighthouse/web3signer.pem +++ 
b/testing/web3signer_tests/tls/lighthouse/web3signer.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUOVccYETgo2YpKO85U4XRKifK09kwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMjAzMDA1NloYDzIxMjMwODI5MDMwMDU2 +VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCr -JajqnvRQEREph+zu7rw1QjHQG1x2H44SJSMjX1Wzi9FErlRSOzywPFL2AzGsNvNS -tPmxN/kF9mBjQIQHxo90M4GcZgW1aljPaXLvQWFrP9ak+JjHuUG+j51fVJp8F2Qc -BG8i2LjjSLvkEYSULHI0kbMPws+DKcemvZJ6IhkoPkbtnx5Z1zDj8D6vvWGJguMO -VSNJY7SoBNuSB6CJ7wCWBg7UPtTUrtnuJVvUh+3k2wc7LJ+C9wd7rt+qYb8LxQuc -j8dDyncXXeI583oGzjTE+1kFrE5TuMDlnWyKPa6NQPeXQtygFTyQL9RMW6JkgWWg -tDFWqd2Mgb8sCRtl5uTJFGJ7PFBP4T69JqYhz817tDS3JrMbbzzhRzf3cB6V2NCC -zVKBrO7gfAyDwWVr5iUyaXhLGyzuGg2nMbFMj/Pr7csravs+Jq5apwyZDNTv+2WQ -xP6d2gGFwQOxcPt4OGPjtFpVHH3cxLkcGsSOZ31akuhdSJ6MqWI4tkgRpsf5Ff0+ -z8SLZaCQIp7M4O4LpMreAT7smvEQpLphK1oKWlsY6ukkJ1y8KD3EfeJRpDL0PBTy -jacQATPsqUzeryCfqAMulLLqUbNFqv6Slhzt2vr+lfIr+IeUa/7XMeZOZJu1T/7n -fTjpdokSTx8DageE4Z3j90q5d4hdXvMWq6MpQW7RqQIDAQABo1QwUjALBgNVHQ8E +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS +cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz +1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa +J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H +9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G +WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB +YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 +4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 +HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr +rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS +8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN +/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQUsBCvmwLPQDG+iN5qI6P7SgLZyP0wDQYJKoZIhvcNAQELBQADggIBAE/j -mwchm30rB+dheTRBcVD0yHgYL2tQlpfKZeX9JDVWNMQ5OYHtMVwdD7RBQJ2ypqIr -5VP6/Hf0M1GE03mnDpjv29q57AkuGFZpBvZ+1XCG87336QIPqkPR4uMJ86MalsX2 -f9GHMG4H0rd1j+ozM0jhJNoVG4lSq/GNn2E9oRjMG8lq0M7apWwK1FQUBECIlfw+ -tk9aq2zLl409vuqzgsWeffBcdVEDHGCLQauzYRtxMBbzLb33gWWd+9149dWeG5up -P0CZvdetgXhlcbusmrBWVn0O57/QDaGzEUZKxqoy8Ncv04KMYN1gOF+nO5cKn0R1 -+4yvb/NJTdo9WcdLcleqSL1Ju3kX1dCIPOpuaZ3aEwLHrvlNxT8Y5OMvRsYPINAU -6JfNGu21+Bq2nEqSqrw8Ys2hdGI+E95uXjPcsm8BZRCfxfkEeYVtx4ZaqMF+bkfD -d+uePSFp4VBWbg40RMVymr1YcNTX3CjvtLZDH4BZBdx/8YjUEUYPpC7xGoaQDGvA -+J9cVHRpxYpry5fbBmSvrKvKXU6aijLpM7etjYWzYFturpi52Ya9h3LIHd4RaBzB -0YzmatirLK/07YBUECsVcAlddIK5KOA5Nd7+oUikmrR1wMY+I/hym6fSTZGo/TDY -vDFERRj1XOOhlCzHx94SS1DS0rVTAj4uxbuZisaz +HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg +/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu +taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 +RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d +0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv +gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ +v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut 
+OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 +mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 +4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z +yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX +JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/cert.pem b/testing/web3signer_tests/tls/web3signer/cert.pem index 455021447..6266cadf9 100644 --- a/testing/web3signer_tests/tls/web3signer/cert.pem +++ b/testing/web3signer_tests/tls/web3signer/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUOVccYETgo2YpKO85U4XRKifK09kwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMjAzMDA1NloYDzIxMjMwODI5MDMwMDU2 +VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCr -JajqnvRQEREph+zu7rw1QjHQG1x2H44SJSMjX1Wzi9FErlRSOzywPFL2AzGsNvNS -tPmxN/kF9mBjQIQHxo90M4GcZgW1aljPaXLvQWFrP9ak+JjHuUG+j51fVJp8F2Qc -BG8i2LjjSLvkEYSULHI0kbMPws+DKcemvZJ6IhkoPkbtnx5Z1zDj8D6vvWGJguMO -VSNJY7SoBNuSB6CJ7wCWBg7UPtTUrtnuJVvUh+3k2wc7LJ+C9wd7rt+qYb8LxQuc -j8dDyncXXeI583oGzjTE+1kFrE5TuMDlnWyKPa6NQPeXQtygFTyQL9RMW6JkgWWg -tDFWqd2Mgb8sCRtl5uTJFGJ7PFBP4T69JqYhz817tDS3JrMbbzzhRzf3cB6V2NCC -zVKBrO7gfAyDwWVr5iUyaXhLGyzuGg2nMbFMj/Pr7csravs+Jq5apwyZDNTv+2WQ -xP6d2gGFwQOxcPt4OGPjtFpVHH3cxLkcGsSOZ31akuhdSJ6MqWI4tkgRpsf5Ff0+ -z8SLZaCQIp7M4O4LpMreAT7smvEQpLphK1oKWlsY6ukkJ1y8KD3EfeJRpDL0PBTy -jacQATPsqUzeryCfqAMulLLqUbNFqv6Slhzt2vr+lfIr+IeUa/7XMeZOZJu1T/7n -fTjpdokSTx8DageE4Z3j90q5d4hdXvMWq6MpQW7RqQIDAQABo1QwUjALBgNVHQ8E +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS +cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz +1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa +J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H +9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G +WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB +YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 +4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 +HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr +rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS +8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN +/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQUsBCvmwLPQDG+iN5qI6P7SgLZyP0wDQYJKoZIhvcNAQELBQADggIBAE/j -mwchm30rB+dheTRBcVD0yHgYL2tQlpfKZeX9JDVWNMQ5OYHtMVwdD7RBQJ2ypqIr -5VP6/Hf0M1GE03mnDpjv29q57AkuGFZpBvZ+1XCG87336QIPqkPR4uMJ86MalsX2 -f9GHMG4H0rd1j+ozM0jhJNoVG4lSq/GNn2E9oRjMG8lq0M7apWwK1FQUBECIlfw+ -tk9aq2zLl409vuqzgsWeffBcdVEDHGCLQauzYRtxMBbzLb33gWWd+9149dWeG5up -P0CZvdetgXhlcbusmrBWVn0O57/QDaGzEUZKxqoy8Ncv04KMYN1gOF+nO5cKn0R1 -+4yvb/NJTdo9WcdLcleqSL1Ju3kX1dCIPOpuaZ3aEwLHrvlNxT8Y5OMvRsYPINAU -6JfNGu21+Bq2nEqSqrw8Ys2hdGI+E95uXjPcsm8BZRCfxfkEeYVtx4ZaqMF+bkfD -d+uePSFp4VBWbg40RMVymr1YcNTX3CjvtLZDH4BZBdx/8YjUEUYPpC7xGoaQDGvA -+J9cVHRpxYpry5fbBmSvrKvKXU6aijLpM7etjYWzYFturpi52Ya9h3LIHd4RaBzB -0YzmatirLK/07YBUECsVcAlddIK5KOA5Nd7+oUikmrR1wMY+I/hym6fSTZGo/TDY 
-vDFERRj1XOOhlCzHx94SS1DS0rVTAj4uxbuZisaz +HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg +/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu +taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 +RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d +0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv +gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ +v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut +OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 +mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 +4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z +yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX +JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/key.key b/testing/web3signer_tests/tls/web3signer/key.key index 42b7ad9a9..d96975340 100644 --- a/testing/web3signer_tests/tls/web3signer/key.key +++ b/testing/web3signer_tests/tls/web3signer/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCrJajqnvRQEREp -h+zu7rw1QjHQG1x2H44SJSMjX1Wzi9FErlRSOzywPFL2AzGsNvNStPmxN/kF9mBj -QIQHxo90M4GcZgW1aljPaXLvQWFrP9ak+JjHuUG+j51fVJp8F2QcBG8i2LjjSLvk -EYSULHI0kbMPws+DKcemvZJ6IhkoPkbtnx5Z1zDj8D6vvWGJguMOVSNJY7SoBNuS -B6CJ7wCWBg7UPtTUrtnuJVvUh+3k2wc7LJ+C9wd7rt+qYb8LxQucj8dDyncXXeI5 -83oGzjTE+1kFrE5TuMDlnWyKPa6NQPeXQtygFTyQL9RMW6JkgWWgtDFWqd2Mgb8s -CRtl5uTJFGJ7PFBP4T69JqYhz817tDS3JrMbbzzhRzf3cB6V2NCCzVKBrO7gfAyD -wWVr5iUyaXhLGyzuGg2nMbFMj/Pr7csravs+Jq5apwyZDNTv+2WQxP6d2gGFwQOx -cPt4OGPjtFpVHH3cxLkcGsSOZ31akuhdSJ6MqWI4tkgRpsf5Ff0+z8SLZaCQIp7M -4O4LpMreAT7smvEQpLphK1oKWlsY6ukkJ1y8KD3EfeJRpDL0PBTyjacQATPsqUze -ryCfqAMulLLqUbNFqv6Slhzt2vr+lfIr+IeUa/7XMeZOZJu1T/7nfTjpdokSTx8D -ageE4Z3j90q5d4hdXvMWq6MpQW7RqQIDAQABAoICAAajqX2/kJn+DYDDUoJS3deB -k8HfW9sDvpzO1sH/p+kVEJdV3XTKskAiePPs/AtynsYbue+BbL5J2GOlTDHqhqRi -/qFQ9mH7KAqUzEYCjutMkNC5yaB+2/Fu7BOXij4r4KDzHZYYGvULPGm8sbxXTI9k -QxJmk+sCTwnVgxYMllYAs3ryoChrUAzZpC7oXX0qiBElZZ7qWKbneFaeB+Dt9gN7 -5O2gKdy90zu5NIqmQsjs48cMhDweBerrBed7zv/fgyOt0rS7KRtNk7H8k2Rp8bNe -Dk4paOj3yvjlXmFvAuNdLLWwHPOzWzP7PQTAzcgRGn6NWvgiExOJMX+9czQE7OVd -OY47PndUFU6zkiOMYipnsEOFrZvHrvuCquQ+5X6x8PXdK4aFJ8VphH2HTo6xXr6E -q3zTHZq7rXSuI2yLBE6JslqP3D2H022cow6iLGnuJKYVXMOcOOTrrVBJjjau/OfN -feOvEgut6T7BmdWrcdgQzh3rvvMKdawdekuQgPjNfLxR5JCjWKaKqkJ1iBZ1jkiC -LqoeelsJnWSG+P9QKO+ntt3TW7qUsMPBAHIk2UqbsZcnX9La9huiIfABP1L1qGTb -WQJiIumyCY7LDEKcaqrFbsBS45xoQVoVlDeJPAFk48947mZY+m6TnwEC/K000ENU -fYS0x+CsNmEaXGbItrZBAoIBAQDouRfE1B/bl8KktK3uQ+wwlTWpiZnzji8wg8FG -O68BsL1qmxDG0eShBQzwNdFY9HTgGu/BjPz02liXY+smB1DXgk1tuP6NXl7ZakE4 -gdaL9wifjvoTqzgf3nBJguUAxGRBpYzbYRMELnw/FSjwLykpGUTSv+jKhOqNqb8r -T/JIFq/DG2oioYuzksEdDNaWOD3CkTjkA4guBvM5iONSed4VIn4C/L31jNFXeG1u -ToowtFLr8zG2h6sfI2NWHD8cR1LKQA6hSaimrrHUFYBo4qzNJ7afVFkF/zO37UGL -isNAmMQfFE7Lqom7YcI+QRDhtBX3XsvN3Y/RPQASZWtOTr/BAoIBAQC8Q+ggBpVK -En2CWXTvoBys9Ad3le50RIH3pmM4Uv1AQeNNtT6PKRKiL18stRxDql0oGCslOJh4 -FvawJGfANVN0vu3aIwG6kg6myYxn4sP9x2VeQUktaKcdCZ4oVuG2aXwCeg92Cpmz -W7jok8qvWjmN8IDBM4iN2Q5auO0Xg7n6vjZ6EBkm+XCsIzSazgN2sLoNC2RUKbVT -U6shGkPGhHJwumXtcPp+Ogljlv/8Gc+oc5Ty+hdhmMzTGDYwy3bwd4yfIFRRSmCr -OS0V2cwnsUQkmH0c5DVVIa0s1i+nqM2epvxjQOIsBJpEwzHXY00YZb5d4jeELPqU -XUhnrKqKxQvpAoIBAFHTerL/LrBkPNDyolErWql+XR7ePd4v+RGi0dsi8xayEPeh -zBVMCYpAH1t6YMBZO5rsfa5dJzfkac/ZFv4JBniv3Q+eQwprywfA32vB4zDVTBfm -CrHNuu8ho/OE7YYGh4W5crxT9n665X68ruc8fclwlA1R4sUKVPo4W/obowGL0ILW 
-acwBZwBdsj7Hm8+3uKdnrkwlncUpNm3dXqhKJzbhKNNeEGB9AcIymq91OAuF674A -hVM7goRxSeUmC16jCU4JldtJ7d2lgOskIEFAqid8Ni7xVlfQclvSNQCeaqaU0Chp -WIct0D2tUsHW2NuzGSIgF6Krq3yTaSoOtNsUv0ECggEAFR9lVtqGO3ZRoKNazFvh -e8IxaEhpJaBeGKQRc8tT4LbDwv830qYgEhRQkFqNnkXqB8qWZKmx6Z9h9CdRgK46 -+9lEJHpTAlTK0gnA+BLoPHv3spiOlkqsnURr+0isMGQrZre9LlhIIGiFGYsjbYMo -+/Tk7UhT5N5ajvE6oK3F2w0mXZGa0NWhv55/k3LTzqhLZ5VEn3DCiGPVynQA8LAB -iwZO01IeuLTYQtU5SVa4BsVZC93la6zSJkkMI3Ngl+BB5cSh0TEQIYXbuhzim/12 -kMiPGQO9vBx4KpSpah01XLyNirFH7vphOJ/R4sGgb8FSl4P/CJRnVOgWbJNh2wn6 -qQKCAQAkZMqlOokxcpiNLDyBS33mLzVuVIXIBsKmZibmurWxcXvyHGA7K/uHRvE/ -5pajoO8Pw9dQhAX2LmOISW8YJwR0UR9LmDOeYUW+8nypG2jprKezMVSNu+lWHanE -vw+fLvRWyDEdKQK6RHOytHppFn48eC5HrPdOe4EaNQ09vUiMsJmVL6ep4nuAg4nr -WilB9iJQtrFcItB5tnfD2puJQKaFV3rgqWCFIgJJg0ThuiWyoVNKtlRvv5o3mQyz -Y+jyCm4RtgSDm9+e/Tcv2vUeoiNt2bVb9tK3r2M2cZ6N1PuHV/cmBjf6I/ssPqmM -CXDusRSlsQNpzHc6QKq8IDZLut9g +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDScvshqu3747j4 +KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz1FPflpj7pVWa +gWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaaJ3smHx2+VOha +MWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H9C0UBIzXP7Pn +GrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6GWLtJvk5ekfOV +lRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pBYKPThE5zW5Kh +IxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK84y5L4BXxxohG +0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8HtmSgkPEgfSV +RxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQrrrIUQAuXDcQX +11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS8kbmmuhxshsn +ZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN/IC4PpwtYUO3 +/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABAoICABRxePXJ+KOpznPE5Owo7BWe +BqTzC/K1xlCYm0v5IJzYEQlM4e4p4wZ+/kR6Hex/nM4IR+bbZpxjcOUObIsWpJTI +VAgS2y5RcTp+UJzfXpJogIpKiqBMNutAqPOrK8Hg797PtlsmAKoBmNn8xqU1+2Oa +FX/rKaJus6qKZ2bz16DnkFUL4foabDJte0IFbd2yAyGv1ZqGiqFKSJFK+wYeoMZU +LzWOEyUR/wK5ryVwJJCY8z9BKAoKNYnb4oHTFlDRDdztIlxv29sR9dtHsjA3EdQc +nOCTNi7eY6JJlucgBSWGrsS6vTvpImGggIIWt6sOh0Px6Fg0F7mFtsESex2GePow +50MwKFbbVo3TUYRYTggJj7ba4+yrl/dsAWJUX3F90xNj/6REF+2+Licb7kgCHQKw +TvdExiikOOFtuFRkl5fqyoM9Ph+sj7/db5Pd53D8vaMjR3Yw/JA5dKPZS5ZKHBs0 +qo7FxV8ZlOESMv2eF6y0kM4wLhUN8wnEWxpsFWtXDNjYIlQ6W5qrfwR1vlnIkrmb +bYQCJFtko6CKUEa8yb4OvLgyX6VSskeYEC5zdekivZWJN/OZZa/xIS2nupYqD4GT +Y3QcsEhfzDvVIwI7M+eBwS5qjgdwN2qEGrXva5KKesb2zdjNircKaUahTWJNYHjj +jHGOSY/vyGFH2HFZNYZpAoIBAQDyoMpeXBDQhAXbHpIm6p8KljqRMHU05UeRRWVR +d0RKXGYq/bUzoAhr8F2QE2+HC+2NnBGh6qR5QNO/6H6p8Du6aSXDaDNJxTErOOmY +pAkbOlcA7TjpDSrNUr4EfAXl6vUF7JB8jJHEXIqBkbGWOFYPzwLEwErQAlQN2u4e +u9HKG3Me+DP2IcrCgZ5iWvmjV4l+vXYyBEXoJqHOWEscWXHiz64c336oZqwqKe/x +s8Xy2sd6FRU/mp34wXT4kZ56/U4BV+DEN20fffBiTfMQxKmXhMykmD/O63dASCiA +seZrZK5mRND+aS95MqI6FMm0ToKj24RvvAWR8w50cuF7wl5zAoIBAQDeDC6ImN7K +mSLaMBaIhoZsJDdG0cJiFPRmwtepeoWt4qUWuc51LOFthhlkyGx/JbEzFMK6uYTu +hHHNOgk6ydrz1+HOzpSvN0Iz61j1hJd8Ve/0MyTBg912FPe2p3hR9dN4j5Ly+oes +QvNIr/ReW5HJhDcgXm/9oT68XyzrKM3t93XPoO4wDPSHPbRWE2dzLrNi1xg/ZyRz +ZLAtBsGPG5rVAeSEob0ytZH2H1pHfkRQ/1jSKxwb+QVMfjDd5FrEAMLA4E6J8HFz +RDHTmrveGrR1i5BJrce3VUOAuL7Y3iw6Sb+b1LyA8htxiYfBVdVfCeocDv64m0R5 +NJs6Milm9uk1AoIBAQCdQLForusG+kqBVjMLng0uY2faKjoM6n2UHhIo1tAgEfr1 +6jHDH/nVW5iIhNBICucQXRLgip/HJskXHKzbn6RWkUe0epijO3c+uEhOciKkzw8M +vrOf+LTBFtupNGjuN3ZPPJ/42XKwffoXOEKNRj4hSN5Wfvr+DkREJp0mtjymbVwT +unKTGBu+LRxmSuh5gYbP6iPtDu/wIvnEL12fJim2Azyp4gDJTKJRQZUOZqHpYPrg +mUGIU8IHM/uID3lT5VDldftrsTC8tHdUf4kGWTBB0ASCuVrB1cMYmqwFnUfmWv7d +scRy3+Gw/6w9ULPadPgfE2umr4o8qfe4aazS9YsZAoIBADZH+hQwcr5KQ0fdW5TS +dgf3rn+khYVepAR++yOWLRm9/yeYEo14hD82+fw2Nre6aiAXoibtdT6tp/hIiLsT 
+X3AexTe+LoDK3Gc+0Edsu2+MvpUO75xS9Q+JvqirNfGrS5/8USsO7Z3B3CFXykBK +2E/P/33tOCljgqegCKYQGo9i4Cz6pV+fuyNYhT5Jjg+NShMOjAHr3/BJm/vV2/l1 +ARuzU77MnyjHVEA7l+FET8URNxBhs4RvEsmJS77itQGXQgTOkMSNv94yvI+DEwwP +sS/PB13LmrgJou/TuevgHCW/o5Sfo9lN1kGiIkq0Be4uyUlErSZJ5qpOnufSHWbr +U0UCggEAC5WM3BXKo11Y+XphsYnpJesiB9C5HMvhnB5oCHH7ffIVqkXp2AiUnWy6 +HE+DwUWFEtRLYr4beTXn+TeunoQa7X5K1JXV41XENf5CsbQTIUnX2j7o2ilCEx9C +rDPtpUZPObqXHBiHSF67Il7GitCud+7YDAGqbJABlV3WF0MkPIfW/cxN3cb65FoI +AEV3OZiS6zvDR91++ovNV5QAmH1vljvipM7kKy5RsLFF8GYa0KNTNJ/EYojKmw00 +2OakG0pjjDcWjfdGI+i5gcHNUZwbgqx4NG/RY3YslJswBhGGlhEGuuUtpH47HTM2 +oJ/aHbXf6PdOO9MYiI/es/dfKK8ywA== -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/web3signer/key.p12 b/testing/web3signer_tests/tls/web3signer/key.p12 index 51269be8d..792dc197f 100644 Binary files a/testing/web3signer_tests/tls/web3signer/key.p12 and b/testing/web3signer_tests/tls/web3signer/key.p12 differ diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt index 330132731..c4722fe58 100644 --- a/testing/web3signer_tests/tls/web3signer/known_clients.txt +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -1 +1 @@ -lighthouse FF:4C:84:A6:37:28:EC:7E:A7:D8:C6:49:0D:C6:F9:5D:C1:06:BA:6D:69:49:0A:AA:38:32:01:2B:ED:D9:F2:FA +lighthouse 02:D0:A8:C0:6A:59:90:40:54:67:D4:BD:AE:5A:D4:F5:14:A9:79:38:98:E0:62:93:C1:77:13:FC:B4:60:65:CE diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 46f523c99..18b71afc3 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -56,7 +56,7 @@ itertools = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } task_executor = { workspace = true } -reqwest = { workspace = true } +reqwest = { workspace = true, features = ["native-tls"] } url = { workspace = true } malloc_utils = { workspace = true } sysinfo = { workspace = true } diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile index e3d935b4c..0663b3cba 100644 --- a/validator_client/slashing_protection/Makefile +++ b/validator_client/slashing_protection/Makefile @@ -6,20 +6,23 @@ ARCHIVE_URL := https://github.com/eth-clients/slashing-protection-interchange-te ifeq ($(OS),Windows_NT) ifeq (, $(shell where rm)) - rmfile = if exist $(1) (del /F /Q $(1)) - rmdir = if exist $(1) (rmdir /Q /S $(1)) + rmfile = if exist $(1) (del /F /Q $(1)) + rmdir = if exist $(1) (rmdir /Q /S $(1)) + makedir = if not exist $(1) (mkdir $(1)) else - rmfile = rm -f $(1) - rmdir = rm -rf $(1) + rmfile = rm -f $(1) + rmdir = rm -rf $(1) + makedir = mkdir -p $(1) endif else - rmfile = rm -f $(1) - rmdir = rm -rf $(1) + rmfile = rm -f $(1) + rmdir = rm -rf $(1) + makedir = mkdir -p $(1) endif $(OUTPUT_DIR): $(TARBALL) $(call rmdir,$@) - mkdir $@ + $(call makedir,$@) tar --strip-components=1 -xzf $^ -C $@ $(TARBALL): diff --git a/validator_client/slashing_protection/tests/interop.rs b/validator_client/slashing_protection/tests/interop.rs index ee5bb1147..ee8f522cd 100644 --- a/validator_client/slashing_protection/tests/interop.rs +++ b/validator_client/slashing_protection/tests/interop.rs @@ -25,8 +25,10 @@ fn test_root_dir() -> PathBuf { .join("tests") } +// NOTE: I've combined two tests together to avoid a race-condition which occurs when fighting over +// which test builds the TEST_ROOT_DIR lazy static. 
#[test] -fn generated() { +fn generated_and_with_minification() { for entry in TEST_ROOT_DIR .join("generated") .read_dir() @@ -37,10 +39,7 @@ fn generated() { let test_case: MultiTestCase = serde_json::from_reader(&file).unwrap(); test_case.run(false); } -} -#[test] -fn generated_with_minification() { for entry in TEST_ROOT_DIR .join("generated") .read_dir() diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index f0a9258c7..1b7b391a0 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -491,6 +491,14 @@ impl AttestationService { ) -> Result<(), String> { let log = self.context.log(); + if !validator_duties + .iter() + .any(|duty_and_proof| duty_and_proof.selection_proof.is_some()) + { + // Exit early if no validator is aggregator + return Ok(()); + } + let aggregated_attestation = &self .beacon_nodes .first_success( diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index f654833cb..669edc671 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1176,7 +1176,8 @@ pub fn serve( .or(get_fee_recipient) .or(get_gas_limit) .or(get_std_keystores) - .or(get_std_remotekeys), + .or(get_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), ) .or(warp::post().and( post_validators @@ -1187,15 +1188,18 @@ pub fn serve( .or(post_fee_recipient) .or(post_gas_limit) .or(post_std_keystores) - .or(post_std_remotekeys), + .or(post_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), )) - .or(warp::patch().and(patch_validators)) + .or(warp::patch() + .and(patch_validators.recover(warp_utils::reject::handle_rejection))) .or(warp::delete().and( delete_lighthouse_keystores .or(delete_fee_recipient) .or(delete_gas_limit) .or(delete_std_keystores) - .or(delete_std_remotekeys), + .or(delete_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), )), ) // The auth route and logs are the only routes that are allowed to be accessed without the API token. diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index d60872e49..f301af1c2 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -2146,7 +2146,7 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); let vals = tester.initialized_validators.read(); - let web3_vals = vals.validator_definitions().clone(); + let web3_vals = vals.validator_definitions(); // Import remotekeys. let import_res = tester @@ -2164,7 +2164,7 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); let vals = tester.initialized_validators.read(); - let remote_vals = vals.validator_definitions().clone(); + let remote_vals = vals.validator_definitions(); // Web3signer should not be overwritten since it is enabled. assert!(web3_vals == remote_vals);
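Note on the warp routing change above: a plausible reading of attaching `.recover(warp_utils::reject::handle_rejection)` to each method group (GET/POST/PATCH/DELETE) rather than relying on a single top-level handler is that warp keeps trying the remaining `.or()` branches after a rejection, so an error raised inside one group can otherwise surface as a misleading response (e.g. a 405 from a later method filter). The standalone sketch below is illustrative only, not Lighthouse code: the `Unauthorized` rejection type, the local `handle_rejection`, the route paths and the port are all invented for the example, and it assumes `warp` and `tokio` (with the "macros" and "rt-multi-thread" features) as dependencies.

use warp::{Filter, Rejection, Reply};

// Hypothetical custom rejection standing in for whatever errors the real API raises.
#[derive(Debug)]
struct Unauthorized;
impl warp::reject::Reject for Unauthorized {}

// Local stand-in for `warp_utils::reject::handle_rejection`: convert known rejections
// into explicit HTTP replies and pass anything else through unchanged.
async fn handle_rejection(err: Rejection) -> Result<impl Reply, Rejection> {
    if err.find::<Unauthorized>().is_some() {
        Ok(warp::reply::with_status(
            "unauthorized",
            warp::http::StatusCode::UNAUTHORIZED,
        ))
    } else {
        Err(err)
    }
}

#[tokio::main]
async fn main() {
    // A GET route that always rejects, simulating e.g. a failed auth check.
    let get_routes = warp::get()
        .and(warp::path("protected"))
        .and_then(|| async { Err::<String, Rejection>(warp::reject::custom(Unauthorized)) })
        // Recover inside the GET group so the rejection becomes a 401 here instead of
        // falling through to the POST group below and producing a confusing status.
        .recover(handle_rejection);

    let post_routes = warp::post().and(warp::path("submit")).map(|| "ok");

    warp::serve(get_routes.or(post_routes))
        .run(([127, 0, 0, 1], 3030))
        .await;
}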