diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml
index 40de0bd0a..84f5541a3 100644
--- a/.github/workflows/docker-antithesis.yml
+++ b/.github/workflows/docker-antithesis.yml
@@ -17,7 +17,7 @@ jobs:
   build-docker:
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Update Rust
         run: rustup update stable
       - name: Dockerhub login
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 8d72319c6..13b841169 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -22,7 +22,7 @@ jobs:
   # `unstable`, but for now we keep the two parts of the version separate for backwards
   # compatibility.
   extract-version:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     steps:
      - name: Extract version (if stable)
        if: github.event.ref == 'refs/heads/stable'
@@ -44,7 +44,7 @@ jobs:
       VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
   build-docker-single-arch:
     name: build-docker-${{ matrix.binary }}
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     strategy:
       matrix:
         binary: [aarch64,
@@ -61,7 +61,7 @@ jobs:
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
      - name: Update Rust
        run: rustup update stable
      - name: Dockerhub login
@@ -102,7 +102,7 @@ jobs:
           --push
   build-docker-multiarch:
     name: build-docker-multiarch${{ matrix.modernity }}
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     needs: [build-docker-single-arch, extract-version]
     strategy:
       matrix:
@@ -123,13 +123,13 @@ jobs:
           --amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }};
           docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }}
   build-docker-lcli:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     needs: [extract-version]
     env:
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
      - name: Dockerhub login
        run: |
          echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml
index 30a891feb..4d4e92ae1 100644
--- a/.github/workflows/linkcheck.yml
+++ b/.github/workflows/linkcheck.yml
@@ -15,7 +15,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

      - name: Create docker network
        run: docker network create book
diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml
index 170bd9e21..b916ffee6 100644
--- a/.github/workflows/local-testnet.yml
+++ b/.github/workflows/local-testnet.yml
@@ -12,11 +12,11 @@ jobs:
     strategy:
       matrix:
         os:
-          - ubuntu-18.04
-          - macos-latest
+          - ubuntu-22.04
+          - macos-12
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Get latest version of stable Rust
        run: rustup update stable
@@ -28,7 +28,7 @@ jobs:
         run: npm install ganache@latest --global

       # https://github.com/actions/cache/blob/main/examples.md#rust---cargo
-      - uses: actions/cache@v2
+      - uses: actions/cache@v3
        id: cache-cargo
        with:
          path: |
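The local-testnet.yml changes above move the job matrix off the deprecated ubuntu-18.04 runner image and pin macos-latest to an explicit macos-12, so the macOS image cannot change underneath the job; actions/checkout and actions/cache also move to their v3 releases. The excerpt cuts off inside the cache step's path list; for reference, the upstream Rust example that the in-file comment links to looks roughly like this (a sketch of that upstream pattern, not necessarily the exact list this workflow caches):

    - uses: actions/cache@v3
      id: cache-cargo
      with:
        path: |
          ~/.cargo/registry/index/
          ~/.cargo/registry/cache/
          ~/.cargo/git/db/
          target/
        key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}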
"::set-output name=TAG::$(echo ${GITHUB_REF#refs/tags/})" + run: echo "TAG=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT id: extract_tag outputs: TAG: ${{ steps.extract_tag.outputs.TAG }} @@ -30,7 +30,7 @@ jobs: env: TAG: ${{ needs.extract-tag.outputs.TAG }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Update Rust run: rustup update stable - name: Cargo login diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6edb1f76c..957d016dc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Extract version - run: echo "::set-output name=VERSION::$(echo ${GITHUB_REF#refs/tags/})" + run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT id: extract_version outputs: VERSION: ${{ steps.extract_version.outputs.VERSION }} @@ -62,7 +62,7 @@ jobs: needs: extract-version steps: - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Build toolchain uses: actions-rs/toolchain@v1 with: @@ -199,7 +199,7 @@ jobs: steps: # This is necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts. - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 @@ -216,7 +216,7 @@ jobs: - name: Generate Full Changelog id: changelog - run: echo "::set-output name=CHANGELOG::$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" + run: echo "CHANGELOG=$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT - name: Create Release Draft env: diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index a3e9625b5..d536869e4 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -24,12 +24,12 @@ jobs: extract-msrv: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Extract Minimum Supported Rust Version (MSRV) run: | metadata=$(cargo metadata --no-deps --format-version 1) msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - echo "::set-output name=MSRV::$msrv" + echo "MSRV=$msrv" >> $GITHUB_OUTPUT id: extract_msrv outputs: MSRV: ${{ steps.extract_msrv.outputs.MSRV }} @@ -37,7 +37,7 @@ jobs: name: cargo-fmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Check formatting with cargo fmt @@ -47,11 +47,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run tests in release @@ -61,7 +63,7 @@ jobs: runs-on: windows-2019 needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Use Node.js @@ -89,11 +91,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + 
diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index a3e9625b5..d536869e4 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -24,12 +24,12 @@ jobs:
   extract-msrv:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Extract Minimum Supported Rust Version (MSRV)
         run: |
           metadata=$(cargo metadata --no-deps --format-version 1)
           msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version')
-          echo "::set-output name=MSRV::$msrv"
+          echo "MSRV=$msrv" >> $GITHUB_OUTPUT
         id: extract_msrv
     outputs:
       MSRV: ${{ steps.extract_msrv.outputs.MSRV }}
@@ -37,7 +37,7 @@ jobs:
     name: cargo-fmt
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Check formatting with cargo fmt
  release-tests-ubuntu:
@@ -47,11 +47,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install ganache
        run: sudo npm install -g ganache
      - name: Run tests in release
  release-tests-windows:
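The repo-token input that this and the following hunks add to every arduino/setup-protoc@v1 step lets the action authenticate the GitHub API request it makes to resolve and download a protoc release; anonymous requests from shared CI runners routinely trip the unauthenticated rate limit. The action also accepts an optional version pin, which this diff leaves at the default; a sketch of the fuller step shape (the version line is illustrative only):

    - name: Install Protoc
      uses: arduino/setup-protoc@v1
      with:
        version: '3.x'  # illustrative pin; the workflow relies on the action's default
        repo-token: ${{ secrets.GITHUB_TOKEN }}  # authenticates the release lookup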
@@ -61,7 +63,7 @@ jobs:
     runs-on: windows-2019
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Use Node.js
@@ -89,11 +91,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run beacon_chain tests for all known forks
        run: make test-beacon-chain
  op-pool-tests:
@@ -101,11 +105,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run operation_pool tests for all known forks
        run: make test-op-pool
  slasher-tests:
@@ -113,7 +119,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Run slasher tests for all supported backends
@@ -123,11 +129,13 @@ jobs:
     runs-on: ubuntu-22.04
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install ganache
        run: sudo npm install -g ganache
      - name: Run tests in debug
@@ -137,11 +145,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run state_transition_vectors in release.
        run: make run-state-transition-tests
  ef-tests-ubuntu:
@@ -149,11 +159,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run consensus-spec-tests with blst, milagro and fake_crypto
        run: make test-ef
  dockerfile-ubuntu:
@@ -161,7 +173,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Build the root Dockerfile
@@ -173,11 +185,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install ganache
        run: sudo npm install -g ganache
      - name: Run the beacon chain sim that starts from an eth1 contract
@@ -187,11 +201,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install ganache
        run: sudo npm install -g ganache
      - name: Run the beacon chain sim and go through the merge transition
@@ -201,11 +217,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install ganache
        run: sudo npm install -g ganache
      - name: Run the beacon chain sim without an eth1 connection
@@ -215,11 +233,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install ganache
        run: sudo npm install -g ganache
      - name: Run the syncing simulator
@@ -229,11 +249,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install ganache
        run: sudo npm install -g ganache
      - name: Install lighthouse and lcli
@@ -253,17 +275,19 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-go@v2
+      - uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
         with:
           go-version: '1.17'
-      - uses: actions/setup-dotnet@v1
+      - uses: actions/setup-dotnet@v3
         with:
           dotnet-version: '6.0.201'
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run exec engine integration tests in release
        run: make test-exec-engine
  check-benchmarks:
@@ -271,11 +295,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Typecheck benchmark code without running it
        run: make check-benches
  check-consensus:
@@ -283,7 +309,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Typecheck consensus code in strict mode
@@ -293,11 +319,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Lint code for quality and style with Clippy
        run: make lint
      - name: Certify Cargo.lock freshness
@@ -308,7 +336,7 @@ jobs:
     needs: cargo-fmt
     continue-on-error: true
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Install SigP Clippy fork
        run: |
          cd ..
@@ -319,6 +347,8 @@ jobs:
           cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run Clippy with the disallowed-from-async lint
        run: make nightly-lint
  check-msrv:
@@ -326,11 +356,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: [cargo-fmt, extract-msrv]
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
         run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run cargo check
        run: cargo check --workspace
  arbitrary-check:
@@ -338,7 +370,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Validate state_processing feature arbitrary-fuzz
@@ -348,7 +380,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database
@@ -358,7 +390,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
      - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose
        run: CARGO_HOME=$(readlink -f $HOME) make vendor
  cargo-udeps:
@@ -366,13 +398,15 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Install Rust (${{ env.PINNED_NIGHTLY }})
         run: rustup toolchain install $PINNED_NIGHTLY
       # NOTE: cargo-udeps version is pinned until this issue is resolved:
       # https://github.com/est31/cargo-udeps/issues/135
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install cargo-udeps
        run: cargo install cargo-udeps --locked --force --version 0.1.30
      - name: Create Cargo config dir
@@ -384,3 +418,14 @@ jobs:
         env:
           # Allow warnings on Nightly
           RUSTFLAGS: ""
+  compile-with-beta-compiler:
+    name: compile-with-beta-compiler
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install dependencies
+        run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
+      - name: Use Rust beta
+        run: rustup override set beta
+      - name: Run make
+        run: make
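The new compile-with-beta-compiler job gives early warning when the upcoming Rust release would break the build: rustup override set beta pins the beta toolchain for the checked-out directory, and make then compiles the workspace with it. As written, a beta regression fails the run; if it were meant to be advisory instead, the job could adopt the continue-on-error pattern the Clippy-fork job above already uses. A sketch of that hypothetical variant (not what this diff does):

    compile-with-beta-compiler:
      name: compile-with-beta-compiler
      runs-on: ubuntu-latest
      continue-on-error: true  # hypothetical: report beta breakage without failing CI
      steps:
        - uses: actions/checkout@v3
        - name: Use Rust beta
          run: rustup override set beta
        - name: Run make
          run: make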
+name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -141,24 +150,24 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.58" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" [[package]] name = "arbitrary" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a7924531f38b1970ff630f03eb20a2fde69db5c590c93b0f3482e95dcc5fd60" +checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" dependencies = [ "derive_arbitrary", ] [[package]] name = "arc-swap" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" [[package]] name = "arrayref" @@ -201,9 +210,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" dependencies = [ "proc-macro2", "quote", @@ -285,9 +294,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.16" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" +checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" dependencies = [ "async-trait", "axum-core", @@ -297,7 +306,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.2", + "itoa 1.0.4", "matchit", "memchr", "mime", @@ -316,9 +325,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" +checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" dependencies = [ "async-trait", "bytes", @@ -353,15 +362,15 @@ checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64ct" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "beacon-api-client" @@ -439,7 +448,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.2.1" +version = "3.3.0" dependencies = [ "beacon_chain", "clap", @@ -535,7 +544,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -550,9 +559,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] @@ -597,7 +606,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.2.1" +version = "3.3.0" dependencies = [ "beacon_node", "clap", @@ -662,15 +671,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.10.0" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "byte-slice-cast" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byteorder" @@ -680,9 +689,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0b3de4a0c5e67e16066a0715723abd91edc2f9001d09c46e1dca929351e130e" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" dependencies = [ "serde", ] @@ -731,9 +740,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "581f5dba903aac52ea3feb5ec4810848460ee833876f1f9b0fdeab1f19091574" [[package]] name = "cexpr" @@ -777,14 +786,16 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", "num-traits", "time 0.1.44", + "wasm-bindgen", "winapi", ] @@ -799,9 +810,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -871,7 +882,7 @@ dependencies = [ "slot_clock", "store", "task_executor", - "time 0.3.11", + "time 0.3.16", "timer", "tokio", "types", @@ -879,13 +890,23 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.48" +version = "0.1.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" dependencies = [ "cc", ] +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + [[package]] name = "compare_fields" version = "0.2.0" @@ -1025,26 +1046,24 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", "memoffset", - "once_cell", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -1055,12 +1074,12 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1128,11 +1147,11 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.2" +version = "3.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" +checksum = "1d91974fbbe88ec1df0c24a4f00f99583667a7e2e6272b2b92d294d81e462173" dependencies = [ - "nix 0.24.2", + "nix 0.25.0", "winapi", ] @@ -1157,11 +1176,55 @@ checksum = "4033478fbf70d6acf2655ac70da91ee65852d69daf7a67bf7a2f518fb47aafcf" dependencies = [ "byteorder", "digest 0.9.0", - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", "zeroize", ] +[[package]] +name = "cxx" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "darling" version = "0.13.4" @@ -1249,9 +1312,9 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" [[package]] name = "delay_map" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6716ce9729be9628979ae1ff63e8bc8b7ad53b5472a2633bf079607a55328d36" +checksum = "9c4d75d3abfe4830dcbf9bcb1b926954e121669f74dd1ca7aa0183b1755d83f6" dependencies = [ "futures", "tokio-util 0.6.10", @@ -1266,7 +1329,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.10.2", + "sha2 0.10.6", "tree_hash", "types", ] @@ -1294,9 +1357,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a577516173adb681466d517d39bd468293bc2c2a16439375ef0f35bba45f3d" +checksum = "4903dff04948f22033ca30232ab8eca2c3fc4c913a8b6a34ee5199699814817f" dependencies = [ "proc-macro2", "quote", @@ -1327,11 +1390,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.3", "crypto-common", "subtle", ] @@ -1422,9 +1485,9 @@ dependencies = [ [[package]] name = "dtoa" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5caaa75cbd2b960ff1e5392d2cfb1f44717fffe12fc1f32b7b5d1267f99732a6" +checksum = "f8a6eee2d5d0d113f015688310da018bd1d864d86bd567c8fca9c266889e1bfa" [[package]] name = "ecdsa" @@ -1494,9 +1557,9 @@ dependencies = [ [[package]] name = "either" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "elliptic-curve" @@ -1507,12 +1570,12 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.3", + "digest 0.10.5", "ff", "generic-array", "group", "pkcs8", - "rand_core 0.6.3", + "rand_core 0.6.4", "sec1", "subtle", "zeroize", @@ -1543,7 +1606,7 @@ dependencies = [ "rand 0.8.5", "rlp", "serde", - "sha3 0.10.1", + "sha3 0.10.6", "zeroize", ] @@ -1571,9 +1634,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "c90bf5f19754d10198ccb95b70664fc925bd1fc090a0fd9a6ebc54acc8cd6272" dependencies = [ "atty", "humantime", @@ -1702,7 +1765,7 @@ dependencies = [ "lazy_static", "ring", "rustc-hex", - "sha2 0.10.2", + "sha2 0.10.6", "wasm-bindgen-test", ] @@ -1729,7 +1792,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.10.2", + "sha2 0.10.6", "zeroize", ] @@ -1859,9 +1922,9 @@ dependencies = [ [[package]] name = "ethabi" -version = "17.1.0" +version = "17.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f186de076b3e77b8e6d73c99d1b52edc2a229e604f4b5eb6992c06c11d79d537" +checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" dependencies = [ "ethereum-types 0.13.1", "hex", @@ -1869,7 +1932,7 @@ dependencies = [ "regex", "serde", "serde_json", - "sha3 0.10.1", + "sha3 0.10.6", "thiserror", "uint", ] @@ -1960,7 +2023,7 @@ dependencies = [ "bytes", "chrono", "elliptic-curve", - "ethabi 17.1.0", + "ethabi 17.2.0", "fastrlp", "generic-array", "hex", @@ -1991,7 +2054,7 @@ dependencies = [ "futures-core", "futures-timer", 
"futures-util", - "getrandom 0.2.7", + "getrandom 0.2.8", "hashers", "hex", "http", @@ -2128,9 +2191,9 @@ dependencies = [ [[package]] name = "fastrlp-derive" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fa41ebc231af281098b11ad4a4f6182ec9096902afffe948034a20d4e1385a" +checksum = "d9e9158c1d8f0a7a716c9191562eaabba70268ba64972ef4871ce8d66fd08872" dependencies = [ "bytes", "proc-macro2", @@ -2140,11 +2203,11 @@ dependencies = [ [[package]] name = "ff" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", ] @@ -2239,11 +2302,10 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -2271,9 +2333,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -2286,9 +2348,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -2296,15 +2358,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -2314,15 +2376,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -2331,26 +2393,26 @@ dependencies = [ [[package]] name = "futures-rustls" -version = "0.22.1" +version = 
"0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01fe9932a224b72b45336d96040aa86386d674a31d0af27d800ea7bc8ca97fe" +checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.20.6", + "rustls 0.20.7", "webpki 0.22.0", ] [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-timer" @@ -2360,9 +2422,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", @@ -2387,9 +2449,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -2431,9 +2493,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "js-sys", @@ -2488,20 +2550,20 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "group" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff", - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", ] [[package]] name = "h2" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -2512,7 +2574,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.3", + "tokio-util 0.7.4", "tracing", ] @@ -2560,9 +2622,9 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64", "bitflags", @@ -2571,7 +2633,7 @@ dependencies = [ "http", "httpdate", "mime", - "sha-1 0.10.0", + "sha1", ] [[package]] @@ -2645,7 
+2707,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -2678,7 +2740,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.2", + "itoa 1.0.4", ] [[package]] @@ -2704,6 +2766,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "bs58", + "directory", "environment", "eth1", "eth2", @@ -2728,6 +2791,8 @@ dependencies = [ "slot_clock", "state_processing", "store", + "sysinfo", + "system_health", "task_executor", "tokio", "tokio-stream", @@ -2761,9 +2826,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -2779,9 +2844,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "abfba89e19b959ca163c7752ba59d737c1ceea53a5d31a149c805446fc958064" dependencies = [ "bytes", "futures-channel", @@ -2792,7 +2857,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.2", + "itoa 1.0.4", "pin-project-lite 0.2.9", "socket2", "tokio", @@ -2809,7 +2874,7 @@ checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ "http", "hyper", - "rustls 0.20.6", + "rustls 0.20.7", "tokio", "tokio-rustls 0.23.4", ] @@ -2827,6 +2892,30 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -2844,6 +2933,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.6.7" @@ -2903,7 +3002,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", ] [[package]] @@ -2995,9 +3094,9 @@ checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = 
"b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] @@ -3010,15 +3109,15 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "js-sys" -version = "0.3.59" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] @@ -3061,8 +3160,8 @@ dependencies = [ "cfg-if", "ecdsa", "elliptic-curve", - "sha2 0.10.2", - "sha3 0.10.1", + "sha2 0.10.6", + "sha3 0.10.6", ] [[package]] @@ -3088,7 +3187,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.2.1" +version = "3.3.0" dependencies = [ "account_utils", "beacon_chain", @@ -3097,7 +3196,7 @@ dependencies = [ "clap_utils", "deposit_contract", "directory", - "env_logger 0.9.0", + "env_logger 0.9.1", "environment", "eth1_test_rig", "eth2", @@ -3147,9 +3246,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.126" +version = "0.2.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" [[package]] name = "libflate" @@ -3183,9 +3282,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" +checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" [[package]] name = "libmdbx" @@ -3211,7 +3310,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.7", + "getrandom 0.2.8", "instant", "lazy_static", "libp2p-core", @@ -3261,7 +3360,7 @@ dependencies = [ "prost-build", "rand 0.8.5", "rw-stream-sink", - "sha2 0.10.2", + "sha2 0.10.6", "smallvec", "thiserror", "unsigned-varint 0.7.1", @@ -3305,7 +3404,7 @@ dependencies = [ "prost-build", "rand 0.7.3", "regex", - "sha2 0.10.2", + "sha2 0.10.6", "smallvec", "unsigned-varint 0.7.1", "wasm-timer", @@ -3378,7 +3477,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.5", - "sha2 0.10.2", + "sha2 0.10.6", "snow", "static_assertions", "x25519-dalek", @@ -3424,9 +3523,9 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.30.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f02622b9dd150011b4eeec387f8bd013189a2f27da08ba363e7c6e606d77a48" +checksum = "a0eddc4497a8b5a506013c40e8189864f9c3a00db2b25671f428ae9007f3ba32" dependencies = [ "heck", "quote", @@ -3554,7 +3653,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.2.1" +version = "3.3.0" dependencies = [ "account_manager", "account_utils", @@ -3565,7 +3664,7 @@ dependencies = [ "clap_utils", "database_manager", "directory", - "env_logger 0.9.0", + "env_logger 0.9.1", "environment", "eth1", "eth2_hashing", @@ -3629,7 +3728,7 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 0.10.2", + "sha2 0.10.6", "slog", "slog-async", 
"slog-term", @@ -3660,6 +3759,15 @@ dependencies = [ "target_info", ] +[[package]] +name = "link-cplusplus" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +dependencies = [ + "cc", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -3689,9 +3797,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -3892,23 +4000,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -3957,14 +4065,14 @@ dependencies = [ [[package]] name = "multihash" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3db354f401db558759dfc1e568d010a5d4146f4d3f637be1275ec4a3cf09689" +checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" dependencies = [ "core2", - "digest 0.10.3", + "digest 0.10.5", "multihash-derive", - "sha2 0.10.2", + "sha2 0.10.6", "unsigned-varint 0.7.1", ] @@ -4097,10 +4205,11 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" dependencies = [ + "autocfg 1.1.0", "bitflags", "cfg-if", "libc", @@ -4143,6 +4252,25 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "ntapi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc51db7b362b205941f71232e56c625156eb9a929f8cf74a428fd5bc094a4afc" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -4233,9 +4361,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.13.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "oneshot_broadcast" @@ -4258,9 +4386,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = 
"0.10.41" +version = "0.10.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" +checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" dependencies = [ "bitflags", "cfg-if", @@ -4299,9 +4427,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.75" +version = "0.9.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" +checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" dependencies = [ "autocfg 1.1.0", "cc", @@ -4334,6 +4462,12 @@ dependencies = [ "types", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "p256" version = "0.11.1" @@ -4342,7 +4476,7 @@ checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ "ecdsa", "elliptic-curve", - "sha2 0.10.2", + "sha2 0.10.6", ] [[package]] @@ -4361,9 +4495,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.1.5" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" +checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -4415,7 +4549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.3", + "parking_lot_core 0.9.4", ] [[package]] @@ -4434,22 +4568,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] name = "paste" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pbkdf2" @@ -4486,15 +4620,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0560d531d1febc25a3c9398a62a71256c0178f2e3443baedd9ad4bb8c9deb4" +checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" dependencies = [ "thiserror", "ucd-trie", @@ -4522,18 +4656,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +checksum = 
"ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -4570,9 +4704,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "platforms" @@ -4582,9 +4716,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "716b4eeb6c4a1d3ecc956f75b43ec2e8e8ba80026413e70a3f41fd3313d3492b" +checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" dependencies = [ "num-traits", "plotters-backend", @@ -4601,9 +4735,9 @@ checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0918736323d1baff32ee0eade54984f6f201ad7e97d5cfb5d6ab4a358529615" +checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" dependencies = [ "plotters-backend", ] @@ -4665,10 +4799,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ + "once_cell", "thiserror", "toml", ] @@ -4705,9 +4840,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.42" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278e965f1d8cf32d6e0e96de3d3e79712178ae67986d9cf9151f51e95aac89b" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ "unicode-ident", ] @@ -4726,9 +4861,9 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ "cfg-if", "fnv", @@ -4741,12 +4876,12 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" +checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.2", + "itoa 1.0.4", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -4842,9 +4977,9 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.27.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psutil" @@ -4907,9 +5042,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] @@ -4968,7 +5103,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -4988,7 +5123,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -5002,11 +5137,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] @@ -5024,7 +5159,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -5066,7 +5201,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "redox_syscall", "thiserror", ] @@ -5108,9 +5243,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" +checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" dependencies = [ "base64", "bytes", @@ -5125,13 +5260,13 @@ dependencies = [ "hyper-tls", "ipnet", "js-sys", - "lazy_static", "log", "mime", "native-tls", + "once_cell", "percent-encoding", "pin-project-lite 0.2.9", - "rustls 0.20.6", + "rustls 0.20.7", "rustls-pemfile", "serde", "serde_json", @@ -5139,7 +5274,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.23.4", - "tokio-util 0.7.3", + "tokio-util 0.7.4", "tower-service", "url", "wasm-bindgen", @@ -5193,9 +5328,9 @@ checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "rlp" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999508abb0ae792aabed2460c45b89106d97fe4adac593bdaef433c2605847b5" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", "rustc-hex", @@ -5290,7 +5425,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.12", + "semver 1.0.14", ] [[package]] @@ -5308,9 +5443,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.6" +version = "0.20.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", "ring", @@ -5320,18 +5455,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ "base64", ] [[package]] name = "rustversion" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" +checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" [[package]] name = "rw-stream-sink" @@ -5346,9 +5481,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "safe_arith" @@ -5385,7 +5520,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "windows-sys", + "windows-sys 0.36.1", ] [[package]] @@ -5399,9 +5534,9 @@ dependencies = [ [[package]] name = "scoped-tls" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" @@ -5409,6 +5544,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" + [[package]] name = "scrypt" version = "0.7.0" @@ -5475,9 +5616,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -5516,9 +5657,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" [[package]] name = "semver-parser" @@ -5551,9 +5692,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.140" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] @@ -5590,9 +5731,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = 
"1.0.140" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f2122636b9fe3b81f1cb25099fcf2d3f542cdb1d45940d56c713158884a05da" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", @@ -5601,20 +5742,20 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.82" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.4", "ryu", "serde", ] [[package]] name = "serde_repr" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ad84e47328a31223de7fed7a4f5087f2d6ddfe586cf3ca25b7a165bc0a5aed" +checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2", "quote", @@ -5628,7 +5769,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.2", + "itoa 1.0.4", "ryu", "serde", ] @@ -5688,7 +5829,18 @@ checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", +] + +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", ] [[package]] @@ -5706,13 +5858,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -5729,11 +5881,11 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", "keccak", ] @@ -5763,12 +5915,12 @@ dependencies = [ [[package]] name = "signature" -version = "1.6.3" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb766570a2825fa972bceff0d195727876a9cdf2460ab2e52d455dc2de47fd9" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.3", - "rand_core 0.6.3", + "digest 0.10.5", + "rand_core 0.6.4", ] [[package]] @@ -5780,7 +5932,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.11", + "time 0.3.16", ] [[package]] @@ -5788,7 +5940,7 @@ name = "simulator" version = "0.2.0" dependencies = [ "clap", - "env_logger 0.9.0", + "env_logger 0.9.1", "eth1", "eth1_test_rig", "execution_layer", @@ -5906,7 +6058,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.11", + "time 0.3.16", ] [[package]] @@ -5951,7 +6103,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.11", + "time 0.3.16", ] [[package]] @@ -5990,9 
+6142,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "snap" @@ -6010,18 +6162,18 @@ dependencies = [ "blake2", "chacha20poly1305", "curve25519-dalek 4.0.0-pre.1", - "rand_core 0.6.3", + "rand_core 0.6.4", "ring", "rustc_version 0.4.0", - "sha2 0.10.2", + "sha2 0.10.6", "subtle", ] [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -6092,7 +6244,7 @@ dependencies = [ "beacon_chain", "bls", "derivative", - "env_logger 0.9.0", + "env_logger 0.9.1", "eth2_hashing", "eth2_ssz", "eth2_ssz_derive", @@ -6177,9 +6329,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", "proc-macro2", @@ -6233,9 +6385,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.98" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", @@ -6260,6 +6412,34 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sysinfo" +version = "0.26.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c375d5fd899e32847b8566e10598d6e9f1d9b55ec6de3cdf9e7da4bdc51371bc" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi", +] + +[[package]] +name = "system_health" +version = "0.1.0" +dependencies = [ + "lighthouse_network", + "parking_lot 0.12.1", + "serde", + "serde_derive", + "serde_json", + "sysinfo", + "types", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -6359,18 +6539,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -6408,21 +6588,32 @@ dependencies = [ [[package]] name = "time" -version = "0.3.11" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" +checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.4", "libc", "num_threads", + "serde", + 
"time-core", "time-macros", ] [[package]] -name = "time-macros" -version = "0.2.4" +name = "time-core" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + +[[package]] +name = "time-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b" +dependencies = [ + "time-core", +] [[package]] name = "timer" @@ -6490,9 +6681,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.20.1" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ "autocfg 1.1.0", "bytes", @@ -6500,7 +6691,6 @@ dependencies = [ "memchr", "mio", "num_cpus", - "once_cell", "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", @@ -6557,21 +6747,21 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls 0.20.6", + "rustls 0.20.7", "tokio", "webpki 0.22.0", ] [[package]] name = "tokio-stream" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.3", + "tokio-util 0.7.4", ] [[package]] @@ -6595,7 +6785,7 @@ checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "rustls 0.20.6", + "rustls 0.20.7", "tokio", "tokio-rustls 0.23.4", "tungstenite 0.17.3", @@ -6621,9 +6811,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -6679,9 +6869,9 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" @@ -6691,9 +6881,9 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", @@ -6704,9 +6894,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" 
+checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", @@ -6715,9 +6905,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", @@ -6746,12 +6936,12 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ - "ansi_term", "matchers", + "nu-ansi-term", "once_cell", "regex", "sharded-slab", @@ -6818,7 +7008,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.2.3", "ipnet", "lazy_static", "log", @@ -6888,7 +7078,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "rustls 0.20.6", + "rustls 0.20.7", "sha-1 0.10.0", "thiserror", "url", @@ -6936,6 +7126,7 @@ dependencies = [ "lazy_static", "log", "maplit", + "merkle_proof", "parking_lot 0.12.1", "rand 0.8.5", "rand_xorshift", @@ -6963,15 +7154,15 @@ dependencies = [ [[package]] name = "ucd-trie" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89570599c4fe5585de2b388aab47e99f7fa4e9238a1399f707a02e356058141c" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" +checksum = "a45526d29728d135c2900b0d30573fe3ee79fceb12ef534c7bb30e810a91b601" dependencies = [ "arbitrary", "byteorder", @@ -7003,30 +7194,30 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-normalization" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" @@ -7070,13 +7261,12 @@ version = "0.1.0" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] @@ -7092,7 +7282,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "serde", ] @@ -7138,6 +7328,8 @@ dependencies = [ "slashing_protection", "slog", "slot_clock", + "sysinfo", + "system_health", "task_executor", "tempfile", "tokio", @@ -7285,9 +7477,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -7295,9 +7487,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", "log", @@ -7310,9 +7502,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.32" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa76fb221a1f8acddf5b54ace85912606980ad661ac7a503b4570ffd3a624dad" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ "cfg-if", "js-sys", @@ -7322,9 +7514,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7332,9 +7524,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -7345,15 +7537,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-bindgen-test" -version = "0.3.32" +version = "0.3.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513df541345bb9fcc07417775f3d51bbb677daf307d8035c0afafd87dc2e6599" +checksum = "09d2fff962180c3fadf677438054b1db62bee4aa32af26a45388af07d1287e1d" dependencies = [ "console_error_panic_hook", "js-sys", @@ -7365,9 +7557,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.32" +version = "0.3.33" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6150d36a03e90a3cf6c12650be10626a9902d70c5270fd47d7a47e5389a10d56" +checksum = "4683da3dfc016f704c9f82cf401520c4f1cb3ee440f7f52b3d6ac29506a49ca7" dependencies = [ "proc-macro2", "quote", @@ -7390,9 +7582,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.59" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -7414,7 +7606,7 @@ dependencies = [ "futures-timer", "headers", "hex", - "idna", + "idna 0.2.3", "jsonrpc-core", "log", "once_cell", @@ -7492,22 +7684,22 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" +checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" dependencies = [ "webpki 0.22.0", ] [[package]] name = "which" -version = "4.2.5" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" dependencies = [ "either", - "lazy_static", "libc", + "once_cell", ] [[package]] @@ -7571,43 +7763,100 @@ version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", ] +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + [[package]] name = "windows_aarch64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + [[package]] name = "windows_i686_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + [[package]] name = "windows_i686_msvc" version = "0.36.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +[[package]] +name = "windows_i686_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + [[package]] name = "windows_x86_64_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + [[package]] name = "windows_x86_64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + [[package]] name = "winreg" version = "0.7.0" @@ -7696,9 +7945,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0608f53c1dc0bad505d03a34bbd49fbf2ad7b51eb036123e896365532745a1" +checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" dependencies = [ "futures", "log", diff --git a/Cargo.toml b/Cargo.toml index 02cf4d943..e254400e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ members = [ "common/oneshot_broadcast", "common/sensitive_url", "common/slot_clock", + "common/system_health", "common/task_executor", "common/target_check", "common/test_random_derive", diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index bbd2cbc99..da0112105 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -114,7 +114,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { pub fn cli_run( matches: &ArgMatches, - mut env: Environment, + env: Environment, validator_dir: PathBuf, ) -> Result<(), String> { let spec = env.core_context().eth2_config.spec; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 18973cb9d..309d7a83f 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.2.1" +version = "3.3.0" authors = ["Paul Hauner ", "Age Manning BeaconChain { Ok(self.store.get_state(state_root, slot)?) } + /// Run a function with mutable access to a state for `block_root`. + /// + /// The primary purpose of this function is to borrow a state with its tree hash cache + /// from the snapshot cache *without moving it*. This means that calls to this function should + /// be kept to an absolute minimum, because holding the snapshot cache lock has the ability + /// to delay block import. + /// + /// If there is no appropriate state in the snapshot cache then one will be loaded from disk. + /// If no state is found on disk then `Ok(None)` will be returned. 
+ /// + /// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used, + /// which can inform logging/metrics. + /// + /// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour + /// of `tree-states`, where all caches are CoW and everything is good in the world. + pub fn with_mutable_state_for_block>( + &self, + block: &SignedBeaconBlock, + block_root: Hash256, + f: F, + ) -> Result, Error> + where + F: FnOnce(&mut BeaconState, bool) -> Result, + { + if let Some(state) = self + .snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .ok_or(Error::SnapshotCacheLockTimeout)? + .borrow_unadvanced_state_mut(block_root) + { + let cache_hit = true; + f(state, cache_hit).map(Some) + } else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? { + let cache_hit = false; + f(&mut state, cache_hit).map(Some) + } else { + Ok(None) + } + } + /// Return the sync committee at `slot + 1` from the canonical chain. /// /// This is useful when dealing with sync committee messages, because messages are signed @@ -2367,6 +2406,7 @@ impl BeaconChain { self: &Arc, chain_segment: Vec>>, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { let mut imported_blocks = 0; @@ -2435,6 +2475,7 @@ impl BeaconChain { signature_verified_block.block_root(), signature_verified_block, count_unrealized, + notify_execution_layer, ) .await { @@ -2523,6 +2564,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, count_unrealized: CountUnrealized, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2536,8 +2578,11 @@ impl BeaconChain { // A small closure to group the verification and import errors. let chain = self.clone(); let import_block = async move { - let execution_pending = - unverified_block.into_execution_pending_block(block_root, &chain)?; + let execution_pending = unverified_block.into_execution_pending_block( + block_root, + &chain, + notify_execution_layer, + )?; chain .import_execution_pending_block(execution_pending, count_unrealized) .await @@ -2607,6 +2652,7 @@ impl BeaconChain { confirmed_state_roots, payload_verification_handle, parent_eth1_finalization_data, + consensus_context, } = execution_pending_block; let PayloadVerificationOutcome { @@ -2660,6 +2706,7 @@ impl BeaconChain { count_unrealized, parent_block, parent_eth1_finalization_data, + consensus_context, ) }, "payload_verification_handle", @@ -2685,70 +2732,36 @@ impl BeaconChain { count_unrealized: CountUnrealized, parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, + mut consensus_context: ConsensusContext, ) -> Result> { + // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- + // Everything in this initial section is on the hot path between processing the block and + // being able to attest to it. DO NOT add any extra processing in this initial section + // unless it must run before fork choice. 
+ // ----------------------------------------------------------------------------------------- let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + let block = signed_block.message(); + let post_exec_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_POST_EXEC_PROCESSING); - let attestation_observation_timer = - metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); - - // Iterate through the attestations in the block and register them as an "observed - // attestation". This will stop us from propagating them on the gossip network. - for a in signed_block.message().body().attestations() { - match self.observed_attestations.write().observe_item(a, None) { - // If the observation was successful or if the slot for the attestation was too - // low, continue. - // - // We ignore `SlotTooLow` since this will be very common whilst syncing. - Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {} - Err(e) => return Err(BlockError::BeaconChainError(e.into())), - } - } - - metrics::stop_timer(attestation_observation_timer); - - // If a slasher is configured, provide the attestations from the block. - if let Some(slasher) = self.slasher.as_ref() { - for attestation in signed_block.message().body().attestations() { - let committee = - state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = get_indexed_attestation(committee.committee, attestation) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - slasher.accept_attestation(indexed_attestation); - } - } + // Check against weak subjectivity checkpoint. + self.check_block_against_weak_subjectivity_checkpoint(block, block_root, &state)?; // If there are new validators in this block, update our pubkey cache. // - // We perform this _before_ adding the block to fork choice because the pubkey cache is - // used by attestation processing which will only process an attestation if the block is - // known to fork choice. This ordering ensure that the pubkey cache is always up-to-date. - self.validator_pubkey_cache + // The only keys imported here will be ones for validators deposited in this block, because + // the cache *must* already have been updated for the parent block when it was imported. + // Newly deposited validators are not active and their keys are not required by other parts + // of block processing. The reason we do this here and not after making the block attestable + // is so we don't have to think about lock ordering with respect to the fork choice lock. + // There are a bunch of places where we lock both fork choice and the pubkey cache and it + // would be difficult to check that they all lock fork choice first. + let mut kv_store_ops = self + .validator_pubkey_cache .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? .import_new_pubkeys(&state)?; - // For the current and next epoch of this state, ensure we have the shuffling from this - // block in our cache. - for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] { - let shuffling_id = AttestationShufflingId::new(block_root, &state, *relative_epoch)?; - - let shuffling_is_cached = self - .shuffling_cache - .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? 
- .contains(&shuffling_id); - - if !shuffling_is_cached { - state.build_committee_cache(*relative_epoch, &self.spec)?; - let committee_cache = state.committee_cache(*relative_epoch)?; - self.shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? - .insert_committee_cache(shuffling_id, committee_cache); - } - } - // Apply the state to the attester cache, only if it is from the previous epoch or later. // // In a perfect scenario there should be no need to add previous-epoch states to the cache. @@ -2760,52 +2773,7 @@ impl BeaconChain { .map_err(BeaconChainError::from)?; } - // Alias for readability. - let block = signed_block.message(); - - // Only perform the weak subjectivity check if it was configured. - if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint { - // Note: we're using the finalized checkpoint from the head state, rather than fork - // choice. - // - // We are doing this to ensure that we detect changes in finalization. It's possible - // that fork choice has already been updated to the finalized checkpoint in the block - // we're importing. - let current_head_finalized_checkpoint = - self.canonical_head.cached_head().finalized_checkpoint(); - // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint. - let new_finalized_checkpoint = state.finalized_checkpoint(); - - // This ensures we only perform the check once. - if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch) - && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch) - { - if let Err(e) = - self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, &state) - { - let mut shutdown_sender = self.shutdown_sender(); - crit!( - self.log, - "Weak subjectivity checkpoint verification failed while importing block!"; - "block_root" => ?block_root, - "parent_root" => ?block.parent_root(), - "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, - "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, - "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, - "error" => ?e, - ); - crit!(self.log, "You must use the `--purge-db` flag to clear the database and restart sync. You may be on a hostile network."); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Weak subjectivity checkpoint verification failed. Provided block root is not a checkpoint." - )) - .map_err(|err| BlockError::BeaconChainError(BeaconChainError::WeakSubjectivtyShutdownError(err)))?; - return Err(BlockError::WeakSubjectivityConflict); - } - } - } - - // Take an exclusive write-lock on fork choice. It's very important prevent deadlocks by + // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by // avoiding taking other locks whilst holding this lock. let mut fork_choice = self.canonical_head.fork_choice_write_lock(); @@ -2835,77 +2803,6 @@ impl BeaconChain { .map_err(|e| BlockError::BeaconChainError(e.into()))?; } - // Allow the validator monitor to learn about a new valid state. - self.validator_monitor - .write() - .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), &state); - let validator_monitor = self.validator_monitor.read(); - - // Register each attester slashing in the block with fork choice. - for attester_slashing in block.body().attester_slashings() { - fork_choice.on_attester_slashing(attester_slashing); - } - - // Register each attestation in the block with the fork choice service. 
- for attestation in block.body().attestations() { - let _fork_choice_attestation_timer = - metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - let attestation_target_epoch = attestation.data.target.epoch; - - let committee = - state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = get_indexed_attestation(committee.committee, attestation) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - - match fork_choice.on_attestation( - current_slot, - &indexed_attestation, - AttestationFromBlock::True, - &self.spec, - ) { - Ok(()) => Ok(()), - // Ignore invalid attestations whilst importing attestations from a block. The - // block might be very old and therefore the attestations useless to fork choice. - Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()), - Err(e) => Err(BlockError::BeaconChainError(e.into())), - }?; - - // To avoid slowing down sync, only register attestations for the - // `observed_block_attesters` if they are from the previous epoch or later. - if attestation_target_epoch + 1 >= current_epoch { - let mut observed_block_attesters = self.observed_block_attesters.write(); - for &validator_index in &indexed_attestation.attesting_indices { - if let Err(e) = observed_block_attesters - .observe_validator(attestation_target_epoch, validator_index as usize) - { - debug!( - self.log, - "Failed to register observed block attester"; - "error" => ?e, - "epoch" => attestation_target_epoch, - "validator_index" => validator_index, - ) - } - } - } - - // Only register this with the validator monitor when the block is sufficiently close to - // the current slot. - if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch() - + block.slot().as_u64() - >= current_slot.as_u64() - { - match fork_choice.get_block(&block.parent_root()) { - Some(parent_block) => validator_monitor.register_attestation_in_block( - &indexed_attestation, - parent_block.slot, - &self.spec, - ), - None => warn!(self.log, "Failed to get parent block"; "slot" => %block.slot()), - } - } - } - // If the block is recent enough and it was not optimistically imported, check to see if it // becomes the head block. If so, apply it to the early attester cache. This will allow // attestations to the block without waiting for the block and state to be inserted to the @@ -2954,56 +2851,28 @@ impl BeaconChain { ), } } + drop(post_exec_timer); - // Register sync aggregate with validator monitor - if let Ok(sync_aggregate) = block.body().sync_aggregate() { - // `SyncCommittee` for the sync_aggregate should correspond to the duty slot - let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - let sync_committee = self.sync_committee_at_epoch(duty_epoch)?; - let participant_pubkeys = sync_committee - .pubkeys - .iter() - .zip(sync_aggregate.sync_committee_bits.iter()) - .filter_map(|(pubkey, bit)| bit.then_some(pubkey)) - .collect::>(); + // ---------------------------- BLOCK PROBABLY ATTESTABLE ---------------------------------- + // Most blocks are now capable of being attested to thanks to the `early_attester_cache` + // cache above. Resume non-essential processing. 
+ // ----------------------------------------------------------------------------------------- - validator_monitor.register_sync_aggregate_in_block( - block.slot(), - block.parent_root(), - participant_pubkeys, - ); - } - - for exit in block.body().voluntary_exits() { - validator_monitor.register_block_voluntary_exit(&exit.message) - } - - for slashing in block.body().attester_slashings() { - validator_monitor.register_block_attester_slashing(slashing) - } - - for slashing in block.body().proposer_slashings() { - validator_monitor.register_block_proposer_slashing(slashing) - } - - drop(validator_monitor); - - // Only present some metrics for blocks from the previous epoch or later. - // - // This helps avoid noise in the metrics during sync. - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 1 >= self.epoch()? { - metrics::observe( - &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, - block.body().attestations().len() as f64, - ); - - if let Ok(sync_aggregate) = block.body().sync_aggregate() { - metrics::set_gauge( - &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS, - sync_aggregate.num_set_bits() as i64, - ); - } - } + self.import_block_update_shuffling_cache(block_root, &mut state)?; + self.import_block_observe_attestations( + block, + &state, + &mut consensus_context, + current_epoch, + ); + self.import_block_update_validator_monitor( + block, + &state, + &mut consensus_context, + current_slot, + parent_block.slot(), + ); + self.import_block_update_slasher(block, &state, &mut consensus_context); let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); @@ -3020,7 +2889,9 @@ impl BeaconChain { ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); - if let Err(e) = self.store.do_atomically(ops) { + kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?); + + if let Err(e) = self.store.hot_db.do_atomically(kv_store_ops) { error!( self.log, "Database write failed!"; @@ -3028,6 +2899,10 @@ impl BeaconChain { "error" => ?e, ); + // Clear the early attester cache to prevent attestations which we would later be unable + // to verify due to the failure. + self.early_attester_cache.clear(); + // Since the write failed, try to revert the canonical head back to what was stored // in the database. This attempts to prevent inconsistency between the database and // fork choice. @@ -3070,6 +2945,7 @@ impl BeaconChain { eth1_deposit_index: state.eth1_deposit_index(), }; let current_finalized_checkpoint = state.finalized_checkpoint(); + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .ok_or(Error::SnapshotCacheLockTimeout) @@ -3077,7 +2953,7 @@ impl BeaconChain { snapshot_cache.insert( BeaconSnapshot { beacon_state: state, - beacon_block: signed_block, + beacon_block: signed_block.clone(), beacon_block_root: block_root, }, None, @@ -3096,22 +2972,312 @@ impl BeaconChain { self.head_tracker .register_block(block_root, parent_root, slot); - // Send an event to the `events` endpoint after fully processing the block. - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_block_subscribers() { - event_handler.register(EventKind::Block(SseBlock { - slot, - block: block_root, - execution_optimistic: payload_verification_status.is_optimistic(), - })); - } - } - metrics::stop_timer(db_write_timer); metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); - let block_delay_total = get_slot_delay_ms(block_time_imported, slot, &self.slot_clock); + // Update the deposit contract cache. 
+        self.import_block_update_deposit_contract_finalization(
+            block,
+            block_root,
+            current_epoch,
+            current_finalized_checkpoint,
+            current_eth1_finalization_data,
+            parent_eth1_finalization_data,
+            parent_block.slot(),
+        );
+
+        // Inform the unknown block cache, in case it was waiting on this block.
+        self.pre_finalization_block_cache
+            .block_processed(block_root);
+
+        self.import_block_update_metrics_and_events(
+            block,
+            block_root,
+            block_time_imported,
+            payload_verification_status,
+            current_slot,
+        );
+
+        Ok(block_root)
+    }
+
+    /// Check block's consistency with any configured weak subjectivity checkpoint.
+    fn check_block_against_weak_subjectivity_checkpoint(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        block_root: Hash256,
+        state: &BeaconState<T::EthSpec>,
+    ) -> Result<(), BlockError<T::EthSpec>> {
+        // Only perform the weak subjectivity check if it was configured.
+        let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint {
+            checkpoint
+        } else {
+            return Ok(());
+        };
+        // Note: we're using the finalized checkpoint from the head state, rather than fork
+        // choice.
+        //
+        // We are doing this to ensure that we detect changes in finalization. It's possible
+        // that fork choice has already been updated to the finalized checkpoint in the block
+        // we're importing.
+        let current_head_finalized_checkpoint =
+            self.canonical_head.cached_head().finalized_checkpoint();
+        // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint.
+        let new_finalized_checkpoint = state.finalized_checkpoint();
+
+        // This ensures we only perform the check once.
+        if current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch
+            && wss_checkpoint.epoch <= new_finalized_checkpoint.epoch
+        {
+            if let Err(e) =
+                self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, state)
+            {
+                let mut shutdown_sender = self.shutdown_sender();
+                crit!(
+                    self.log,
+                    "Weak subjectivity checkpoint verification failed while importing block!";
+                    "block_root" => ?block_root,
+                    "parent_root" => ?block.parent_root(),
+                    "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch,
+                    "new_finalized_epoch" => ?new_finalized_checkpoint.epoch,
+                    "weak_subjectivity_epoch" => ?wss_checkpoint.epoch,
+                    "error" => ?e
+                );
+                crit!(
+                    self.log,
+                    "You must use the `--purge-db` flag to clear the database and restart sync. \
+                     You may be on a hostile network."
+                );
+                shutdown_sender
+                    .try_send(ShutdownReason::Failure(
+                        "Weak subjectivity checkpoint verification failed. \
+                         Provided block root is not a checkpoint.",
+                    ))
+                    .map_err(|err| {
+                        BlockError::BeaconChainError(
+                            BeaconChainError::WeakSubjectivtyShutdownError(err),
+                        )
+                    })?;
+                return Err(BlockError::WeakSubjectivityConflict);
+            }
+        }
+        Ok(())
+    }
+
+    /// Process a block for the validator monitor, including all its constituent messages.
+    fn import_block_update_validator_monitor(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        state: &BeaconState<T::EthSpec>,
+        ctxt: &mut ConsensusContext<T::EthSpec>,
+        current_slot: Slot,
+        parent_block_slot: Slot,
+    ) {
+        // Only register blocks with the validator monitor when the block is sufficiently close to
+        // the current slot.
+        if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch()
+            + block.slot().as_u64()
+            < current_slot.as_u64()
+        {
+            return;
+        }
+
+        // Allow the validator monitor to learn about a new valid state.
+        self.validator_monitor
+            .write()
+            .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state);
+
+        let validator_monitor = self.validator_monitor.read();
+
+        // Sync aggregate.
+        if let Ok(sync_aggregate) = block.body().sync_aggregate() {
+            // `SyncCommittee` for the sync_aggregate should correspond to the duty slot
+            let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
+
+            match self.sync_committee_at_epoch(duty_epoch) {
+                Ok(sync_committee) => {
+                    let participant_pubkeys = sync_committee
+                        .pubkeys
+                        .iter()
+                        .zip(sync_aggregate.sync_committee_bits.iter())
+                        .filter_map(|(pubkey, bit)| bit.then_some(pubkey))
+                        .collect::<Vec<_>>();
+
+                    validator_monitor.register_sync_aggregate_in_block(
+                        block.slot(),
+                        block.parent_root(),
+                        participant_pubkeys,
+                    );
+                }
+                Err(e) => {
+                    warn!(
+                        self.log,
+                        "Unable to fetch sync committee";
+                        "epoch" => duty_epoch,
+                        "purpose" => "validator monitor",
+                        "error" => ?e,
+                    );
+                }
+            }
+        }
+
+        // Attestations.
+        for attestation in block.body().attestations() {
+            let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) {
+                Ok(indexed) => indexed,
+                Err(e) => {
+                    debug!(
+                        self.log,
+                        "Failed to get indexed attestation";
+                        "purpose" => "validator monitor",
+                        "attestation_slot" => attestation.data.slot,
+                        "error" => ?e,
+                    );
+                    continue;
+                }
+            };
+            validator_monitor.register_attestation_in_block(
+                indexed_attestation,
+                parent_block_slot,
+                &self.spec,
+            );
+        }
+
+        for exit in block.body().voluntary_exits() {
+            validator_monitor.register_block_voluntary_exit(&exit.message)
+        }
+
+        for slashing in block.body().attester_slashings() {
+            validator_monitor.register_block_attester_slashing(slashing)
+        }
+
+        for slashing in block.body().proposer_slashings() {
+            validator_monitor.register_block_proposer_slashing(slashing)
+        }
+    }
+
+    /// Iterate through the attestations in the block and register them as "observed".
+    ///
+    /// This will stop us from propagating them on the gossip network.
+    fn import_block_observe_attestations(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        state: &BeaconState<T::EthSpec>,
+        ctxt: &mut ConsensusContext<T::EthSpec>,
+        current_epoch: Epoch,
+    ) {
+        // To avoid slowing down sync, only observe attestations if the block is from the
+        // previous epoch or later.
+        if state.current_epoch() + 1 < current_epoch {
+            return;
+        }
+
+        let _timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);
+
+        for a in block.body().attestations() {
+            match self.observed_attestations.write().observe_item(a, None) {
+                // If the observation was successful or if the slot for the attestation was too
+                // low, continue.
+                //
+                // We ignore `SlotTooLow` since this will be very common whilst syncing.
+                Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {}
+                Err(e) => {
+                    debug!(
+                        self.log,
+                        "Failed to register observed attestation";
+                        "error" => ?e,
+                        "epoch" => a.data.target.epoch
+                    );
+                }
+            }
+
+            let indexed_attestation = match ctxt.get_indexed_attestation(state, a) {
+                Ok(indexed) => indexed,
+                Err(e) => {
+                    debug!(
+                        self.log,
+                        "Failed to get indexed attestation";
+                        "purpose" => "observation",
+                        "attestation_slot" => a.data.slot,
+                        "error" => ?e,
+                    );
+                    continue;
+                }
+            };
+
+            let mut observed_block_attesters = self.observed_block_attesters.write();
+
+            for &validator_index in &indexed_attestation.attesting_indices {
+                if let Err(e) = observed_block_attesters
+                    .observe_validator(a.data.target.epoch, validator_index as usize)
+                {
+                    debug!(
+                        self.log,
+                        "Failed to register observed block attester";
+                        "error" => ?e,
+                        "epoch" => a.data.target.epoch,
+                        "validator_index" => validator_index,
+                    )
+                }
+            }
+        }
+    }
+
+    /// If a slasher is configured, provide the attestations from the block.
+    fn import_block_update_slasher(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        state: &BeaconState<T::EthSpec>,
+        ctxt: &mut ConsensusContext<T::EthSpec>,
+    ) {
+        if let Some(slasher) = self.slasher.as_ref() {
+            for attestation in block.body().attestations() {
+                let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) {
+                    Ok(indexed) => indexed,
+                    Err(e) => {
+                        debug!(
+                            self.log,
+                            "Failed to get indexed attestation";
+                            "purpose" => "slasher",
+                            "attestation_slot" => attestation.data.slot,
+                            "error" => ?e,
+                        );
+                        continue;
+                    }
+                };
+                slasher.accept_attestation(indexed_attestation.clone());
+            }
+        }
+    }
+
+    fn import_block_update_metrics_and_events(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        block_root: Hash256,
+        block_time_imported: Duration,
+        payload_verification_status: PayloadVerificationStatus,
+        current_slot: Slot,
+    ) {
+        // Only present some metrics for blocks from the previous epoch or later.
+        //
+        // This helps avoid noise in the metrics during sync.
+        if block.slot() + 2 * T::EthSpec::slots_per_epoch() >= current_slot {
+            metrics::observe(
+                &metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
+                block.body().attestations().len() as f64,
+            );
+
+            if let Ok(sync_aggregate) = block.body().sync_aggregate() {
+                metrics::set_gauge(
+                    &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS,
+                    sync_aggregate.num_set_bits() as i64,
+                );
+            }
+        }
+
+        let block_delay_total =
+            get_slot_delay_ms(block_time_imported, block.slot(), &self.slot_clock);

         // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to
         // the cache during sync.
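The closure-based `with_mutable_state_for_block` helper introduced earlier in this diff is easiest to read alongside a caller. Below is a minimal, hypothetical sketch, not part of the patch itself: it assumes a `chain: &BeaconChain<T>`, a full-payload `block: SignedBeaconBlock<T::EthSpec>`, and its `block_root` are in scope, and the cache-miss branch is illustrative only.

// Borrow the block's pre-state mutably, preferring the snapshot cache so the
// state (and its tree hash cache) is not moved. The second closure argument
// reports whether the snapshot cache was hit.
let maybe_epoch = chain.with_mutable_state_for_block(&block, block_root, |state, cache_hit| {
    if !cache_hit {
        // The state was loaded from disk; a real caller might log or record a
        // metric here, as the doc comment suggests.
    }
    Ok(state.current_epoch())
})?;
// `maybe_epoch` is `None` when no state for the block exists in the snapshot
// cache or on disk. Keep work inside the closure minimal: the snapshot cache
// lock is held while it runs and can delay block import.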
@@ -3143,62 +3309,105 @@ impl BeaconChain { ); } - // Do not write to eth1 finalization cache for blocks older than 5 epochs - // this helps reduce noise during sync - if block_delay_total - < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32) - { - let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch()); - if parent_block_epoch < current_epoch { - // we've crossed epoch boundary, store Eth1FinalizationData - let (checkpoint, eth1_finalization_data) = - if current_slot % T::EthSpec::slots_per_epoch() == 0 { - // current block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: block_root, - }, - current_eth1_finalization_data, - ) - } else { - // parent block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: parent_block.canonical_root(), - }, - parent_eth1_finalization_data, - ) - }; + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_block_subscribers() { + event_handler.register(EventKind::Block(SseBlock { + slot: block.slot(), + block: block_root, + execution_optimistic: payload_verification_status.is_optimistic(), + })); + } + } + } - if let Some(finalized_eth1_data) = self - .eth1_finalization_cache - .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) - .and_then(|mut cache| { - cache.insert(checkpoint, eth1_finalization_data); - cache.finalize(¤t_finalized_checkpoint) - }) - { - if let Some(eth1_chain) = self.eth1_chain.as_ref() { - let finalized_deposit_count = finalized_eth1_data.deposit_count; - eth1_chain.finalize_eth1_data(finalized_eth1_data); - debug!( - self.log, - "called eth1_chain.finalize_eth1_data()"; - "epoch" => current_finalized_checkpoint.epoch, - "deposit count" => finalized_deposit_count, - ); - } + fn import_block_update_shuffling_cache( + &self, + block_root: Hash256, + state: &mut BeaconState, + ) -> Result<(), BlockError> { + // For the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. + for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] { + let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?; + + let shuffling_is_cached = self + .shuffling_cache + .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or(Error::AttestationCacheLockTimeout)? + .contains(&shuffling_id); + + if !shuffling_is_cached { + state.build_committee_cache(relative_epoch, &self.spec)?; + let committee_cache = state.committee_cache(relative_epoch)?; + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or(Error::AttestationCacheLockTimeout)? + .insert_committee_cache(shuffling_id, committee_cache); + } + } + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + fn import_block_update_deposit_contract_finalization( + &self, + block: BeaconBlockRef, + block_root: Hash256, + current_epoch: Epoch, + current_finalized_checkpoint: Checkpoint, + current_eth1_finalization_data: Eth1FinalizationData, + parent_eth1_finalization_data: Eth1FinalizationData, + parent_block_slot: Slot, + ) { + // Do not write to eth1 finalization cache for blocks older than 5 epochs. 
+ if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 5 < current_epoch { + return; + } + + let parent_block_epoch = parent_block_slot.epoch(T::EthSpec::slots_per_epoch()); + if parent_block_epoch < current_epoch { + // we've crossed epoch boundary, store Eth1FinalizationData + let (checkpoint, eth1_finalization_data) = + if block.slot() % T::EthSpec::slots_per_epoch() == 0 { + // current block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block_root, + }, + current_eth1_finalization_data, + ) + } else { + // parent block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block.parent_root(), + }, + parent_eth1_finalization_data, + ) + }; + + if let Some(finalized_eth1_data) = self + .eth1_finalization_cache + .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) + .and_then(|mut cache| { + cache.insert(checkpoint, eth1_finalization_data); + cache.finalize(¤t_finalized_checkpoint) + }) + { + if let Some(eth1_chain) = self.eth1_chain.as_ref() { + let finalized_deposit_count = finalized_eth1_data.deposit_count; + eth1_chain.finalize_eth1_data(finalized_eth1_data); + debug!( + self.log, + "called eth1_chain.finalize_eth1_data()"; + "epoch" => current_finalized_checkpoint.epoch, + "deposit count" => finalized_deposit_count, + ); } } } - - // Inform the unknown block cache, in case it was waiting on this block. - self.pre_finalization_block_cache - .block_processed(block_root); - - Ok(block_root) } /// If configured, wait for the fork choice run at the start of the slot to complete. @@ -3591,10 +3800,12 @@ impl BeaconChain { // This will be a lot slower but guards against bugs in block production and can be // quickly rolled out without a release. if self.config.paranoid_block_proposal { + let mut tmp_ctxt = ConsensusContext::new(state.slot()); attestations.retain(|att| { verify_attestation_for_block_inclusion( &state, att, + &mut tmp_ctxt, VerifySignatures::True, &self.spec, ) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index f40f88813..1999cc53a 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -45,29 +45,29 @@ use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - AllowOptimisticImport, PayloadNotifier, + AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ - BeaconForkChoice, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, + MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; -use fork_choice::PayloadVerificationStatus; +use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -use state_processing::per_block_processing::is_merge_transition_block; +use 
state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, @@ -551,8 +551,22 @@ pub fn signature_verify_chain_segment( let pubkey_cache = get_validator_pubkey_cache(chain)?; let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len()); + for (block_root, block) in &chain_segment { - signature_verifier.include_all_signatures(block, Some(*block_root), None)?; + let mut consensus_context = + ConsensusContext::new(block.slot()).set_current_block_root(*block_root); + + signature_verifier.include_all_signatures(block, &mut consensus_context)?; + + // Save the block and its consensus context. The context will have had its proposer index + // and attesting indices filled in, which can be used to accelerate later block processing. + signature_verified_blocks.push(SignatureVerifiedBlock { + block: block.clone(), + block_root: *block_root, + parent: None, + consensus_context, + }); } if signature_verifier.verify().is_err() { @@ -561,22 +575,6 @@ pub fn signature_verify_chain_segment( drop(pubkey_cache); - let mut signature_verified_blocks = chain_segment - .into_iter() - .map(|(block_root, block)| { - // Proposer index has already been verified above during signature verification. - let consensus_context = ConsensusContext::new(block.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); - SignatureVerifiedBlock { - block, - block_root, - parent: None, - consensus_context, - } - }) - .collect::>(); - if let Some(signature_verified_block) = signature_verified_blocks.first_mut() { signature_verified_block.parent = Some(parent); } @@ -626,6 +624,7 @@ pub struct ExecutionPendingBlock { pub parent_block: SignedBeaconBlock>, pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, + pub consensus_context: ConsensusContext, pub payload_verification_handle: PayloadVerificationHandle, } @@ -637,8 +636,9 @@ pub trait IntoExecutionPendingBlock: Sized { self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockError> { - self.into_execution_pending_block_slashable(block_root, chain) + self.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) .map(|execution_pending| { // Supply valid block to slasher. 
if let Some(slasher) = chain.slasher.as_ref() { @@ -654,6 +654,7 @@ pub trait IntoExecutionPendingBlock: Sized { self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; @@ -900,10 +901,15 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - execution_pending.into_execution_pending_block_slashable(block_root, chain) + execution_pending.into_execution_pending_block_slashable( + block_root, + chain, + notify_execution_layer, + ) } fn block(&self) -> &SignedBeaconBlock { @@ -945,13 +951,14 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - signature_verifier.include_all_signatures(&block, Some(block_root), None)?; + let mut consensus_context = + ConsensusContext::new(block.slot()).set_current_block_root(block_root); + + signature_verifier.include_all_signatures(&block, &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { - consensus_context: ConsensusContext::new(block.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()), + consensus_context, block, block_root, parent: Some(parent), @@ -996,16 +1003,16 @@ impl SignatureVerifiedBlock { // Gossip verification has already checked the proposer index. Use it to check the RANDAO // signature. - let verified_proposer_index = Some(block.message().proposer_index()); + let mut consensus_context = from.consensus_context; signature_verifier - .include_all_signatures_except_proposal(&block, verified_proposer_index)?; + .include_all_signatures_except_proposal(&block, &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { block, block_root: from.block_root, parent: Some(parent), - consensus_context: from.consensus_context, + consensus_context, }) } else { Err(BlockError::InvalidSignature) @@ -1033,6 +1040,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc self, block_root: Hash256, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { @@ -1048,6 +1056,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc parent, self.consensus_context, chain, + notify_execution_layer, ) .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) } @@ -1064,13 +1073,14 @@ impl IntoExecutionPendingBlock for Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? 
- .into_execution_pending_block_slashable(block_root, chain) + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) } fn block(&self) -> &SignedBeaconBlock { @@ -1092,6 +1102,7 @@ impl ExecutionPendingBlock { parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { if let Some(parent) = chain .canonical_head @@ -1128,6 +1139,79 @@ impl ExecutionPendingBlock { check_block_relevancy(&block, block_root, chain)?; + // Define a future that will verify the execution payload with an execution engine. + // + // We do this as early as possible so that later parts of this function can run in parallel + // with the payload verification. + let payload_notifier = PayloadNotifier::new( + chain.clone(), + block.clone(), + &parent.pre_state, + notify_execution_layer, + )?; + let is_valid_merge_transition_block = + is_merge_transition_block(&parent.pre_state, block.message().body()); + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + let block = payload_notifier.block.clone(); + + // If this block triggers the merge, check to ensure that it references valid execution + // blocks. + // + // The specification defines this check inside `on_block` in the fork-choice specification, + // however we perform the check here for two reasons: + // + // - There's no point in importing a block that will fail fork choice, so it's best to fail + // early. + // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no + // calls to remote servers. + if is_valid_merge_transition_block { + validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; + }; + + // The specification declares that this should be run *inside* `per_block_processing`, + // however we run it here to keep `per_block_processing` pure (i.e., no calls to external + // servers). + let payload_verification_status = payload_notifier.notify_new_payload().await?; + + // If the payload did not validate or invalidate the block, check to see if this block is + // valid for optimistic import. + if payload_verification_status.is_optimistic() { + let block_hash_opt = block + .message() + .body() + .execution_payload() + .map(|full_payload| full_payload.block_hash()); + + // Ensure the block is a candidate for optimistic import. + if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? + { + warn!( + chain.log, + "Rejecting optimistic block"; + "block_hash" => ?block_hash_opt, + "msg" => "the execution engine is not synced" + ); + return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + } + } + + Ok(PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; + /* * Advance the given `parent.beacon_state` to the slot of the given `block`. 
*/ @@ -1232,79 +1316,11 @@ impl ExecutionPendingBlock { summaries.push(summary); } } + metrics::stop_timer(catchup_timer); let block_slot = block.slot(); let state_current_epoch = state.current_epoch(); - // Define a future that will verify the execution payload with an execution engine (but - // don't execute it yet). - let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?; - let is_valid_merge_transition_block = - is_merge_transition_block(&state, block.message().body()); - let payload_verification_future = async move { - let chain = payload_notifier.chain.clone(); - let block = payload_notifier.block.clone(); - - // If this block triggers the merge, check to ensure that it references valid execution - // blocks. - // - // The specification defines this check inside `on_block` in the fork-choice specification, - // however we perform the check here for two reasons: - // - // - There's no point in importing a block that will fail fork choice, so it's best to fail - // early. - // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no - // calls to remote servers. - if is_valid_merge_transition_block { - validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; - }; - - // The specification declares that this should be run *inside* `per_block_processing`, - // however we run it here to keep `per_block_processing` pure (i.e., no calls to external - // servers). - // - // It is important that this function is called *after* `per_slot_processing`, since the - // `randao` may change. - let payload_verification_status = payload_notifier.notify_new_payload().await?; - - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. - if payload_verification_status.is_optimistic() { - let block_hash_opt = block - .message() - .body() - .execution_payload() - .map(|full_payload| full_payload.block_hash()); - - // Ensure the block is a candidate for optimistic import. - if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? - { - warn!( - chain.log, - "Rejecting optimistic block"; - "block_hash" => ?block_hash_opt, - "msg" => "the execution engine is not synced" - ); - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); - } - } - - Ok(PayloadVerificationOutcome { - payload_verification_status, - is_valid_merge_transition_block, - }) - }; - // Spawn the payload verification future as a new task, but don't wait for it to complete. - // The `payload_verification_future` will be awaited later to ensure verification completed - // successfully. - let payload_verification_handle = chain - .task_executor - .spawn_handle( - payload_verification_future, - "execution_payload_verification", - ) - .ok_or(BeaconChainError::RuntimeShutdown)?; - // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); @@ -1331,8 +1347,6 @@ impl ExecutionPendingBlock { } } - metrics::stop_timer(catchup_timer); - /* * Build the committee caches on the state. */ @@ -1422,6 +1436,44 @@ impl ExecutionPendingBlock { }); } + /* + * Apply the block's attestations to fork choice. + * + * We're running in parallel with the payload verification at this point, so this is + * free real estate. 
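The hunks above and below relocate the payload-verification future from after state catch-up to the top of `from_signature_verified_components`, and build the notifier from `parent.pre_state` rather than the post-advance state. The effect is that the (potentially slow) round-trip to the execution engine now overlaps with per-slot processing, committee-cache building, and the attestation import that follows. A reduced sketch of the spawn-early/await-late pattern, using plain `tokio::spawn` as a stand-in for Lighthouse's `TaskExecutor::spawn_handle` (tokio is an assumed dependency here):

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Kick off verification first; the task starts running immediately.
    let payload_verification_handle = tokio::spawn(async {
        // Stand-in for `payload_notifier.notify_new_payload().await`.
        tokio::time::sleep(Duration::from_millis(50)).await;
        Ok::<_, String>("payload valid")
    });

    // ... state advance, committee caches and fork-choice attestation import
    // run here, concurrently with the verification task ...

    // Only await the handle once the outcome is actually needed.
    let outcome = payload_verification_handle
        .await
        .expect("verification task panicked")
        .expect("payload verification failed");
    println!("{outcome}");
}
```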
+ */ + let current_slot = chain.slot()?; + let mut fork_choice = chain.canonical_head.fork_choice_write_lock(); + + // Register each attester slashing in the block with fork choice. + for attester_slashing in block.message().body().attester_slashings() { + fork_choice.on_attester_slashing(attester_slashing); + } + + // Register each attestation in the block with fork choice. + for (i, attestation) in block.message().body().attestations().iter().enumerate() { + let _fork_choice_attestation_timer = + metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); + + let indexed_attestation = consensus_context + .get_indexed_attestation(&state, attestation) + .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; + + match fork_choice.on_attestation( + current_slot, + indexed_attestation, + AttestationFromBlock::True, + &chain.spec, + ) { + Ok(()) => Ok(()), + // Ignore invalid attestations whilst importing attestations from a block. The + // block might be very old and therefore the attestations useless to fork choice. + Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()), + Err(e) => Err(BlockError::BeaconChainError(e.into())), + }?; + } + drop(fork_choice); + Ok(Self { block, block_root, @@ -1429,6 +1481,7 @@ impl ExecutionPendingBlock { parent_block: parent.beacon_block, parent_eth1_finalization_data, confirmed_state_roots, + consensus_context, payload_verification_handle, }) } diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 5e16a29cf..286cc17a9 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -45,6 +45,8 @@ pub struct ChainConfig { pub paranoid_block_proposal: bool, /// Whether to strictly count unrealized justified votes. pub count_unrealized_full: CountUnrealizedFull, + /// Optionally set timeout for calls to checkpoint sync endpoint. + pub checkpoint_sync_url_timeout: u64, } impl Default for ChainConfig { @@ -65,6 +67,7 @@ impl Default for ChainConfig { always_reset_payload_statuses: false, paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), + checkpoint_sync_url_timeout: 60, } } } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index ff3167c70..1982bdbf0 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -38,6 +38,16 @@ pub enum AllowOptimisticImport { No, } +/// Signal whether the execution payloads of new blocks should be +/// immediately verified with the EL or imported optimistically without +/// any EL communication. +#[derive(Default, Clone, Copy)] +pub enum NotifyExecutionLayer { + #[default] + Yes, + No, +} + /// Used to await the result of executing payload with a remote EE. pub struct PayloadNotifier { pub chain: Arc>, @@ -50,21 +60,28 @@ impl PayloadNotifier { chain: Arc>, block: Arc>, state: &BeaconState, + notify_execution_layer: NotifyExecutionLayer, ) -> Result> { - let payload_verification_status = if is_execution_enabled(state, block.message().body()) { - // Perform the initial stages of payload verification. - // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution engine from junk. 
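The `NotifyExecutionLayer` switch introduced in `execution_payload.rs` above defaults to `Yes`, so any caller that constructs it implicitly keeps the pre-existing behaviour of verifying payloads with the EL, while sync paths can pass `No` and have `PayloadNotifier::new` (continued below) short-circuit to `PayloadVerificationStatus::Optimistic` without an EL round-trip. A minimal sketch of the default-on switch (the `#[default]` variant attribute needs Rust 1.62+):

```rust
#[derive(Default, Clone, Copy, Debug, PartialEq, Eq)]
enum NotifyExecutionLayer {
    #[default]
    Yes,
    No,
}

fn payload_status(flag: NotifyExecutionLayer) -> &'static str {
    match flag {
        // Skip the EL entirely and import the payload optimistically.
        NotifyExecutionLayer::No => "optimistic",
        // Ask the EL to verify the payload (the pre-existing behaviour).
        NotifyExecutionLayer::Yes => "verify with EL",
    }
}

fn main() {
    assert_eq!(payload_status(NotifyExecutionLayer::default()), "verify with EL");
    assert_eq!(payload_status(NotifyExecutionLayer::No), "optimistic");
}
```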
- partially_verify_execution_payload::>( - state, - block.message().execution_payload()?, - &chain.spec, - ) - .map_err(BlockError::PerBlockProcessingError)?; - None - } else { - Some(PayloadVerificationStatus::Irrelevant) + let payload_verification_status = match notify_execution_layer { + NotifyExecutionLayer::No => Some(PayloadVerificationStatus::Optimistic), + NotifyExecutionLayer::Yes => { + if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. + // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution engine from junk. + partially_verify_execution_payload::>( + state, + block.slot(), + block.message().execution_payload()?, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; + None + } else { + Some(PayloadVerificationStatus::Irrelevant) + } + } }; Ok(Self { @@ -360,7 +377,8 @@ pub fn get_execution_payload< let spec = &chain.spec; let current_epoch = state.current_epoch(); let is_merge_transition_complete = is_merge_transition_complete(state); - let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let timestamp = + compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash(); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 615559608..1a30ccf52 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -64,6 +64,7 @@ pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; +pub use execution_payload::NotifyExecutionLayer; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index f8accec14..6eefc569f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -64,6 +64,11 @@ lazy_static! { "beacon_block_processing_state_root_seconds", "Time spent calculating the state root when processing a block." ); + pub static ref BLOCK_PROCESSING_POST_EXEC_PROCESSING: Result = try_create_histogram_with_buckets( + "beacon_block_processing_post_exec_pre_attestable_seconds", + "Time between finishing execution processing and the block becoming attestable", + linear_buckets(5e-3, 5e-3, 10) + ); pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( "beacon_block_processing_db_write_seconds", "Time spent writing a newly processed block and state to DB" diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 40b73451c..33447bc2e 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -298,6 +298,27 @@ impl SnapshotCache { }) } + /// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*. + /// + /// Care must be taken not to mutate the state in an invalid way. 
This function should only + /// be used to mutate the *caches* of the state, for example the tree hash cache when + /// calculating a light client merkle proof. + pub fn borrow_unadvanced_state_mut( + &mut self, + block_root: Hash256, + ) -> Option<&mut BeaconState> { + self.snapshots + .iter_mut() + .find(|snapshot| { + // If the pre-state exists then state advance has already taken the state for + // `block_root` and mutated its tree hash cache. Rather than re-building it while + // holding the snapshot cache lock (>1 second), prefer to return `None` from this + // function and force the caller to load it from disk. + snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none() + }) + .map(|snapshot| &mut snapshot.beacon_state) + } + /// If there is a snapshot with `block_root`, clone it and return the clone. pub fn get_cloned( &self, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a1c7acf17..b88966b41 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,7 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, - BeaconChainError, ProduceBlockVerification, + BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; use crate::{ builder::{BeaconChainBuilder, Witness}, @@ -586,7 +586,7 @@ where pub fn get_timestamp_at_slot(&self) -> u64 { let state = self.get_current_state(); - compute_timestamp_at_slot(&state, &self.spec).unwrap() + compute_timestamp_at_slot(&state, state.slot(), &self.spec).unwrap() } pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { @@ -1460,7 +1460,12 @@ where self.set_current_slot(slot); let block_hash: SignedBeaconBlockHash = self .chain - .process_block(block_root, Arc::new(block), CountUnrealized::True) + .process_block( + block_root, + Arc::new(block), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await? .into(); self.chain.recompute_head_at_current_slot().await; @@ -1477,6 +1482,7 @@ where block.canonical_root(), Arc::new(block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await? 
.into(); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 95f4aadce..f9203f74b 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -109,6 +109,11 @@ impl EpochSummary { } } + pub fn register_block(&mut self, delay: Duration) { + self.blocks += 1; + Self::update_if_lt(&mut self.block_min_delay, delay); + } + pub fn register_unaggregated_attestation(&mut self, delay: Duration) { self.attestations += 1; Self::update_if_lt(&mut self.attestation_min_delay, delay); @@ -613,13 +618,6 @@ impl ValidatorMonitor { Ok(()) } - fn get_validator_id(&self, validator_index: u64) -> Option<&str> { - self.indices - .get(&validator_index) - .and_then(|pubkey| self.validators.get(pubkey)) - .map(|validator| validator.id.as_str()) - } - fn get_validator(&self, validator_index: u64) -> Option<&MonitoredValidator> { self.indices .get(&validator_index) @@ -685,7 +683,9 @@ impl ValidatorMonitor { block_root: Hash256, slot_clock: &S, ) { - if let Some(id) = self.get_validator_id(block.proposer_index()) { + let epoch = block.slot().epoch(T::slots_per_epoch()); + if let Some(validator) = self.get_validator(block.proposer_index()) { + let id = &validator.id; let delay = get_block_delay_ms(seen_timestamp, block, slot_clock); metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, &[src, id]); @@ -704,6 +704,8 @@ impl ValidatorMonitor { "src" => src, "validator" => %id, ); + + validator.with_epoch_summary(epoch, |summary| summary.register_block(delay)); } } diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 60fdb607c..26aea2d27 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -3,7 +3,8 @@ use crate::{BeaconChainTypes, BeaconStore}; use ssz::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryInto; -use store::{DBColumn, Error as StoreError, StoreItem}; +use std::marker::PhantomData; +use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreItem}; use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// Provides a mapping of `validator_index -> validator_publickey`. @@ -14,21 +15,17 @@ use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// 2. To reduce the amount of public key _decompression_ required. A `BeaconState` stores public /// keys in compressed form and they are needed in decompressed form for signature verification. /// Decompression is expensive when many keys are involved. -/// -/// The cache has a `backing` that it uses to maintain a persistent, on-disk -/// copy of itself. This allows it to be restored between process invocations. pub struct ValidatorPubkeyCache { pubkeys: Vec, indices: HashMap, pubkey_bytes: Vec, - store: BeaconStore, + _phantom: PhantomData, } impl ValidatorPubkeyCache { /// Create a new public key cache using the keys in `state.validators`. /// - /// Also creates a new persistence file, returning an error if there is already a file at - /// `persistence_path`. + /// The new cache will be updated with the keys from `state` and immediately written to disk. 
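The validator-monitor change above folds block observations into the per-epoch summary: a counter plus a running minimum of the observed delay. A small self-contained sketch of that bookkeeping, with `update_if_lt` assumed to behave like the monitor's private helper of the same name:

```rust
use std::time::Duration;

#[derive(Default)]
struct EpochSummary {
    blocks: usize,
    block_min_delay: Option<Duration>,
}

impl EpochSummary {
    // Keep the smaller of the existing and new delay.
    fn update_if_lt(current: &mut Option<Duration>, new: Duration) {
        if current.map_or(true, |existing| new < existing) {
            *current = Some(new);
        }
    }

    pub fn register_block(&mut self, delay: Duration) {
        self.blocks += 1;
        Self::update_if_lt(&mut self.block_min_delay, delay);
    }
}

fn main() {
    let mut summary = EpochSummary::default();
    summary.register_block(Duration::from_millis(200));
    summary.register_block(Duration::from_millis(120));
    assert_eq!(summary.blocks, 2);
    assert_eq!(summary.block_min_delay, Some(Duration::from_millis(120)));
}
```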
pub fn new( state: &BeaconState, store: BeaconStore, @@ -37,10 +34,11 @@ impl ValidatorPubkeyCache { pubkeys: vec![], indices: HashMap::new(), pubkey_bytes: vec![], - store, + _phantom: PhantomData, }; - cache.import_new_pubkeys(state)?; + let store_ops = cache.import_new_pubkeys(state)?; + store.hot_db.do_atomically(store_ops)?; Ok(cache) } @@ -69,17 +67,19 @@ impl ValidatorPubkeyCache { pubkeys, indices, pubkey_bytes, - store, + _phantom: PhantomData, }) } /// Scan the given `state` and add any new validator public keys. /// /// Does not delete any keys from `self` if they don't appear in `state`. + /// + /// NOTE: The caller *must* commit the returned I/O batch as part of the block import process. pub fn import_new_pubkeys( &mut self, state: &BeaconState, - ) -> Result<(), BeaconChainError> { + ) -> Result, BeaconChainError> { if state.validators().len() > self.pubkeys.len() { self.import( state.validators()[self.pubkeys.len()..] @@ -87,12 +87,12 @@ impl ValidatorPubkeyCache { .map(|v| v.pubkey), ) } else { - Ok(()) + Ok(vec![]) } } /// Adds zero or more validators to `self`. - fn import(&mut self, validator_keys: I) -> Result<(), BeaconChainError> + fn import(&mut self, validator_keys: I) -> Result, BeaconChainError> where I: Iterator + ExactSizeIterator, { @@ -100,6 +100,7 @@ impl ValidatorPubkeyCache { self.pubkeys.reserve(validator_keys.len()); self.indices.reserve(validator_keys.len()); + let mut store_ops = Vec::with_capacity(validator_keys.len()); for pubkey in validator_keys { let i = self.pubkeys.len(); @@ -107,17 +108,11 @@ impl ValidatorPubkeyCache { return Err(BeaconChainError::DuplicateValidatorPublicKey); } - // The item is written to disk _before_ it is written into - // the local struct. - // - // This means that a pubkey cache read from disk will always be equivalent to or - // _later than_ the cache that was running in the previous instance of Lighthouse. - // - // The motivation behind this ordering is that we do not want to have states that - // reference a pubkey that is not in our cache. However, it's fine to have pubkeys - // that are never referenced in a state. - self.store - .put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?; + // Stage the new validator key for writing to disk. + // It will be committed atomically when the block that introduced it is written to disk. + // Notably it is NOT written while the write lock on the cache is held. + // See: https://github.com/sigp/lighthouse/issues/2327 + store_ops.push(DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i))); self.pubkeys.push( (&pubkey) @@ -129,7 +124,7 @@ impl ValidatorPubkeyCache { self.indices.insert(pubkey, i); } - Ok(()) + Ok(store_ops) } /// Get the public key for a validator with index `i`. @@ -296,9 +291,10 @@ mod test { // Add some more keypairs. 
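The rework above changes `import_new_pubkeys` from writing each key to disk while the cache lock is held into returning a batch of staged store operations, which the caller commits atomically alongside the block that introduced the validators (see the issue linked in the diff). A simplified sketch of the staging pattern, with a tiny `KeyValueStoreOp` and 48-byte arrays standing in for the real store and BLS key types:

```rust
enum KeyValueStoreOp {
    Put(Vec<u8>, Vec<u8>),
}

struct PubkeyCache {
    pubkeys: Vec<[u8; 48]>,
}

impl PubkeyCache {
    // Stage new keys without touching the disk; the caller *must* commit the
    // returned batch as part of block import, or the on-disk cache will fall
    // behind the in-memory one.
    fn import_new_pubkeys(&mut self, new_keys: &[[u8; 48]]) -> Vec<KeyValueStoreOp> {
        let mut ops = Vec::with_capacity(new_keys.len());
        for key in new_keys {
            let index = self.pubkeys.len() as u64;
            ops.push(KeyValueStoreOp::Put(
                index.to_le_bytes().to_vec(),
                key.to_vec(),
            ));
            self.pubkeys.push(*key);
        }
        ops
    }
}

fn main() {
    let mut cache = PubkeyCache { pubkeys: vec![] };
    let ops = cache.import_new_pubkeys(&[[0u8; 48], [1u8; 48]]);
    // In the real code these ops go through `store.hot_db.do_atomically(ops)`.
    assert_eq!(ops.len(), 2);
}
```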
let (state, keypairs) = get_state(12); - cache + let ops = cache .import_new_pubkeys(&state) .expect("should import pubkeys"); + store.hot_db.do_atomically(ops).unwrap(); check_cache_get(&cache, &keypairs[..]); drop(cache); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 998f22f77..38a55e221 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -3,7 +3,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult}; +use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer}; use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; @@ -147,14 +147,18 @@ async fn chain_segment_full_segment() { // Sneak in a little check to ensure we can process empty chain segments. harness .chain - .process_chain_segment(vec![], CountUnrealized::True) + .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error() .expect("should import empty chain segment"); harness .chain - .process_chain_segment(blocks.clone(), CountUnrealized::True) + .process_chain_segment( + blocks.clone(), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .expect("should import chain segment"); @@ -183,7 +187,11 @@ async fn chain_segment_varying_chunk_size() { for chunk in blocks.chunks(*chunk_size) { harness .chain - .process_chain_segment(chunk.to_vec(), CountUnrealized::True) + .process_chain_segment( + chunk.to_vec(), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); @@ -219,7 +227,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -239,7 +247,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -270,7 +278,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -291,7 +299,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -317,7 +325,7 @@ async fn assert_invalid_signature( matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -339,7 +347,11 @@ async fn assert_invalid_signature( // imported prior to this test. 
let _ = harness .chain - .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .process_chain_segment( + ancestor_blocks, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await; harness.chain.recompute_head_at_current_slot().await; @@ -349,6 +361,7 @@ async fn assert_invalid_signature( snapshots[block_index].beacon_block.canonical_root(), snapshots[block_index].beacon_block.clone(), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await; assert!( @@ -400,7 +413,11 @@ async fn invalid_signature_gossip_block() { .collect(); harness .chain - .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .process_chain_segment( + ancestor_blocks, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .into_block_error() .expect("should import all blocks prior to the one being tested"); @@ -412,7 +429,8 @@ async fn invalid_signature_gossip_block() { .process_block( signed_block.canonical_root(), Arc::new(signed_block), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await, Err(BlockError::InvalidSignature) @@ -446,7 +464,7 @@ async fn invalid_signature_block_proposal() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -644,7 +662,7 @@ async fn invalid_signature_deposit() { !matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True) + .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -725,6 +743,7 @@ async fn block_gossip_verification() { gossip_verified.block_root, gossip_verified, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .expect("should import valid gossip verified block"); @@ -996,6 +1015,7 @@ async fn verify_block_for_gossip_slashing_detection() { verified_block.block_root, verified_block, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1035,6 +1055,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { verified_block.block_root, verified_block, CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1180,7 +1201,8 @@ async fn add_base_block_to_altair_chain() { .process_block( base_block.canonical_root(), Arc::new(base_block.clone()), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .err() @@ -1195,7 +1217,11 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True) + .process_chain_segment( + vec![Arc::new(base_block)], + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1313,7 +1339,8 @@ async fn add_altair_block_to_base_chain() { .process_block( altair_block.canonical_root(), Arc::new(altair_block.clone()), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .err() @@ -1328,7 +1355,11 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True) + .process_chain_segment( + vec![Arc::new(altair_block)], + CountUnrealized::True, + NotifyExecutionLayer::Yes + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs 
b/beacon_node/beacon_chain/tests/payload_invalidation.rs index a963f071a..35065a442 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,8 +7,8 @@ use beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, StateSkipConfig, + WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ @@ -696,6 +696,7 @@ async fn invalidates_all_descendants() { fork_block.canonical_root(), Arc::new(fork_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -792,6 +793,7 @@ async fn switches_heads() { fork_block.canonical_root(), Arc::new(fork_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); @@ -1040,7 +1042,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True).await, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1322,7 +1324,12 @@ async fn build_optimistic_chain( for block in blocks { rig.harness .chain - .process_block(block.canonical_root(), block, CountUnrealized::True) + .process_block( + block.canonical_root(), + block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await .unwrap(); } @@ -1882,6 +1889,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { fork_block.canonical_root(), fork_block.clone(), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b1907bc96..b2fc7a640 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,8 +7,8 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, - BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler, - WhenSlotSkipped, + BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, + ServerSentEventHandler, WhenSlotSkipped, }; use fork_choice::CountUnrealized; use lazy_static::lazy_static; @@ -2148,6 +2148,7 @@ async fn weak_subjectivity_sync() { full_block.canonical_root(), Arc::new(full_block), CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index a13946bf2..d80db132e 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,7 +6,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - BeaconChain, StateSkipConfig, WhenSlotSkipped, + BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use fork_choice::CountUnrealized; use lazy_static::lazy_static; 
@@ -687,7 +687,8 @@ async fn run_skip_slot_test(skip_slots: u64) { .process_block( harness_a.chain.head_snapshot().beacon_block_root, harness_a.chain.head_snapshot().beacon_block.clone(), - CountUnrealized::True + CountUnrealized::True, + NotifyExecutionLayer::Yes, ) .await .unwrap(), diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 36d6491a5..75b865407 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -40,9 +40,6 @@ use types::{ /// Interval between polling the eth1 node for genesis information. pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; -/// Timeout for checkpoint sync HTTP requests. -pub const CHECKPOINT_SYNC_HTTP_TIMEOUT: Duration = Duration::from_secs(60); - /// Builds a `Client` instance. /// /// ## Notes @@ -273,8 +270,12 @@ where "remote_url" => %url, ); - let remote = - BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT)); + let remote = BeaconNodeHttpClient::new( + url, + Timeouts::set_all(Duration::from_secs( + config.chain.checkpoint_sync_url_timeout, + )), + ); let slots_per_epoch = TEthSpec::slots_per_epoch(); let deposit_snapshot = if config.sync_eth1_chain { diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 5e43c1eaa..0a2997762 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -42,7 +42,7 @@ pub enum ClientGenesis { /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { - pub data_dir: PathBuf, + data_dir: PathBuf, /// Name of the directory inside the data directory where the main "hot" DB is located. pub db_name: String, /// Path where the freezer database will be located. @@ -103,6 +103,17 @@ impl Default for Config { } impl Config { + /// Updates the data directory for the Client. + pub fn set_data_dir(&mut self, data_dir: PathBuf) { + self.data_dir = data_dir.clone(); + self.http_api.data_dir = data_dir; + } + + /// Gets the config's data_dir. + pub fn data_dir(&self) -> &PathBuf { + &self.data_dir + } + /// Get the database path without initialising it. 
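Making `data_dir` private, as in the `client/src/config.rs` hunk above, forces every update through `set_data_dir`, which is what keeps the HTTP API's copy of the path in sync with the client's. A trimmed sketch of the accessor pair, with both structs reduced to the fields that matter here:

```rust
use std::path::PathBuf;

struct HttpApiConfig {
    data_dir: PathBuf,
}

struct Config {
    data_dir: PathBuf,
    http_api: HttpApiConfig,
}

impl Config {
    // The single write path: updating the client data dir also updates the
    // HTTP API's copy, so the two can never drift apart.
    pub fn set_data_dir(&mut self, data_dir: PathBuf) {
        self.data_dir = data_dir.clone();
        self.http_api.data_dir = data_dir;
    }

    pub fn data_dir(&self) -> &PathBuf {
        &self.data_dir
    }
}

fn main() {
    let mut config = Config {
        data_dir: PathBuf::from("/tmp/a"),
        http_api: HttpApiConfig { data_dir: PathBuf::from("/tmp/a") },
    };
    config.set_data_dir(PathBuf::from("/tmp/b"));
    assert_eq!(config.data_dir(), &PathBuf::from("/tmp/b"));
    assert_eq!(config.http_api.data_dir, PathBuf::from("/tmp/b"));
}
```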
pub fn get_db_path(&self) -> PathBuf { self.get_data_dir().join(&self.db_name) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index f24b746cd..31082394b 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -751,10 +751,11 @@ impl Service { let deposit_count_to_finalize = eth1data_to_finalize.deposit_count; if deposit_count_to_finalize > already_finalized { match self.finalize_deposits(eth1data_to_finalize) { - Err(e) => error!( + Err(e) => warn!( self.log, "Failed to finalize deposit cache"; "error" => ?e, + "info" => "this should resolve on its own" ), Ok(()) => info!( self.log, @@ -814,9 +815,10 @@ impl Service { .block_by_hash(ð1_data.block_hash) .cloned() .ok_or_else(|| { - Error::FailedToFinalizeDeposit( - "Finalized block not found in block cache".to_string(), - ) + Error::FailedToFinalizeDeposit(format!( + "Finalized block not found in block cache: {:?}", + eth1_data.block_hash + )) })?; self.inner .deposit_cache diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 25a19eb0b..996d98385 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -13,6 +13,7 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkchoiceState}; +use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -22,11 +23,13 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; +use std::fmt; use std::future::Future; use std::io::Write; use std::path::PathBuf; use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use strum::AsRefStr; use task_executor::TaskExecutor; use tokio::{ sync::{Mutex, MutexGuard, RwLock}, @@ -35,12 +38,14 @@ use tokio::{ use tokio_stream::wrappers::WatchStream; #[cfg(feature = "withdrawals")] use types::Withdrawal; -use types::{AbstractExecPayload, Blob, ExecPayload, ExecutionPayloadEip4844, KzgCommitment}; +use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment}; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, - ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, + ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, +}; +use types::{ + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, }; -use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; mod engine_api; mod engines; @@ -71,6 +76,14 @@ const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); +/// A payload alongside some information about where it came from. +enum ProvenancedPayload

{ + /// A good ol' fashioned farm-to-table payload from your local EE. + Local(P), + /// A payload from a builder (e.g. mev-boost). + Builder(P), +} + #[derive(Debug)] pub enum Error { NoEngine, @@ -78,6 +91,7 @@ pub enum Error { ApiError(ApiError), Builder(builder_client::Error), NoHeaderFromBuilder, + CannotProduceHeader, EngineError(Box), NotSynced, ShuttingDown, @@ -615,7 +629,7 @@ impl ExecutionLayer { current_fork: ForkName, spec: &ChainSpec, ) -> Result, Error> { - match Payload::block_type() { + let payload_result = match Payload::block_type() { BlockType::Blinded => { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, @@ -643,6 +657,40 @@ impl ExecutionLayer { current_fork, ) .await + .map(ProvenancedPayload::Local) + } + }; + + // Track some metrics and return the result. + match payload_result { + Ok(ProvenancedPayload::Local(block_proposal_contents)) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, + &[metrics::LOCAL], + ); + Ok(block_proposal_contents) + } + Ok(ProvenancedPayload::Builder(block_proposal_contents)) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, + &[metrics::BUILDER], + ); + Ok(block_proposal_contents) + } + Err(e) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::FAILURE], + ); + Err(e) } } } @@ -655,7 +703,7 @@ impl ExecutionLayer { builder_params: BuilderParams, current_fork: ForkName, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result>, Error> { if let Some(builder) = self.builder() { let slot = builder_params.slot; let pubkey = builder_params.pubkey; @@ -669,134 +717,213 @@ impl ExecutionLayer { "pubkey" => ?pubkey, "parent_hash" => ?parent_hash, ); - let (relay_result, local_result) = tokio::join!( - builder.get_builder_header::(slot, parent_hash, &pubkey), - self.get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + + // Wait for the builder *and* local EL to produce a payload (or return an error). 
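The blinded-payload path now races the builder and the local EL concurrently, timing each leg so the log line that follows can report both durations. A reduced sketch of the pattern, assuming tokio; the `timed` helper mirrors the `timed_future` function added near the end of this diff, minus the metrics:

```rust
use std::time::{Duration, Instant};

// Run a future and report how long it took alongside its output.
async fn timed<F, T>(future: F) -> (T, Duration)
where
    F: std::future::Future<Output = T>,
{
    let start = Instant::now();
    let result = future.await;
    (result, start.elapsed())
}

#[tokio::main]
async fn main() {
    // Both requests run concurrently; neither blocks the other.
    let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!(
        timed(async {
            tokio::time::sleep(Duration::from_millis(30)).await; // builder round-trip
            Ok::<_, ()>("builder header")
        }),
        timed(async {
            tokio::time::sleep(Duration::from_millis(10)).await; // local EL round-trip
            Ok::<_, ()>("local payload")
        }),
    );
    println!("relay: {relay_result:?} after {relay_duration:?}");
    println!("local: {local_result:?} after {local_duration:?}");
}
```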
+ let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!( + timed_future(metrics::GET_BLINDED_PAYLOAD_BUILDER, async { + builder + .get_builder_header::(slot, parent_hash, &pubkey) + .await + }), + timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { + self.get_full_payload_caching::( + parent_hash, + payload_attributes, + forkchoice_update_params, + current_fork, + ) + .await + }) + ); + + info!( + self.log(), + "Requested blinded execution payload"; + "relay_fee_recipient" => match &relay_result { + Ok(Some(r)) => format!("{:?}", r.data.message.header.fee_recipient()), + Ok(None) => "empty response".to_string(), + Err(_) => "request failed".to_string(), + }, + "relay_response_ms" => relay_duration.as_millis(), + "local_fee_recipient" => match &local_result { + Ok(proposal_contents) => format!("{:?}", proposal_contents.payload().fee_recipient()), + Err(_) => "request failed".to_string() + }, + "local_response_ms" => local_duration.as_millis(), + "parent_hash" => ?parent_hash, ); return match (relay_result, local_result) { (Err(e), Ok(local)) => { warn!( self.log(), - "Unable to retrieve a payload from a connected \ - builder, falling back to the local execution client: {e:?}" + "Builder error when requesting payload"; + "info" => "falling back to local execution client", + "relay_error" => ?e, + "local_block_hash" => ?local.payload().block_hash(), + "parent_hash" => ?parent_hash, ); - Ok(local) + Ok(ProvenancedPayload::Local(local)) } (Ok(None), Ok(local)) => { info!( self.log(), - "No payload provided by connected builder. \ - Attempting to propose through local execution engine" + "Builder did not return a payload"; + "info" => "falling back to local execution client", + "local_block_hash" => ?local.payload().block_hash(), + "parent_hash" => ?parent_hash, ); - Ok(local) + Ok(ProvenancedPayload::Local(local)) } (Ok(Some(relay)), Ok(local)) => { - let local_payload = local.payload(); - let is_signature_valid = relay.data.verify_signature(spec); - let header = relay.data.message.header; + let header = &relay.data.message.header; info!( self.log(), - "Received a payload header from the connected builder"; - "block_hash" => ?header.block_hash(), + "Received local and builder payloads"; + "relay_block_hash" => ?header.block_hash(), + "local_block_hash" => ?local.payload().block_hash(), + "parent_hash" => ?parent_hash, ); - let relay_value = relay.data.message.value; - let configured_value = self.inner.builder_profit_threshold; - if relay_value < configured_value { - info!( - self.log(), - "The value offered by the connected builder does not meet \ - the configured profit threshold. Using local payload."; - "configured_value" => ?configured_value, "relay_value" => ?relay_value - ); - Ok(local) - } else if header.parent_hash() != parent_hash { - warn!( - self.log(), - "Invalid parent hash from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.prev_randao() != payload_attributes.prev_randao() { - warn!( - self.log(), - "Invalid prev randao from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.timestamp() != local_payload.timestamp() { - warn!( - self.log(), - "Invalid timestamp from connected builder, \ - falling back to local execution engine." - ); - Ok(local) - } else if header.block_number() != local_payload.block_number() { - warn!( - self.log(), - "Invalid block number from connected builder, \ - falling back to local execution engine." 
- ); - Ok(local) - } else if !matches!(relay.version, Some(ForkName::Merge)) { - // Once fork information is added to the payload, we will need to - // check that the local and relay payloads match. At this point, if - // we are requesting a payload at all, we have to assume this is - // the Bellatrix fork. - warn!( - self.log(), - "Invalid fork from connected builder, falling \ - back to local execution engine." - ); - Ok(local) - } else if !is_signature_valid { - let pubkey_bytes = relay.data.message.pubkey; - warn!(self.log(), "Invalid signature for pubkey {pubkey_bytes} on \ - bid from connected builder, falling back to local execution engine."); - Ok(local) - } else { - if header.fee_recipient() != payload_attributes.suggested_fee_recipient() { + match verify_builder_bid( + &relay, + parent_hash, + payload_attributes.prev_randao(), + payload_attributes.timestamp(), + Some(local.payload().block_number()), + self.inner.builder_profit_threshold, + spec, + ) { + Ok(()) => Ok(ProvenancedPayload::Builder( + //FIXME(sean) the builder API needs to be updated + // NOTE the comment above was removed in the + // rebase with unstable.. I think it goes + // here now? + BlockProposalContents::Payload(relay.data.message.header), + )), + Err(reason) if !reason.payload_invalid() => { info!( self.log(), - "Fee recipient from connected builder does \ - not match, using it anyways." + "Builder payload ignored"; + "info" => "using local payload", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, ); + Ok(ProvenancedPayload::Local(local)) + } + Err(reason) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, + &[reason.as_ref().as_ref()], + ); + warn!( + self.log(), + "Builder returned invalid payload"; + "info" => "using local payload", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, + ); + Ok(ProvenancedPayload::Local(local)) } - //FIXME(sean) the builder API needs to be updated - Ok(BlockProposalContents::Payload(header)) } } - (relay_result, Err(local_error)) => { - warn!(self.log(), "Failure from local execution engine. Attempting to \ - propose through connected builder"; "error" => ?local_error); - relay_result - .map_err(Error::Builder)? - .ok_or(Error::NoHeaderFromBuilder) - .map(|d| { + (Ok(Some(relay)), Err(local_error)) => { + let header = &relay.data.message.header; + + info!( + self.log(), + "Received builder payload with local error"; + "relay_block_hash" => ?header.block_hash(), + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + match verify_builder_bid( + &relay, + parent_hash, + payload_attributes.prev_randao(), + payload_attributes.timestamp(), + None, + self.inner.builder_profit_threshold, + spec, + ) { + Ok(()) => Ok(ProvenancedPayload::Builder( //FIXME(sean) the builder API needs to be updated - BlockProposalContents::Payload(d.data.message.header) - }) + // NOTE the comment above was removed in the + // rebase with unstable.. I think it goes + // here now? + BlockProposalContents::Payload(relay.data.message.header), + )), + // If the payload is valid then use it. The local EE failed + // to produce a payload so we have no alternative. + Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( + //FIXME(sean) the builder API needs to be updated + // NOTE the comment above was removed in the + // rebase with unstable.. I think it goes + // here now? 
+ BlockProposalContents::Payload(relay.data.message.header), + )), + Err(reason) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, + &[reason.as_ref().as_ref()], + ); + crit!( + self.log(), + "Builder returned invalid payload"; + "info" => "no local payload either - unable to propose block", + "reason" => %reason, + "relay_block_hash" => ?header.block_hash(), + "parent_hash" => ?parent_hash, + ); + Err(Error::CannotProduceHeader) + } + } + } + (Err(relay_error), Err(local_error)) => { + crit!( + self.log(), + "Unable to produce execution payload"; + "info" => "the local EL and builder both failed - unable to propose block", + "relay_error" => ?relay_error, + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + Err(Error::CannotProduceHeader) + } + (Ok(None), Err(local_error)) => { + crit!( + self.log(), + "Unable to produce execution payload"; + "info" => "the local EL failed and the builder returned nothing - \ + the block proposal will be missed", + "local_error" => ?local_error, + "parent_hash" => ?parent_hash, + ); + + Err(Error::CannotProduceHeader) } }; } - ChainHealth::Unhealthy(condition) => { - info!(self.log(), "Due to poor chain health the local execution engine will be used \ - for payload construction. To adjust chain health conditions \ - Use `builder-fallback` prefixed flags"; - "failed_condition" => ?condition) - } + ChainHealth::Unhealthy(condition) => info!( + self.log(), + "Chain is unhealthy, using local payload"; + "info" => "this helps protect the network. the --builder-fallback flags \ + can adjust the expected health conditions.", + "failed_condition" => ?condition + ), // Intentional no-op, so we never attempt builder API proposals pre-merge. ChainHealth::PreMerge => (), - ChainHealth::Optimistic => info!(self.log(), "The local execution engine is syncing \ - so the builder network cannot safely be used. Attempting \ - to build a block with the local execution engine"), + ChainHealth::Optimistic => info!( + self.log(), + "Chain is optimistic; can't build payload"; + "info" => "the local execution engine is syncing and the builder network \ + cannot safely be used - unable to propose block" + ), } } self.get_full_payload_caching( @@ -806,6 +933,7 @@ impl ExecutionLayer { current_fork, ) .await + .map(ProvenancedPayload::Local) } /// Get a full payload without caching its result in the execution layer's payload cache. 
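Taken together, the match arms above implement a small decision matrix over the `(relay_result, local_result)` pair: builder errors or empty responses fall back to the local payload, a failing local EL can still be covered by a usable builder bid, and only when neither source yields anything usable does the proposal fail with `CannotProduceHeader`. A condensed sketch of that matrix, with `bid_is_usable` abstracting `verify_builder_bid` and unit errors standing in for the real error types:

```rust
enum Provenanced<P> {
    Local(P),
    Builder(P),
}

enum Error {
    CannotProduceHeader,
}

fn choose_payload<P>(
    relay: Result<Option<P>, ()>,
    local: Result<P, ()>,
    bid_is_usable: impl Fn(&P) -> bool,
) -> Result<Provenanced<P>, Error> {
    match (relay, local) {
        // Builder failed or returned nothing: fall back to the local payload.
        (Err(_), Ok(local)) | (Ok(None), Ok(local)) => Ok(Provenanced::Local(local)),
        // Both succeeded: take the builder's bid only if it passes the checks.
        (Ok(Some(bid)), Ok(local)) => {
            if bid_is_usable(&bid) {
                Ok(Provenanced::Builder(bid))
            } else {
                Ok(Provenanced::Local(local))
            }
        }
        // Only the builder succeeded: use its bid if it is usable at all.
        (Ok(Some(bid)), Err(_)) if bid_is_usable(&bid) => Ok(Provenanced::Builder(bid)),
        // Nothing usable from either source: the proposal cannot proceed.
        _ => Err(Error::CannotProduceHeader),
    }
}

fn main() {
    // Builder bid fails its checks but the local EL succeeded: use local.
    match choose_payload(Ok(Some(1u64)), Ok(2u64), |_| false) {
        Ok(Provenanced::Local(p)) => assert_eq!(p, 2),
        _ => unreachable!(),
    }
}
```

In the real code the `(Ok(Some(..)), Err(_))` arm additionally distinguishes ignorable rejection reasons from objectively invalid payloads via `payload_invalid()`; the boolean predicate above collapses that distinction.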
@@ -1547,18 +1675,223 @@ impl ExecutionLayer { "Sending block to builder"; "root" => ?block_root, ); + if let Some(builder) = self.builder() { - builder - .post_builder_blinded_blocks(block) - .await - .map_err(Error::Builder) - .map(|d| d.data) + let (payload_result, duration) = + timed_future(metrics::POST_BLINDED_PAYLOAD_BUILDER, async { + builder + .post_builder_blinded_blocks(block) + .await + .map_err(Error::Builder) + .map(|d| d.data) + }) + .await; + + match &payload_result { + Ok(payload) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + info!( + self.log(), + "Builder successfully revealed payload"; + "relay_response_ms" => duration.as_millis(), + "block_root" => ?block_root, + "fee_recipient" => ?payload.fee_recipient(), + "block_hash" => ?payload.block_hash(), + "parent_hash" => ?payload.parent_hash() + ) + } + Err(e) => { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, + &[metrics::FAILURE], + ); + crit!( + self.log(), + "Builder failed to reveal payload"; + "info" => "this relay failure may cause a missed proposal", + "error" => ?e, + "relay_response_ms" => duration.as_millis(), + "block_root" => ?block_root, + "parent_hash" => ?block + .message() + .execution_payload() + .map(|payload| format!("{}", payload.parent_hash())) + .unwrap_or_else(|_| "unknown".to_string()) + ) + } + } + + payload_result } else { Err(Error::NoPayloadBuilder) } } } +#[derive(AsRefStr)] +#[strum(serialize_all = "snake_case")] +enum InvalidBuilderPayload { + LowValue { + profit_threshold: Uint256, + payload_value: Uint256, + }, + ParentHash { + payload: ExecutionBlockHash, + expected: ExecutionBlockHash, + }, + PrevRandao { + payload: Hash256, + expected: Hash256, + }, + Timestamp { + payload: u64, + expected: u64, + }, + BlockNumber { + payload: u64, + expected: Option<u64>, + }, + Fork { + payload: Option<ForkName>, + expected: ForkName, + }, + Signature { + signature: Signature, + pubkey: PublicKeyBytes, + }, +} + +impl InvalidBuilderPayload { + /// Returns `true` if a payload is objectively invalid and should never be included on chain. + fn payload_invalid(&self) -> bool { + match self { + // A low-value payload isn't invalid, it should just be avoided if possible. + InvalidBuilderPayload::LowValue { .. } => false, + InvalidBuilderPayload::ParentHash { .. } => true, + InvalidBuilderPayload::PrevRandao { .. } => true, + InvalidBuilderPayload::Timestamp { .. } => true, + InvalidBuilderPayload::BlockNumber { .. } => true, + InvalidBuilderPayload::Fork { .. } => true, + InvalidBuilderPayload::Signature { .. } => true, + } + } +} + +impl fmt::Display for InvalidBuilderPayload { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InvalidBuilderPayload::LowValue { + profit_threshold, + payload_value, + } => write!( + f, + "payload value of {} does not meet user-configured profit-threshold of {}", + payload_value, profit_threshold + ), + InvalidBuilderPayload::ParentHash { payload, expected } => { + write!(f, "payload block hash was {} not {}", payload, expected) + } + InvalidBuilderPayload::PrevRandao { payload, expected } => { + write!(f, "payload prev randao was {} not {}", payload, expected) + } + InvalidBuilderPayload::Timestamp { payload, expected } => { + write!(f, "payload timestamp was {} not {}", payload, expected) + } + InvalidBuilderPayload::BlockNumber { payload, expected } => { + write!(f, "payload block number was {} not {:?}", payload, expected) + } + InvalidBuilderPayload::Fork { payload, expected } => { + write!(f, "payload fork was {:?} not {}", payload, expected) + } + InvalidBuilderPayload::Signature { signature, pubkey } => write!( + f, + "invalid payload signature {} for pubkey {}", + signature, pubkey + ), + } + } +} + +/// Perform some cursory, non-exhaustive validation of the bid returned from the builder. +fn verify_builder_bid<T: EthSpec, Payload: ExecPayload<T>>( + bid: &ForkVersionedResponse<SignedBuilderBid<T, Payload>>, + parent_hash: ExecutionBlockHash, + prev_randao: Hash256, + timestamp: u64, + block_number: Option<u64>, + profit_threshold: Uint256, + spec: &ChainSpec, +) -> Result<(), Box<InvalidBuilderPayload>> { + let is_signature_valid = bid.data.verify_signature(spec); + let header = &bid.data.message.header; + let payload_value = bid.data.message.value; + + // Avoid logging values that we can't represent with our Prometheus library. + let payload_value_gwei = bid.data.message.value / 1_000_000_000; + if payload_value_gwei <= Uint256::from(i64::max_value()) { + metrics::set_gauge_vec( + &metrics::EXECUTION_LAYER_PAYLOAD_BIDS, + &[metrics::BUILDER], + payload_value_gwei.low_u64() as i64, + ); + } + + if payload_value < profit_threshold { + Err(Box::new(InvalidBuilderPayload::LowValue { + profit_threshold, + payload_value, + })) + } else if header.parent_hash() != parent_hash { + Err(Box::new(InvalidBuilderPayload::ParentHash { + payload: header.parent_hash(), + expected: parent_hash, + })) + } else if header.prev_randao() != prev_randao { + Err(Box::new(InvalidBuilderPayload::PrevRandao { + payload: header.prev_randao(), + expected: prev_randao, + })) + } else if header.timestamp() != timestamp { + Err(Box::new(InvalidBuilderPayload::Timestamp { + payload: header.timestamp(), + expected: timestamp, + })) + } else if block_number.map_or(false, |n| n != header.block_number()) { + Err(Box::new(InvalidBuilderPayload::BlockNumber { + payload: header.block_number(), + expected: block_number, + })) + } else if !matches!(bid.version, Some(ForkName::Merge)) { + // Once fork information is added to the payload, we will need to + // check that the local and relay payloads match. At this point, if + // we are requesting a payload at all, we have to assume this is + // the Bellatrix fork. + Err(Box::new(InvalidBuilderPayload::Fork { + payload: bid.version, + expected: ForkName::Merge, + })) + } else if !is_signature_valid { + Err(Box::new(InvalidBuilderPayload::Signature { + signature: bid.data.signature.clone(), + pubkey: bid.data.message.pubkey, + })) + } else { + Ok(()) + } +} + +/// A helper function to record the time it takes to execute a future.
+async fn timed_future<F: Future<Output = T>, T>(metric: &str, future: F) -> (T, Duration) { + let start = Instant::now(); + let result = future.await; + let duration = start.elapsed(); + metrics::observe_timer_vec(&metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metric], duration); + (result, duration) +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 9b00193a4..bb5a1088d 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -4,10 +4,17 @@ pub const HIT: &str = "hit"; pub const MISS: &str = "miss"; pub const GET_PAYLOAD: &str = "get_payload"; pub const GET_BLINDED_PAYLOAD: &str = "get_blinded_payload"; +pub const GET_BLINDED_PAYLOAD_LOCAL: &str = "get_blinded_payload_local"; +pub const GET_BLINDED_PAYLOAD_BUILDER: &str = "get_blinded_payload_builder"; +pub const POST_BLINDED_PAYLOAD_BUILDER: &str = "post_blinded_payload_builder"; pub const NEW_PAYLOAD: &str = "new_payload"; pub const FORKCHOICE_UPDATED: &str = "forkchoice_updated"; pub const GET_TERMINAL_POW_BLOCK_HASH: &str = "get_terminal_pow_block_hash"; pub const IS_VALID_TERMINAL_POW_BLOCK_HASH: &str = "is_valid_terminal_pow_block_hash"; +pub const LOCAL: &str = "local"; +pub const BUILDER: &str = "builder"; +pub const SUCCESS: &str = "success"; +pub const FAILURE: &str = "failure"; lazy_static::lazy_static! { pub static ref EXECUTION_LAYER_PROPOSER_INSERTED: Result<IntCounter> = try_create_int_counter( @@ -18,9 +25,11 @@ lazy_static::lazy_static! { "execution_layer_proposer_data_updated", "Count of times new proposer data is supplied", ); - pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result<HistogramVec> = try_create_histogram_vec( + pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result<HistogramVec> = + try_create_histogram_vec_with_buckets( "execution_layer_request_times", "Duration of calls to ELs", + decimal_buckets(-2, 1), &["method"] ); pub static ref EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD: Result<Histogram> = try_create_histogram( @@ -41,4 +50,29 @@ lazy_static::lazy_static! { "Indicates the payload status returned for a particular method", &["method", "status"] ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_OUTCOME: Result<IntCounterVec> = try_create_int_counter_vec( + "execution_layer_get_payload_outcome", + "The success/failure outcomes from calling get_payload", + &["outcome"] + ); + pub static ref EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME: Result<IntCounterVec> = try_create_int_counter_vec( + "execution_layer_builder_reveal_payload_outcome", + "The success/failure outcomes from a builder un-blinding a payload", + &["outcome"] + ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_SOURCE: Result<IntCounterVec> = try_create_int_counter_vec( + "execution_layer_get_payload_source", + "The source of each payload returned from get_payload", + &["source"] + ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS: Result<IntCounterVec> = try_create_int_counter_vec( + "execution_layer_get_payload_builder_rejections", + "The reasons why a payload from a builder was rejected", + &["reason"] + ); + pub static ref EXECUTION_LAYER_PAYLOAD_BIDS: Result<IntGaugeVec> = try_create_int_gauge_vec( + "execution_layer_payload_bids", + "The gwei bid value of payloads received by local EEs or builders.
Only shows values up to i64::max_value.", + &["source"] + ); } diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 58f28702b..aaf6a7bea 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -24,7 +24,7 @@ pub fn new_env() -> Environment { #[test] fn basic() { - let mut env = new_env(); + let env = new_env(); let log = env.core_context().log().clone(); let mut spec = env.eth2_config().spec.clone(); diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index cfd572083..e8a97fd0b 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -36,6 +36,9 @@ safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" tree_hash = "0.4.1" +sysinfo = "0.26.5" +system_health = { path = "../../common/system_health" } +directory = { path = "../../common/directory" } [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index e26bbe6b3..7746bba21 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -26,12 +26,14 @@ use beacon_chain::{ BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; +use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, EndpointVersion, SkipRandaoVerification, ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -43,6 +45,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; +use sysinfo::{System, SystemExt}; +use system_health::observe_system_health_bn; use tokio::sync::mpsc::{Sender, UnboundedSender}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ @@ -110,6 +114,7 @@ pub struct Config { pub tls_config: Option, pub allow_sync_stalled: bool, pub spec_fork_name: Option, + pub data_dir: PathBuf, } impl Default for Config { @@ -122,6 +127,7 @@ impl Default for Config { tls_config: None, allow_sync_stalled: false, spec_fork_name: None, + data_dir: PathBuf::from(DEFAULT_ROOT_DIR), } } } @@ -323,6 +329,10 @@ pub fn serve( } }); + // Create a `warp` filter for the data_dir. + let inner_data_dir = ctx.config.data_dir.clone(); + let data_dir_filter = warp::any().map(move || inner_data_dir.clone()); + // Create a `warp` filter that provides access to the beacon chain. let inner_ctx = ctx.clone(); let chain_filter = @@ -431,6 +441,37 @@ pub fn serve( let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + // Create a `warp` filter that provides access to local system information. 
+ let system_info = Arc::new(RwLock::new(sysinfo::System::new())); + { + // grab write access for initialisation + let mut system_info = system_info.write(); + system_info.refresh_disks_list(); + system_info.refresh_networks_list(); + system_info.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything()); + system_info.refresh_cpu(); + } // end lock + + let system_info_filter = + warp::any() + .map(move || system_info.clone()) + .map(|sysinfo: Arc>| { + { + // refresh stats + let mut sysinfo_lock = sysinfo.write(); + sysinfo_lock.refresh_memory(); + sysinfo_lock.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything()); + sysinfo_lock.refresh_cpu(); + sysinfo_lock.refresh_system(); + sysinfo_lock.refresh_networks(); + sysinfo_lock.refresh_disks(); + } // end lock + sysinfo + }); + + let app_start = std::time::Instant::now(); + let app_start_filter = warp::any().map(move || app_start); + /* * * Start of HTTP method definitions. @@ -891,6 +932,37 @@ pub fn serve( }, ); + // GET beacon/states/{state_id}/randao?epoch + let get_beacon_state_randao = beacon_states_path + .clone() + .and(warp::path("randao")) + .and(warp::query::()) + .and(warp::path::end()) + .and_then( + |state_id: StateId, chain: Arc>, query: api_types::RandaoQuery| { + blocking_json_task(move || { + let (randao, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); + let randao = *state.get_randao_mix(epoch).map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "epoch out of range: {e:?}" + )) + })?; + Ok((randao, execution_optimistic)) + }, + )?; + + Ok( + api_types::GenericResponse::from(api_types::RandaoMix { randao }) + .add_execution_optimistic(execution_optimistic), + ) + }) + }, + ); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. Given that @@ -1169,6 +1241,51 @@ pub fn serve( }) }); + // GET beacon/blinded_blocks/{block_id} + let get_beacon_blinded_block = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(block_id_or_err) + .and(chain_filter.clone()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .and_then( + |block_id: BlockId, + chain: Arc>, + accept_header: Option| { + blocking_task(move || { + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(block.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. 
+ execution_optimistic_fork_versioned_response( + V2, + fork_name, + execution_optimistic, + block, + ) + .map(|res| warp::reply::json(&res).into_response()) + } + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ); + /* * beacon/pool */ @@ -2682,7 +2799,12 @@ pub fn serve( .await .map(|resp| warp::reply::json(&resp)) .map_err(|e| { - error!(log, "Error from connected relay"; "error" => ?e); + error!( + log, + "Relay error when registering validator(s)"; + "num_registrations" => filtered_registration_data.len(), + "error" => ?e + ); // Forward the HTTP status code if we are able to, otherwise fall back // to a server error. if let eth2::Error::ServerMessage(message) = e { @@ -2796,6 +2918,29 @@ pub fn serve( }) }); + // GET lighthouse/ui/health + let get_lighthouse_ui_health = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("health")) + .and(warp::path::end()) + .and(system_info_filter) + .and(app_start_filter) + .and(data_dir_filter) + .and(network_globals.clone()) + .and_then( + |sysinfo, app_start: std::time::Instant, data_dir, network_globals| { + blocking_json_task(move || { + let app_uptime = app_start.elapsed().as_secs() as u64; + Ok(api_types::GenericResponse::from(observe_system_health_bn( + sysinfo, + data_dir, + app_uptime, + network_globals, + ))) + }) + }, + ); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -3214,10 +3359,12 @@ pub fn serve( .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_committees.boxed()) .or(get_beacon_state_sync_committees.boxed()) + .or(get_beacon_state_randao.boxed()) .or(get_beacon_headers.boxed()) .or(get_beacon_headers_block_id.boxed()) .or(get_beacon_block.boxed()) .or(get_beacon_block_attestations.boxed()) + .or(get_beacon_blinded_block.boxed()) .or(get_beacon_block_root.boxed()) .or(get_beacon_pool_attestations.boxed()) .or(get_beacon_pool_attester_slashings.boxed()) @@ -3244,6 +3391,7 @@ pub fn serve( .or(get_validator_aggregate_attestation.boxed()) .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) + .or(get_lighthouse_ui_health.boxed()) .or(get_lighthouse_syncing.boxed()) .or(get_lighthouse_nat.boxed()) .or(get_lighthouse_peers.boxed()) @@ -3263,6 +3411,7 @@ pub fn serve( .or(get_lighthouse_merge_readiness.boxed()) .or(get_events.boxed()), ) + .boxed() .or(warp::post().and( post_beacon_blocks .boxed() diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 0167da8d4..139bb3558 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,6 +1,8 @@ use crate::metrics; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; +use beacon_chain::{ + BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, +}; use lighthouse_network::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar}; use network::NetworkMessage; use slog::{crit, error, info, warn, Logger}; @@ -53,7 +55,12 @@ pub async fn publish_block( let block_root = block_root.unwrap_or_else(|| block.canonical_root()); match chain - .process_block(block_root, block.clone(), CountUnrealized::True) + .process_block( + block_root, + block.clone(), + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await { Ok(root) => { diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs 
index eaf91ce9d..ec1448df7 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -2,6 +2,7 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, }; +use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use http_api::{Config, Context}; use lighthouse_network::{ @@ -142,6 +143,7 @@ pub async fn create_api_server_on_port( allow_origin: None, tls_config: None, allow_sync_stalled: false, + data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, }, chain: Some(chain.clone()), diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index ff664d6ff..2e795e522 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -745,6 +745,36 @@ impl ApiTester { self } + pub async fn test_beacon_states_randao(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); + + let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); + let result = self + .client + .get_beacon_states_randao(state_id.0, epoch_opt) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let randao_mix = state + .get_randao_mix(state.slot().epoch(E::slots_per_epoch())) + .unwrap(); + + assert_eq!(result.unwrap().randao, *randao_mix); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -1016,6 +1046,82 @@ impl ApiTester { self } + pub async fn test_beacon_blinded_blocks(self) -> Self { + for block_id in self.interesting_block_ids() { + let expected = block_id + .blinded_block(&self.chain) + .ok() + .map(|(block, _execution_optimistic)| block); + + if let CoreBlockId::Slot(slot) = block_id.0 { + if expected.is_none() { + assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); + } else { + assert!(!SKIPPED_SLOTS.contains(&slot.as_u64())); + } + } + + // Check the JSON endpoint. + let json_result = self + .client + .get_beacon_blinded_blocks(block_id.0) + .await + .unwrap(); + + if let (Some(json), Some(expected)) = (&json_result, &expected) { + assert_eq!(&json.data, expected, "{:?}", block_id); + assert_eq!( + json.version, + Some(expected.fork_name(&self.chain.spec).unwrap()) + ); + } else { + assert_eq!(json_result, None); + assert_eq!(expected, None); + } + + // Check the SSZ endpoint. + let ssz_result = self + .client + .get_beacon_blinded_blocks_ssz(block_id.0, &self.chain.spec) + .await + .unwrap(); + assert_eq!(ssz_result.as_ref(), expected.as_ref(), "{:?}", block_id); + + // Check that version headers are provided. 
+ let url = self + .client + .get_beacon_blinded_blocks_path(block_id.0) + .unwrap(); + + let builders: Vec RequestBuilder> = vec![ + |b| b, + |b| b.accept(Accept::Ssz), + |b| b.accept(Accept::Json), + |b| b.accept(Accept::Any), + ]; + + for req_builder in builders { + let raw_res = self + .client + .get_response(url.clone(), req_builder) + .await + .optional() + .unwrap(); + if let (Some(raw_res), Some(expected)) = (&raw_res, &expected) { + assert_eq!( + raw_res.fork_name_from_header().unwrap(), + Some(expected.fork_name(&self.chain.spec).unwrap()) + ); + } else { + assert!(raw_res.is_none()); + assert_eq!(expected, None); + } + } + } + + self + } + pub async fn test_beacon_blocks_attestations(self) -> Self { for block_id in self.interesting_block_ids() { let result = self @@ -3696,6 +3802,8 @@ async fn beacon_get() { .await .test_beacon_states_validator_id() .await + .test_beacon_states_randao() + .await .test_beacon_headers_all_slots() .await .test_beacon_headers_all_parents() @@ -3704,6 +3812,8 @@ async fn beacon_get() { .await .test_beacon_blocks() .await + .test_beacon_blinded_blocks() + .await .test_beacon_blocks_attestations() .await .test_beacon_blocks_root() diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 046167d5d..32712e32a 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -130,6 +130,9 @@ pub struct Config { /// Whether metrics are enabled. pub metrics_enabled: bool, + + /// Whether light client protocols should be enabled. + pub enable_light_client_server: bool, } impl Default for Config { @@ -207,6 +210,7 @@ impl Default for Config { shutdown_after_sync: false, topics: Vec::new(), metrics_enabled: false, + enable_light_client_server: false, } } } @@ -284,9 +288,11 @@ impl From for NetworkLoad { /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { // The function used to generate a gossipsub message id - // We use the first 8 bytes of SHA256(data) for content addressing - let fast_gossip_message_id = - |message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]); + // We use the first 8 bytes of SHA256(topic, data) for content addressing + let fast_gossip_message_id = |message: &RawGossipsubMessage| { + let data = [message.topic.as_str().as_bytes(), &message.data].concat(); + FastMessageId::from(&Sha256::digest(data)[..8]) + }; fn prefix( prefix: [u8; 4], message: &GossipsubMessage, diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3535c6bd9..8e528f09d 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -834,6 +834,17 @@ impl Discovery { // Map each subnet query's min_ttl to the set of ENR's returned for that subnet. queries.iter().for_each(|query| { + let query_str = match query.subnet { + Subnet::Attestation(_) => "attestation", + Subnet::SyncCommittee(_) => "sync_committee", + }; + + if let Some(v) = metrics::get_int_counter( + &metrics::TOTAL_SUBNET_QUERIES, + &[query_str], + ) { + v.inc(); + } // A subnet query has completed. Add back to the queue, incrementing retries. 
self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); @@ -845,6 +856,12 @@ impl Discovery { .filter(|enr| subnet_predicate(enr)) .map(|enr| enr.peer_id()) .for_each(|peer_id| { + if let Some(v) = metrics::get_int_counter( + &metrics::SUBNET_PEERS_FOUND, + &[query_str], + ) { + v.inc(); + } let other_min_ttl = mapped_results.get_mut(&peer_id); // map peer IDs to the min_ttl furthest in the future diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 66d7a1f74..2ee224d5e 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -112,6 +112,19 @@ lazy_static! { &["client"] ); + pub static ref SUBNET_PEERS_FOUND: Result = + try_create_int_counter_vec( + "discovery_query_peers_found", + "Total number of peers found in attestation subnets and sync subnets", + &["type"] + ); + pub static ref TOTAL_SUBNET_QUERIES: Result = + try_create_int_counter_vec( + "discovery_total_queries", + "Total number of discovery subnet queries", + &["type"] + ); + /* * Inbound/Outbound peers */ diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 1029204ae..8c6ad17cd 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -405,7 +405,7 @@ impl PeerManager { debug!(self.log, "Identified Peer"; "peer" => %peer_id, "protocol_version" => &info.protocol_version, "agent_version" => &info.agent_version, - "listening_ addresses" => ?info.listen_addrs, + "listening_addresses" => ?info.listen_addrs, "observed_address" => ?info.observed_addr, "protocols" => ?info.protocols ); @@ -502,6 +502,7 @@ impl PeerManager { Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::BlobsByRange => PeerAction::MidToleranceError, + Protocol::LightClientBootstrap => PeerAction::LowToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -519,6 +520,7 @@ impl PeerManager { Protocol::BlocksByRoot => return, Protocol::BlobsByRange => return, Protocol::Goodbye => return, + Protocol::LightClientBootstrap => return, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, } @@ -534,6 +536,7 @@ impl PeerManager { Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::BlobsByRange => PeerAction::MidToleranceError, + Protocol::LightClientBootstrap => return, Protocol::Goodbye => return, Protocol::MetaData => return, Protocol::Status => return, diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index c84e368f1..175dfaf01 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -139,7 +139,7 @@ impl NetworkBehaviour for PeerManager { // TODO: directly emit the ban event? BanResult::BadScore => { // This is a faulty state - error!(self.log, "Connected to a banned peer, re-banning"; "peer_id" => %peer_id); + error!(self.log, "Connected to a banned peer. 
Re-banning"; "peer_id" => %peer_id); // Reban the peer self.goodbye_peer(peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); return; diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 01e40326c..611d37df5 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -15,10 +15,11 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; +use types::light_client_bootstrap::LightClientBootstrap; use types::{ - BlobsSidecar, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockEip4844, - SignedBeaconBlockMerge, + BlobsSidecar, EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, + SignedBeaconBlockEip4844, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -72,6 +73,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), + RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. @@ -233,6 +235,7 @@ impl Encoder> for SSZSnappyOutboundCodec< OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode + OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(), }; // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { @@ -486,7 +489,11 @@ fn handle_v1_request( Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - + Protocol::LightClientBootstrap => Ok(Some(InboundRequest::LightClientBootstrap( + LightClientBootstrapRequest { + root: Hash256::from_ssz_bytes(decoded_buffer)?, + }, + ))), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. Protocol::MetaData => { @@ -562,6 +569,9 @@ fn handle_v1_response( Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1( MetaDataV1::from_ssz_bytes(decoded_buffer)?, )))), + Protocol::LightClientBootstrap => Ok(Some(RPCResponse::LightClientBootstrap( + LightClientBootstrap::from_ssz_bytes(decoded_buffer)?, + ))), } } @@ -923,6 +933,9 @@ mod tests { OutboundRequest::MetaData(metadata) => { assert_eq!(decoded, InboundRequest::MetaData(metadata)) } + OutboundRequest::LightClientBootstrap(bootstrap) => { + assert_eq!(decoded, InboundRequest::LightClientBootstrap(bootstrap)) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 9ac062adc..9d6229eb3 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -285,7 +285,7 @@ where } else { if !matches!(response, RPCCodedResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses - trace!(self.log, "Inbound stream has expired, response not sent"; + trace!(self.log, "Inbound stream has expired. 
Response not sent"; "response" => %response, "id" => inbound_id); } return; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 62059610d..d66d587a0 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -12,8 +12,10 @@ use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; -use types::blobs_sidecar::BlobsSidecar; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + blobs_sidecar::BlobsSidecar, light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, + Hash256, SignedBeaconBlock, Slot, +}; /// Maximum number of blocks in a single request. pub type MaxRequestBlocks = U1024; @@ -260,6 +262,9 @@ pub enum RPCResponse { /// A response to a get BLOBS_BY_RANGE request BlobsByRange(Arc>), + /// A response to a get LIGHTCLIENT_BOOTSTRAP request. + LightClientBootstrap(LightClientBootstrap), + /// A PONG response to a PING request. Pong(Ping), @@ -293,6 +298,12 @@ pub enum RPCCodedResponse { StreamTermination(ResponseTermination), } +/// Request a light_client_bootstrap for lightclients peers. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct LightClientBootstrapRequest { + pub root: Hash256, +} + /// The code assigned to an erroneous `RPCResponse`. #[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] @@ -342,6 +353,7 @@ impl RPCCodedResponse { RPCResponse::BlobsByRange(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, + RPCResponse::LightClientBootstrap(_) => false, }, RPCCodedResponse::Error(_, _) => true, // Stream terminations are part of responses that have chunks @@ -377,6 +389,7 @@ impl RPCResponse { RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, + RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } } @@ -415,6 +428,9 @@ impl std::fmt::Display for RPCResponse { } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), + RPCResponse::LightClientBootstrap(bootstrap) => { + write!(f, "LightClientBootstrap Slot: {}", bootstrap.header.slot) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 75e78b0b3..57b00586d 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -27,8 +27,8 @@ pub(crate) use protocol::{InboundRequest, RPCProtocol}; use crate::rpc::methods::MAX_REQUEST_BLOBS_SIDECARS; pub use handler::SubstreamId; pub use methods::{ - BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks, - RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, + BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, + MaxRequestBlocks, RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, }; pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; @@ -109,18 +109,24 @@ pub struct RPC { /// Queue of events to be processed. events: Vec, RPCHandler>>, fork_context: Arc, + enable_light_client_server: bool, /// Slog logger for RPC behaviour. 
log: slog::Logger, } impl RPC { - pub fn new(fork_context: Arc, log: slog::Logger) -> Self { + pub fn new( + fork_context: Arc, + enable_light_client_server: bool, + log: slog::Logger, + ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); let limiter = RPCRateLimiterBuilder::new() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) .n_every(Protocol::Ping, 2, Duration::from_secs(10)) .n_every(Protocol::Status, 5, Duration::from_secs(15)) .one_every(Protocol::Goodbye, Duration::from_secs(10)) + .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) .n_every( Protocol::BlocksByRange, methods::MAX_REQUEST_BLOCKS, @@ -138,6 +144,7 @@ impl RPC { limiter, events: Vec::new(), fork_context, + enable_light_client_server, log, } } @@ -194,6 +201,7 @@ where RPCProtocol { fork_context: self.fork_context.clone(), max_rpc_size: max_rpc_size(&self.fork_context), + enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, }, (), diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index a2029fd24..250df1fa6 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -39,6 +39,7 @@ pub enum OutboundRequest { BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), + LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(PhantomData), } @@ -90,9 +91,12 @@ impl OutboundRequest { ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), ], + // Note: This match arm is technically unreachable as we only respond to light client requests + // that we generate from the beacon state. + // We do not make light client rpc requests from the beacon node + OutboundRequest::LightClientBootstrap(_) => vec![], } } - /* These functions are used in the handler for stream management */ /// Number of responses expected for this request. 
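The new `.one_every(Protocol::LightClientBootstrap, Duration::from_secs(10))` line above gives the bootstrap protocol the most conservative quota in the builder. As a rough illustration of the semantics (hypothetical standalone types, not the crate's own limiter), `one_every` behaves like a single-token bucket that refills once per period:

use std::time::{Duration, Instant};

// Hypothetical stand-in mirroring `one_every(_, 10s)`: at most one
// accepted request per peer per ten-second window.
struct OneEvery {
    period: Duration,
    last_allowed: Option<Instant>,
}

impl OneEvery {
    fn allow(&mut self, now: Instant) -> bool {
        match self.last_allowed {
            // A request arrived before the window elapsed: reject it.
            Some(last) if now.duration_since(last) < self.period => false,
            // First request, or the window has elapsed: accept and reset.
            _ => {
                self.last_allowed = Some(now);
                true
            }
        }
    }
}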
@@ -105,6 +109,7 @@ impl OutboundRequest { OutboundRequest::BlobsByRange(req) => req.count, OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, + OutboundRequest::LightClientBootstrap(_) => 1, } } @@ -118,6 +123,7 @@ impl OutboundRequest { OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange, OutboundRequest::Ping(_) => Protocol::Ping, OutboundRequest::MetaData(_) => Protocol::MetaData, + OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } @@ -130,6 +136,7 @@ impl OutboundRequest { OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, + OutboundRequest::LightClientBootstrap(_) => unreachable!(), OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -188,6 +195,9 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), + OutboundRequest::LightClientBootstrap(bootstrap) => { + write!(f, "Lightclient Bootstrap: {}", bootstrap.root) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index f71b8e605..8bf728346 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -185,6 +185,8 @@ pub enum Protocol { Ping, /// The `MetaData` protocol name. MetaData, + /// The `LightClientBootstrap` protocol name. + LightClientBootstrap, } /// RPC Versions @@ -212,6 +214,7 @@ impl std::fmt::Display for Protocol { Protocol::BlobsByRange => "blobs_sidecars_by_range", Protocol::Ping => "ping", Protocol::MetaData => "metadata", + Protocol::LightClientBootstrap => "light_client_bootstrap", }; f.write_str(repr) } @@ -240,6 +243,7 @@ impl std::fmt::Display for Version { pub struct RPCProtocol { pub fork_context: Arc, pub max_rpc_size: usize, + pub enable_light_client_server: bool, pub phantom: PhantomData, } @@ -249,7 +253,7 @@ impl UpgradeInfo for RPCProtocol { /// The list of supported RPC protocols for Lighthouse. 
fn protocol_info(&self) -> Self::InfoIter { - vec![ + let mut supported_protocols = vec![ ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), // V2 variants have higher preference then V1 @@ -260,7 +264,15 @@ impl UpgradeInfo for RPCProtocol { ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), - ] + ]; + if self.enable_light_client_server { + supported_protocols.push(ProtocolId::new( + Protocol::LightClientBootstrap, + Version::V1, + Encoding::SSZSnappy, + )); + } + supported_protocols } } @@ -326,6 +338,10 @@ impl ProtocolId { ::ssz_fixed_len(), ::ssz_fixed_len(), ), + Protocol::LightClientBootstrap => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty } } @@ -349,6 +365,10 @@ impl ProtocolId { as Encode>::ssz_fixed_len(), as Encode>::ssz_fixed_len(), ), + Protocol::LightClientBootstrap => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), } } @@ -455,62 +475,13 @@ pub enum InboundRequest { BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), + LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(PhantomData), } -impl UpgradeInfo for InboundRequest { - type Info = ProtocolId; - type InfoIter = Vec; - - // add further protocols as we support more encodings/versions - fn protocol_info(&self) -> Self::InfoIter { - self.supported_protocols() - } -} - /// Implements the encoding per supported protocol for `RPCRequest`. impl InboundRequest { - pub fn supported_protocols(&self) -> Vec { - match self { - // add more protocols when versions/encodings are supported - InboundRequest::Status(_) => vec![ProtocolId::new( - Protocol::Status, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::Goodbye(_) => vec![ProtocolId::new( - Protocol::Goodbye, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::BlocksByRange(_) => vec![ - // V2 has higher preference when negotiating a stream - ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), - ], - InboundRequest::BlocksByRoot(_) => vec![ - // V2 has higher preference when negotiating a stream - ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), - ], - InboundRequest::BlobsByRange(_) => vec![ProtocolId::new( - Protocol::BlobsByRange, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::Ping(_) => vec![ProtocolId::new( - Protocol::Ping, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::MetaData(_) => vec![ - ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), - ], - } - } - /* These functions are used in the handler for stream management */ /// Number of responses expected for this request. 
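Both request-size bounds above collapse to `ssz_fixed_len` because a bootstrap request is a single 32-byte root, so its minimum and maximum SSZ lengths coincide. A self-contained sketch of that property, using hypothetical minimal stand-ins for the `types`/`ssz` items in this tree:

// Stand-in types, just to demonstrate the fixed-length identity.
struct Hash256([u8; 32]);

struct LightClientBootstrapRequest {
    root: Hash256,
}

impl LightClientBootstrapRequest {
    // SSZ encodes a fixed-size container as the concatenation of its
    // fixed-size fields, so min == max == 32 bytes here.
    const SSZ_FIXED_LEN: usize = 32;

    fn as_ssz_bytes(&self) -> Vec<u8> {
        self.root.0.to_vec()
    }
}

fn main() {
    let req = LightClientBootstrapRequest { root: Hash256([0; 32]) };
    assert_eq!(req.as_ssz_bytes().len(), LightClientBootstrapRequest::SSZ_FIXED_LEN);
}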
@@ -523,6 +494,7 @@ impl InboundRequest { InboundRequest::BlobsByRange(req) => req.count, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, + InboundRequest::LightClientBootstrap(_) => 1, } } @@ -536,6 +508,7 @@ impl InboundRequest { InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange, InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::MetaData(_) => Protocol::MetaData, + InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, } } @@ -552,6 +525,7 @@ impl InboundRequest { InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), InboundRequest::MetaData(_) => unreachable!(), + InboundRequest::LightClientBootstrap(_) => unreachable!(), } } } @@ -656,6 +630,9 @@ impl std::fmt::Display for InboundRequest { InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), + InboundRequest::LightClientBootstrap(bootstrap) => { + write!(f, "LightClientBootstrap: {}", bootstrap.root) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 6aa91aab6..bdd61861a 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -75,6 +75,8 @@ pub struct RPCRateLimiter { bbroots_rl: Limiter, /// BlobsByRange rate limiter. blbrange_rl: Limiter, + /// LightClientBootstrap rate limiter. + lcbootstrap_rl: Limiter, } /// Error type for non conformant requests @@ -102,6 +104,8 @@ pub struct RPCRateLimiterBuilder { bbroots_quota: Option, /// Quota for the BlobsByRange protocol. blbrange_quota: Option, + /// Quota for the LightClientBootstrap protocol. 
+ lcbootstrap_quota: Option<Quota>, } impl RPCRateLimiterBuilder { @@ -121,6 +125,7 @@ impl RPCRateLimiterBuilder { Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, Protocol::BlobsByRange => self.blbrange_quota = q, + Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, } self } @@ -160,6 +165,9 @@ impl RPCRateLimiterBuilder { let bbrange_quota = self .bbrange_quota .ok_or("BlocksByRange quota not specified")?; + let lcbootstrap_quota = self + .lcbootstrap_quota + .ok_or("LightClientBootstrap quota not specified")?; let blbrange_quota = self .blbrange_quota @@ -173,6 +181,7 @@ let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; let blbrange_rl = Limiter::from_quota(blbrange_quota)?; + let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quota)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -187,6 +196,7 @@ bbroots_rl, bbrange_rl, blbrange_rl, + lcbootstrap_rl, init_time: Instant::now(), }) } @@ -211,6 +221,7 @@ impl RPCRateLimiter { Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, Protocol::BlobsByRange => &mut self.blbrange_rl, + Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl, }; check(limiter) } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 46af7ddb2..5152c187e 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,13 +1,14 @@ use std::sync::Arc; use libp2p::core::connection::ConnectionId; +use types::light_client_bootstrap::LightClientBootstrap; use types::{BlobsSidecar, EthSpec, SignedBeaconBlock}; use crate::rpc::methods::BlobsByRangeRequest; use crate::rpc::{ methods::{ - BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse, - RPCResponse, ResponseTermination, StatusMessage, + BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, + OldBlocksByRangeRequest, RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage, }, OutboundRequest, SubstreamId, }; @@ -37,6 +38,8 @@ pub enum Request { BlobsByRange(BlobsByRangeRequest), /// A request blocks root request. BlocksByRoot(BlocksByRootRequest), + /// A light client bootstrap request. + LightClientBootstrap(LightClientBootstrapRequest), } impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> { @@ -51,6 +54,7 @@ impl std::convert::From for OutboundRequest { }) } Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), + Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b), Request::Status(s) => OutboundRequest::Status(s), } } @@ -72,6 +76,8 @@ pub enum Response { BlobsByRange(Option<Arc<BlobsSidecar<TSpec>>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>), + /// A response to a LightClientBootstrap request.
+ LightClientBootstrap(LightClientBootstrap), } impl std::convert::From> for RPCCodedResponse { @@ -90,6 +96,9 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + Response::LightClientBootstrap(b) => { + RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) + } } } } diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 8327293a7..3adc940a6 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -8,7 +8,6 @@ use libp2p::gossipsub::subscription_filter::{ }; use libp2p::gossipsub::Gossipsub as BaseGossipsub; use libp2p::identify::Identify; -use libp2p::swarm::NetworkBehaviour; use libp2p::NetworkBehaviour; use types::EthSpec; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 65e805ca8..e5fbfe0b0 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -262,7 +262,11 @@ impl Network { (gossipsub, update_gossipsub_scores) }; - let eth2_rpc = RPC::new(ctx.fork_context.clone(), log.clone()); + let eth2_rpc = RPC::new( + ctx.fork_context.clone(), + config.enable_light_client_server, + log.clone(), + ); let discovery = { // Build and start the discovery sub-behaviour @@ -981,6 +985,9 @@ impl Network { Request::Status(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) } + Request::LightClientBootstrap(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["light_client_bootstrap"]) + } Request::BlocksByRange { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) } @@ -1261,6 +1268,14 @@ impl Network { ); Some(event) } + InboundRequest::LightClientBootstrap(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::LightClientBootstrap(req), + ); + Some(event) + } } } Ok(RPCReceived::Response(id, resp)) => { @@ -1291,6 +1306,10 @@ impl Network { RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } + // Should never be reached + RPCResponse::LightClientBootstrap(bootstrap) => { + self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) + } } } Ok(RPCReceived::EndOfStream(id, termination)) => { diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index ce03f61ff..5f09aec27 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -74,6 +74,17 @@ impl SyncState { } } + pub fn is_syncing_finalized(&self) -> bool { + match self { + SyncState::SyncingFinalized { .. } => true, + SyncState::SyncingHead { .. } => false, + SyncState::SyncTransition => false, + SyncState::BackFillSyncing { .. } => false, + SyncState::Synced => false, + SyncState::Stalled => false, + } + } + /// Returns true if the node is synced. /// /// NOTE: We consider the node synced if it is fetching old historical blocks. 
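The exhaustive match in the new `is_syncing_finalized` helper above is deliberate insurance: adding a `SyncState` variant will not compile until its behaviour here is decided. The compact equivalent below (an editorial sketch over simplified, field-less variants, not part of the patch) behaves identically today but would silently treat any future variant as `false`:

// Simplified variants; the real enum carries per-variant data.
enum SyncState { SyncingFinalized, SyncingHead, SyncTransition, BackFillSyncing, Synced, Stalled }

impl SyncState {
    fn is_syncing_finalized(&self) -> bool {
        matches!(self, SyncState::SyncingFinalized)
    }
}

The beacon processor changes below consume this flag when deciding whether chain-segment work should notify the execution layer.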
diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index ba71f0d95..86221d991 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -41,11 +41,12 @@ use crate::sync::manager::BlockProcessType; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::parking_lot::Mutex; -use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; +use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer}; use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::LightClientBootstrapRequest; use lighthouse_network::SignedBeaconBlockAndBlobsSidecar; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, @@ -169,6 +170,10 @@ const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; /// is activated. const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384; +/// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; + /// The name of the manager tokio task. const MANAGER_TASK_NAME: &str = "beacon_processor_manager"; @@ -210,6 +215,7 @@ pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; +pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change"; @@ -624,6 +630,22 @@ impl WorkEvent { } } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. + pub fn lightclient_bootstrap_request( + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::LightClientBootstrapRequest { + peer_id, + request_id, + request, + }, + } + } + /// Get a `str` representation of the type of work this `WorkEvent` contains. pub fn work_type(&self) -> &'static str { self.work.str_id() @@ -817,6 +839,11 @@ pub enum Work { peer_id: PeerId, bls_to_execution_change: Box, }, + LightClientBootstrapRequest { + peer_id: PeerId, + request_id: PeerRequestId, + request: LightClientBootstrapRequest, + }, } impl Work { @@ -841,6 +868,7 @@ impl Work { Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST, + Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, Work::GossipBlsToExecutionChange { .. 
} => GOSSIP_BLS_TO_EXECUTION_CHANGE, @@ -992,6 +1020,7 @@ impl BeaconProcessor { let mut gossip_bls_to_execution_change_queue = FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); + let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); @@ -1236,6 +1265,8 @@ impl BeaconProcessor { } else if let Some(item) = backfill_chain_segment.pop() { self.spawn_worker(item, toolbox); // This statement should always be the final else statement. + } else if let Some(item) = lcbootstrap_queue.pop() { + self.spawn_worker(item, toolbox); } else { // Let the journal know that a worker is freed and there's nothing else // for it to do. @@ -1342,6 +1373,9 @@ impl BeaconProcessor { Work::BlobsByRangeRequest { .. } => { blbrange_queue.push(work, work_id, &self.log) } + Work::LightClientBootstrapRequest { .. } => { + lcbootstrap_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. } => { unknown_block_attestation_queue.push(work) } @@ -1700,8 +1734,24 @@ impl BeaconProcessor { /* * Verification for a chain segment (multiple blocks). */ - Work::ChainSegment { process_id, blocks } => task_spawner - .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }), + Work::ChainSegment { process_id, blocks } => { + let notify_execution_layer = if self + .network_globals + .sync_state + .read() + .is_syncing_finalized() + { + NotifyExecutionLayer::No + } else { + NotifyExecutionLayer::Yes + }; + + task_spawner.spawn_async(async move { + worker + .process_chain_segment(process_id, blocks, notify_execution_layer) + .await + }) + } /* * Processing of Status Messages. */ @@ -1740,7 +1790,6 @@ impl BeaconProcessor { request, ) }), - Work::BlobsByRangeRequest { peer_id, request_id, @@ -1754,7 +1803,16 @@ impl BeaconProcessor { request, ) }), - + /* + * Processing of lightclient bootstrap requests from other peers. + */ + Work::LightClientBootstrapRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking(move || { + worker.handle_light_client_bootstrap(peer_id, request_id, request) + }), Work::UnknownBlockAttestation { message_id, peer_id, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 59f157e21..327a3f683 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -8,7 +8,7 @@ use beacon_chain::{ sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, - GossipVerifiedBlock, + GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{ Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource, @@ -812,7 +812,7 @@ impl Worker { | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::RepeatProposal { .. }) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { - debug!(self.log, "Could not verify block for gossip, ignoring the block"; + debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. 
self.gossip_penalize_peer( @@ -824,7 +824,7 @@ impl Worker { return None; } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { - debug!(self.log, "Could not verify block for gossip, ignoring the block"; + debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; @@ -846,7 +846,7 @@ impl Worker { // TODO(merge): reconsider peer scoring for this event. | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::GenesisBlock) => { - warn!(self.log, "Could not verify block for gossip, rejecting the block"; + warn!(self.log, "Could not verify block for gossip. Rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -953,7 +953,12 @@ impl Worker { match self .chain - .process_block(block_root, verified_block, CountUnrealized::True) + .process_block( + block_root, + verified_block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) .await { Ok(block_root) => { diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs index f907c49b7..1cbc64b63 100644 --- a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ b/beacon_node/network/src/beacon_processor/worker/mod.rs @@ -38,7 +38,7 @@ impl Worker { /// Creates a log if there is an internal error. fn send_network_message(&self, message: NetworkMessage) { self.network_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the network service, likely shutdown"; + debug!(self.log, "Could not send message to the network service. Likely shutdown"; "error" => %e) }); } diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index beaea3833..82d65ef75 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -12,7 +12,7 @@ use slog::{debug, error}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; use super::Worker; @@ -205,6 +205,79 @@ impl Worker { ) } + /// Handle a `LightClientBootstrap` request from the peer.
+    pub fn handle_light_client_bootstrap(
+        self,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: LightClientBootstrapRequest,
+    ) {
+        let block_root = request.root;
+        let state_root = match self.chain.get_blinded_block(&block_root) {
+            Ok(signed_block) => match signed_block {
+                Some(signed_block) => signed_block.state_root(),
+                None => {
+                    self.send_error_response(
+                        peer_id,
+                        RPCResponseErrorCode::ResourceUnavailable,
+                        "Bootstrap not available".into(),
+                        request_id,
+                    );
+                    return;
+                }
+            },
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        let mut beacon_state = match self.chain.get_state(&state_root, None) {
+            Ok(beacon_state) => match beacon_state {
+                Some(state) => state,
+                None => {
+                    self.send_error_response(
+                        peer_id,
+                        RPCResponseErrorCode::ResourceUnavailable,
+                        "Bootstrap not available".into(),
+                        request_id,
+                    );
+                    return;
+                }
+            },
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) {
+            Ok(bootstrap) => bootstrap,
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        self.send_response(
+            peer_id,
+            Response::LightClientBootstrap(bootstrap),
+            request_id,
+        )
+    }
+
     /// Handle a `BlocksByRange` request from the peer.
     pub fn handle_blocks_by_range_request(
         self,
diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs
index 5d97894fe..1ec045e97 100644
--- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs
+++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs
@@ -10,6 +10,7 @@ use crate::sync::{BatchProcessResult, ChainId};
 use beacon_chain::CountUnrealized;
 use beacon_chain::{
     BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
+    NotifyExecutionLayer,
 };
 use lighthouse_network::PeerAction;
 use slog::{debug, error, info, warn};
@@ -85,7 +86,12 @@ impl<T: BeaconChainTypes> Worker<T> {
         let slot = block.slot();
         let result = self
             .chain
-            .process_block(block_root, block, CountUnrealized::True)
+            .process_block(
+                block_root,
+                block,
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
             .await;
 
         metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@@ -127,6 +133,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         &self,
         sync_type: ChainSegmentProcessId,
         downloaded_blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        notify_execution_layer: NotifyExecutionLayer,
     ) {
         let result = match sync_type {
             // this is a request from the range sync
@@ -136,7 +143,11 @@
                 let sent_blocks = downloaded_blocks.len();
 
                 match self
-                    .process_blocks(downloaded_blocks.iter(), count_unrealized)
+                    .process_blocks(
+                        downloaded_blocks.iter(),
+                        count_unrealized,
+                        notify_execution_layer,
+                    )
                     .await
                 {
                     (_, Ok(_)) => {
@@ -215,7 +226,11 @@
                 // parent blocks are ordered from highest slot to lowest, so we need to process in
                 // reverse
                 match self
-                    .process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True)
+                    .process_blocks(
+                        downloaded_blocks.iter().rev(),
+                        CountUnrealized::True,
+                        notify_execution_layer,
+                    )
                     .await
                 {
                     (imported_blocks, Err(e)) => {
@@ -246,11 +261,12 @@
         &self,
         downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>,
         count_unrealized: CountUnrealized,
+        notify_execution_layer: NotifyExecutionLayer,
     ) -> (usize, Result<(), ChainSegmentFailed>) {
         let blocks: Vec<Arc<_>> = downloaded_blocks.cloned().collect();
         match self
             .chain
-            .process_chain_segment(blocks, count_unrealized)
+            .process_chain_segment(blocks, count_unrealized, notify_execution_layer)
             .await
         {
             ChainSegmentResult::Successful { imported_blocks } => {
@@ -428,7 +444,7 @@ impl<T: BeaconChainTypes> Worker<T> {
             } else {
                 // The block is in the future, but not too far.
                 debug!(
-                    self.log, "Block is slightly ahead of our slot clock, ignoring.";
+                    self.log, "Block is slightly ahead of our slot clock. Ignoring.";
                     "present_slot" => present_slot,
                     "block_slot" => block_slot,
                     "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs
index 75986ff3f..fab23facc 100644
--- a/beacon_node/network/src/router/mod.rs
+++ b/beacon_node/network/src/router/mod.rs
@@ -171,6 +171,9 @@ impl<T: BeaconChainTypes> Router<T> {
             Request::BlobsByRange(request) => self
                 .processor
                 .on_blobs_by_range_request(peer_id, id, request),
+            Request::LightClientBootstrap(request) => self
+                .processor
+                .on_lightclient_bootstrap(peer_id, id, request),
         }
     }
 
@@ -199,6 +202,7 @@ impl<T: BeaconChainTypes> Router<T> {
                 self.processor
                     .on_blobs_by_range_response(peer_id, request_id, beacon_blob);
             }
+            // This node only serves bootstraps and never requests them, so no response can arrive.
+            Response::LightClientBootstrap(_) => unreachable!(),
         }
     }
diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs
index b8bcab847..ac826ed3e 100644
--- a/beacon_node/network/src/router/processor.rs
+++ b/beacon_node/network/src/router/processor.rs
@@ -172,6 +172,19 @@ impl<T: BeaconChainTypes> Processor<T> {
             peer_id, request_id, request,
         ))
     }
+
+    /// Handle a `LightClientBootstrap` request from the peer.
+    pub fn on_lightclient_bootstrap(
+        &mut self,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: LightClientBootstrapRequest,
+    ) {
+        self.send_beacon_processor_work(BeaconWorkEvent::lightclient_bootstrap_request(
+            peer_id, request_id, request,
+        ))
+    }
+
     /// Handle a `BlocksByRange` request from the peer.
     pub fn on_blocks_by_range_request(
         &mut self,
diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs
index 5c2bc6522..aa2694769 100644
--- a/beacon_node/network/src/sync/block_lookups/mod.rs
+++ b/beacon_node/network/src/sync/block_lookups/mod.rs
@@ -1,4 +1,5 @@
 use std::collections::hash_map::Entry;
+use std::collections::HashMap;
 use std::time::Duration;
 
 use beacon_chain::{BeaconChainTypes, BlockError};
@@ -13,6 +14,7 @@ use store::{Hash256, SignedBeaconBlock};
 use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
 use crate::metrics;
 
+use self::parent_lookup::PARENT_FAIL_TOLERANCE;
 use self::{
     parent_lookup::{ParentLookup, VerifyError},
     single_block_lookup::SingleBlockRequest,
@@ -36,8 +38,11 @@ const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60;
 const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;
 
 pub(crate) struct BlockLookups<T: BeaconChainTypes> {
-    /// A collection of parent block lookups.
-    parent_queue: SmallVec<[ParentLookup<T::EthSpec>; 3]>,
+    /// Parent chain lookups being downloaded.
+    parent_lookups: SmallVec<[ParentLookup<T::EthSpec>; 3]>,
+
+    /// Parent chains that have finished downloading and are being processed, keyed by chain hash.
+    processing_parent_lookups:
+        HashMap<Hash256, (Vec<Hash256>, SingleBlockRequest<PARENT_FAIL_TOLERANCE>)>,
 
     /// A cache of failed chain lookups to prevent duplicate searches.
     failed_chains: LRUTimeCache<Hash256>,
@@ -55,7 +60,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
     pub fn new(log: Logger) -> Self {
         Self {
-            parent_queue: Default::default(),
+            parent_lookups: Default::default(),
+            processing_parent_lookups: Default::default(),
             failed_chains: LRUTimeCache::new(Duration::from_secs(
                 FAILED_CHAINS_CACHE_EXPIRY_SECONDS,
             )),
@@ -78,6 +84,23 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
             return;
         }
 
+        if self.parent_lookups.iter_mut().any(|parent_req| {
+            parent_req.add_peer(&hash, &peer_id) || parent_req.contains_block(&hash)
+        }) {
+            // If the block was already downloaded, or is being downloaded at this moment, do not
+            // request it.
+            return;
+        }
+
+        if self
+            .processing_parent_lookups
+            .values()
+            .any(|(hashes, _last_parent_request)| hashes.contains(&hash))
+        {
+            // we are already processing this block, ignore it.
+            return;
+        }
+
         debug!(
             self.log,
             "Searching for block";
@@ -118,8 +141,8 @@
         // Make sure this block is not already downloaded, and that neither it nor its parent is
         // being searched for.
-        if self.parent_queue.iter_mut().any(|parent_req| {
-            parent_req.contains_block(&block)
+        if self.parent_lookups.iter_mut().any(|parent_req| {
+            parent_req.contains_block(&block_root)
                 || parent_req.add_peer(&block_root, &peer_id)
                 || parent_req.add_peer(&parent_root, &peer_id)
         }) {
@@ -127,6 +150,15 @@
             return;
         }
 
+        if self
+            .processing_parent_lookups
+            .values()
+            .any(|(hashes, _peers)| hashes.contains(&block_root) || hashes.contains(&parent_root))
+        {
+            // we are already processing this block, ignore it.
+            return;
+        }
+
         let parent_lookup = ParentLookup::new(block_root, block, peer_id);
         self.request_parent(parent_lookup, cx);
     }
@@ -207,11 +239,11 @@
         cx: &mut SyncNetworkContext<T::EthSpec>,
     ) {
         let mut parent_lookup = if let Some(pos) = self
-            .parent_queue
+            .parent_lookups
             .iter()
             .position(|request| request.pending_response(id))
         {
-            self.parent_queue.remove(pos)
+            self.parent_lookups.remove(pos)
         } else {
             if block.is_some() {
                 debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id);
@@ -233,13 +265,13 @@
                 )
                 .is_ok()
                 {
-                    self.parent_queue.push(parent_lookup)
+                    self.parent_lookups.push(parent_lookup)
                 }
             }
             Ok(None) => {
                 // Request finished successfully, nothing else to do. It will be removed after the
                 // processing result arrives.
- self.parent_queue.push(parent_lookup); + self.parent_lookups.push(parent_lookup); } Err(e) => match e { VerifyError::RootMismatch @@ -276,7 +308,7 @@ impl BlockLookups { metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -324,11 +356,11 @@ impl BlockLookups { /* Check disconnection for parent lookups */ while let Some(pos) = self - .parent_queue + .parent_lookups .iter_mut() .position(|req| req.check_peer_disconnected(peer_id).is_err()) { - let parent_lookup = self.parent_queue.remove(pos); + let parent_lookup = self.parent_lookups.remove(pos); trace!(self.log, "Parent lookup's peer disconnected"; &parent_lookup); self.request_parent(parent_lookup, cx); } @@ -342,11 +374,11 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) { if let Some(pos) = self - .parent_queue + .parent_lookups .iter() .position(|request| request.pending_response(id)) { - let mut parent_lookup = self.parent_queue.remove(pos); + let mut parent_lookup = self.parent_lookups.remove(pos); parent_lookup.download_failed(); trace!(self.log, "Parent lookup request failed"; &parent_lookup); self.request_parent(parent_lookup, cx); @@ -355,7 +387,7 @@ impl BlockLookups { }; metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -470,7 +502,7 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) { let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self - .parent_queue + .parent_lookups .iter() .enumerate() .find_map(|(pos, request)| { @@ -478,7 +510,7 @@ impl BlockLookups { .get_processing_peer(chain_hash) .map(|peer| (pos, peer)) }) { - (self.parent_queue.remove(pos), peer) + (self.parent_lookups.remove(pos), peer) } else { return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; @@ -520,13 +552,13 @@ impl BlockLookups { ); } }; - let chain_hash = parent_lookup.chain_hash(); - let blocks = parent_lookup.chain_blocks(); + let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing(); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) { Ok(_) => { - self.parent_queue.push(parent_lookup); + self.processing_parent_lookups + .insert(chain_hash, (hashes, request)); } Err(e) => { error!( @@ -580,7 +612,7 @@ impl BlockLookups { metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -590,14 +622,11 @@ impl BlockLookups { result: BatchProcessResult, cx: &mut SyncNetworkContext, ) { - let parent_lookup = if let Some(pos) = self - .parent_queue - .iter() - .position(|request| request.chain_hash() == chain_hash) - { - self.parent_queue.remove(pos) - } else { - return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); + let request = match self.processing_parent_lookups.remove(&chain_hash) { + Some((_hashes, request)) => request, + None => { + return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result) + } }; debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); @@ -609,8 +638,8 @@ impl BlockLookups { imported_blocks: _, penalty, } => { - self.failed_chains.insert(parent_lookup.chain_hash()); - for &peer_id in 
parent_lookup.used_peers() {
+                self.failed_chains.insert(chain_hash);
+                for peer_id in request.used_peers {
                     cx.report_peer(peer_id, penalty, "parent_chain_failure")
                 }
             }
@@ -621,7 +650,7 @@
 
         metrics::set_gauge(
             &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
-            self.parent_queue.len() as i64,
+            self.parent_lookups.len() as i64,
         );
     }
@@ -697,14 +726,14 @@
             }
             Ok(_) => {
                 debug!(self.log, "Requesting parent"; &parent_lookup);
-                self.parent_queue.push(parent_lookup)
+                self.parent_lookups.push(parent_lookup)
             }
         }
 
         // We remove and add back again requests so we want this updated regardless of outcome.
         metrics::set_gauge(
             &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
-            self.parent_queue.len() as i64,
+            self.parent_lookups.len() as i64,
         );
     }
@@ -715,6 +744,6 @@
     /// Drops all the parent chain requests and returns how many requests were dropped.
     pub fn drop_parent_chain_requests(&mut self) -> usize {
-        self.parent_queue.drain(..).len()
+        self.parent_lookups.drain(..).len()
     }
 }
diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs
index 38ad59ebc..a2c2f1d1c 100644
--- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs
+++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs
@@ -24,7 +24,7 @@ pub(crate) struct ParentLookup<T: BeaconChainTypes> {
     /// The root of the block triggering this parent request.
     chain_hash: Hash256,
     /// The blocks that have currently been downloaded.
-    downloaded_blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+    downloaded_blocks: Vec<(Hash256, Arc<SignedBeaconBlock<T::EthSpec>>)>,
     /// Request of the last parent.
     current_parent_request: SingleBlockRequest<PARENT_FAIL_TOLERANCE>,
     /// Id of the last parent request.
@@ -53,10 +53,10 @@
 }
 
 impl<T: BeaconChainTypes> ParentLookup<T> {
-    pub fn contains_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
+    pub fn contains_block(&self, block_root: &Hash256) -> bool {
         self.downloaded_blocks
             .iter()
-            .any(|d_block| d_block.as_ref() == block)
+            .any(|(root, _d_block)| root == block_root)
     }
 
     pub fn new(
@@ -68,7 +68,7 @@
 
         Self {
             chain_hash: block_root,
-            downloaded_blocks: vec![block],
+            downloaded_blocks: vec![(block_root, block)],
             current_parent_request,
             current_parent_request_id: None,
         }
@@ -100,7 +100,8 @@
     pub fn add_block(&mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>) {
         let next_parent = block.parent_root();
-        self.downloaded_blocks.push(block);
+        let current_root = self.current_parent_request.hash;
+        self.downloaded_blocks.push((current_root, block));
         self.current_parent_request.hash = next_parent;
         self.current_parent_request.state = single_block_lookup::State::AwaitingDownload;
         self.current_parent_request_id = None;
@@ -110,6 +111,32 @@
         self.current_parent_request_id == Some(req_id)
     }
 
+    /// Consumes the parent request and destructures it into its parts.
+    #[allow(clippy::type_complexity)]
+    pub fn parts_for_processing(
+        self,
+    ) -> (
+        Hash256,
+        Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        Vec<Hash256>,
+        SingleBlockRequest<PARENT_FAIL_TOLERANCE>,
+    ) {
+        let ParentLookup {
+            chain_hash,
+            downloaded_blocks,
+            current_parent_request,
+            current_parent_request_id: _,
+        } = self;
+        let block_count = downloaded_blocks.len();
+        let mut blocks = Vec::with_capacity(block_count);
+        let mut hashes = Vec::with_capacity(block_count);
+        for (hash, block) in downloaded_blocks {
+            blocks.push(block);
+            hashes.push(hash);
+        }
+        (chain_hash, blocks, hashes, current_parent_request)
+    }
+
     /// Get the parent lookup's chain hash.
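+    /// This is the root of the block that triggered the whole chain lookup; it also serves
+    /// as the key under which the chain is tracked while its blocks are being processed.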
     pub fn chain_hash(&self) -> Hash256 {
         self.chain_hash
@@ -125,10 +152,6 @@
         self.current_parent_request_id = None;
     }
 
-    pub fn chain_blocks(&mut self) -> Vec<Arc<SignedBeaconBlock<T::EthSpec>>> {
-        std::mem::take(&mut self.downloaded_blocks)
-    }
-
     /// Verifies that the received block is what we requested. If so, parent lookup now waits for
     /// the processing result of the block.
     pub fn verify_block(
diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs
index 64a1a6e83..8ade622f8 100644
--- a/beacon_node/network/src/sync/block_lookups/tests.rs
+++ b/beacon_node/network/src/sync/block_lookups/tests.rs
@@ -259,7 +259,7 @@ fn test_single_block_lookup_becomes_parent_request() {
     assert_eq!(bl.single_block_lookups.len(), 0);
     rig.expect_parent_request();
     rig.expect_empty_network();
-    assert_eq!(bl.parent_queue.len(), 1);
+    assert_eq!(bl.parent_lookups.len(), 1);
 }
 
 #[test]
@@ -287,7 +287,7 @@ fn test_parent_lookup_happy_path() {
         was_non_empty: true,
     };
     bl.parent_chain_processed(chain_hash, process_result, &mut cx);
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }
 
 #[test]
@@ -324,7 +324,7 @@ fn test_parent_lookup_wrong_response() {
         was_non_empty: true,
     };
     bl.parent_chain_processed(chain_hash, process_result, &mut cx);
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }
 
 #[test]
@@ -356,7 +356,7 @@ fn test_parent_lookup_empty_response() {
         was_non_empty: true,
     };
     bl.parent_chain_processed(chain_hash, process_result, &mut cx);
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }
 
 #[test]
@@ -387,7 +387,7 @@ fn test_parent_lookup_rpc_failure() {
         was_non_empty: true,
     };
     bl.parent_chain_processed(chain_hash, process_result, &mut cx);
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }
 
 #[test]
@@ -419,11 +419,11 @@ fn test_parent_lookup_too_many_attempts() {
             }
         }
         if i < parent_lookup::PARENT_FAIL_TOLERANCE {
-            assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i));
+            assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i));
         }
     }
 
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }
 
 #[test]
@@ -450,11 +450,11 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
             rig.expect_penalty();
         }
         if i < parent_lookup::PARENT_FAIL_TOLERANCE {
-            assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i));
+            assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i));
         }
     }
 
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
     assert!(!bl.failed_chains.contains(&block_hash));
     assert!(!bl.failed_chains.contains(&parent.canonical_root()));
 }
@@ -491,7 +491,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
     }
 
     assert!(bl.failed_chains.contains(&block_hash));
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }
 
 #[test]
@@ -545,7 +545,7 @@ fn test_parent_lookup_disconnection() {
         &mut cx,
     );
     bl.peer_disconnected(&peer_id, &mut cx);
-    assert!(bl.parent_queue.is_empty());
+    assert!(bl.parent_lookups.is_empty());
 }
 
 #[test]
@@ -598,5 +598,78 @@ fn test_parent_lookup_ignored_response() {
     // Return an Ignored result. The request should be dropped
     bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx);
     rig.expect_empty_network();
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
+}
+
+/// This is a regression test.
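+/// It checks that a chain that is already being processed is not downloaded and processed
+/// a second time when another lookup for a block in the same chain races against it, which
+/// previously produced duplicate parent lookups for the same chain hash.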
+#[test]
+fn test_same_chain_race_condition() {
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(Some(Level::Debug));
+
+    #[track_caller]
+    fn parent_lookups_consistency(bl: &BlockLookups<T>) {
+        let hashes: Vec<_> = bl
+            .parent_lookups
+            .iter()
+            .map(|req| req.chain_hash())
+            .collect();
+        let expected = hashes.len();
+        assert_eq!(
+            expected,
+            hashes
+                .into_iter()
+                .collect::<std::collections::HashSet<_>>()
+                .len(),
+            "duplicated chain hashes in parent queue"
+        )
+    }
+
+    // if we use one or two blocks it will match on the hash or the parent hash, so make a longer
+    // chain.
+    let depth = 4;
+    let mut blocks = Vec::<Arc<SignedBeaconBlock<E>>>::with_capacity(depth);
+    while blocks.len() < depth {
+        let parent = blocks
+            .last()
+            .map(|b| b.canonical_root())
+            .unwrap_or_else(Hash256::random);
+        let block = Arc::new(rig.block_with_parent(parent));
+        blocks.push(block);
+    }
+
+    let peer_id = PeerId::random();
+    let trigger_block = blocks.pop().unwrap();
+    let chain_hash = trigger_block.canonical_root();
+    bl.search_parent(chain_hash, trigger_block.clone(), peer_id, &mut cx);
+
+    for (i, block) in blocks.into_iter().rev().enumerate() {
+        let id = rig.expect_parent_request();
+        // the block
+        bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx);
+        // the stream termination
+        bl.parent_lookup_response(id, peer_id, None, D, &mut cx);
+        // the processing request
+        rig.expect_block_process();
+        // the processing result
+        if i + 2 == depth {
+            // one block was removed
+            bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx)
+        } else {
+            bl.parent_block_processed(chain_hash, BlockError::ParentUnknown(block).into(), &mut cx)
+        }
+        parent_lookups_consistency(&bl)
+    }
+
+    // Processing succeeds, now the rest of the chain should be sent for processing.
+    rig.expect_parent_chain_process();
+
+    // Try to get this block again while the chain is being processed. We should not request it again.
+    let peer_id = PeerId::random();
+    bl.search_parent(chain_hash, trigger_block, peer_id, &mut cx);
+    parent_lookups_consistency(&bl);
+
+    let process_result = BatchProcessResult::Success {
+        was_non_empty: true,
+    };
+    bl.parent_chain_processed(chain_hash, process_result, &mut cx);
+    assert_eq!(bl.parent_lookups.len(), 0);
+}
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs
index 928669590..a1eeda84e 100644
--- a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -643,7 +643,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
 
         // Some logs.
         if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 {
-            debug!(self.log, "Execution engine not online, dropping active requests.";
+            debug!(self.log, "Execution engine not online.
Dropping active requests."; "dropped_single_blocks_requests" => dropped_single_blocks_requests, "dropped_parent_chain_requests" => dropped_parent_chain_requests, ); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 45ade7034..c81fed244 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -242,7 +242,7 @@ impl SyncNetworkContext { source: ReportSource::SyncService, }) .unwrap_or_else(|_| { - warn!(self.log, "Could not report peer, channel failed"); + warn!(self.log, "Could not report peer: channel failed"); }); } @@ -257,7 +257,7 @@ impl SyncNetworkContext { msg, }) .unwrap_or_else(|e| { - warn!(self.log, "Could not report peer, channel failed"; "error"=> %e); + warn!(self.log, "Could not report peer: channel failed"; "error"=> %e); }); } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 81a7c6bbe..44a995176 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -714,6 +714,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .conflicts_with("checkpoint-state") ) + .arg( + Arg::with_name("checkpoint-sync-url-timeout") + .long("checkpoint-sync-url-timeout") + .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.") + .value_name("SECONDS") + .takes_value(true) + .default_value("60") + ) .arg( Arg::with_name("reconstruct-historic-states") .long("reconstruct-historic-states") @@ -860,4 +868,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Useful if you intend to run a non-validating beacon node.") .takes_value(false) ) + .arg( + Arg::with_name("light-client-server") + .long("light-client-server") + .help("Act as a full node supporting light clients on the p2p network \ + [experimental]") + .takes_value(false) + ) + .arg( + Arg::with_name("gui") + .long("gui") + .hidden(true) + .help("Enable the graphical user interface and all its requirements. \ + This is equivalent to --http and --validator-monitor-auto.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 3b94c3129..e98b585f5 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -14,6 +14,7 @@ use std::cmp::max; use std::fmt::Debug; use std::fmt::Write; use std::fs; +use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -34,13 +35,13 @@ pub fn get_config( let spec = &context.eth2_config.spec; let log = context.log(); - let mut client_config = ClientConfig { - data_dir: get_data_dir(cli_args), - ..Default::default() - }; + let mut client_config = ClientConfig::default(); + + // Update the client's data directory + client_config.set_data_dir(get_data_dir(cli_args)); // If necessary, remove any existing database and configuration - if client_config.data_dir.exists() && cli_args.is_present("purge-db") { + if client_config.data_dir().exists() && cli_args.is_present("purge-db") { // Remove the chain_db. let chain_db = client_config.get_db_path(); if chain_db.exists() { @@ -57,11 +58,11 @@ pub fn get_config( } // Create `datadir` and any non-existing parent directories. 
-    fs::create_dir_all(&client_config.data_dir)
+    fs::create_dir_all(client_config.data_dir())
         .map_err(|e| format!("Failed to create data dir: {}", e))?;
 
     // logs the chosen data directory
-    let mut log_dir = client_config.data_dir.clone();
+    let mut log_dir = client_config.data_dir().clone();
     // remove /beacon from the end
     log_dir.pop();
     info!(log, "Data directory initialised"; "datadir" => log_dir.into_os_string().into_string().expect("Datadir should be a valid os string"));
@@ -69,10 +70,13 @@
     /*
      * Networking
      */
+
+    let data_dir_ref = client_config.data_dir().clone();
+
     set_network_config(
         &mut client_config.network,
         cli_args,
-        &client_config.data_dir,
+        &data_dir_ref,
         log,
         false,
     )?;
@@ -303,7 +307,7 @@
     } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") {
         use std::fs::File;
         use std::io::Write;
-        secret_file = client_config.data_dir.join(DEFAULT_JWT_FILE);
+        secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE);
         let mut jwt_secret_key_file = File::create(secret_file.clone())
             .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?;
         jwt_secret_key_file
@@ -332,7 +336,7 @@
             clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?;
         el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?;
         el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?;
-        el_config.default_datadir = client_config.data_dir.clone();
+        el_config.default_datadir = client_config.data_dir().clone();
         el_config.builder_profit_threshold =
             clap_utils::parse_required(cli_args, "builder-profit-threshold")?;
         let execution_timeout_multiplier =
@@ -441,6 +445,8 @@
                 .extend_from_slice(boot_nodes)
         }
     }
+    client_config.chain.checkpoint_sync_url_timeout =
+        clap_utils::parse_required::<u64>(cli_args, "checkpoint-sync-url-timeout")?;
 
     client_config.genesis = if let Some(genesis_state_bytes) =
         eth2_network_config.genesis_state_bytes.clone()
@@ -571,7 +577,7 @@
     let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") {
         PathBuf::from(slasher_dir)
     } else {
-        client_config.data_dir.join("slasher_db")
+        client_config.data_dir().join("slasher_db")
     };
 
     let mut slasher_config = slasher::Config::new(slasher_dir);
@@ -703,6 +709,12 @@
     client_config.chain.builder_fallback_disable_checks =
         cli_args.is_present("builder-fallback-disable-checks");
 
+    // Graphical user interface config.
+    if cli_args.is_present("gui") {
+        client_config.http_api.enabled = true;
+        client_config.validator_monitor_auto = true;
+    }
+
     Ok(client_config)
 }
@@ -832,9 +844,11 @@
     }
 
     if cli_args.is_present("enr-match") {
-        // set the enr address to localhost if the address is 0.0.0.0
-        if config.listen_address == "0.0.0.0".parse::<IpAddr>().expect("valid ip addr") {
-            config.enr_address = Some("127.0.0.1".parse::<IpAddr>().expect("valid ip addr"));
+        // set the enr address to localhost if the address is unspecified
+        if config.listen_address == IpAddr::V4(Ipv4Addr::UNSPECIFIED) {
+            config.enr_address = Some(IpAddr::V4(Ipv4Addr::LOCALHOST));
+        } else if config.listen_address == IpAddr::V6(Ipv6Addr::UNSPECIFIED) {
+            config.enr_address = Some(IpAddr::V6(Ipv6Addr::LOCALHOST));
         } else {
             config.enr_address = Some(config.listen_address);
         }
@@ -914,6 +928,9 @@
         config.discv5_config.table_filter = |_| true;
     }
 
+    // Light client server config.
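+    // When enabled (via the experimental --light-client-server flag), the beacon node will
+    // serve LightClientBootstrap requests from peers on the p2p network.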
+    config.enable_light_client_server = cli_args.is_present("light-client-server");
+
     Ok(())
 }
diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
index d05677465..a43fa10e6 100644
--- a/book/src/SUMMARY.md
+++ b/book/src/SUMMARY.md
@@ -1,9 +1,6 @@
 # Summary
 
 * [Introduction](./intro.md)
-* [Become a Validator](./mainnet-validator.md)
-  * [Become a Testnet Validator](./testnet-validator.md)
-* [Merge Migration](./merge-migration.md)
 * [Installation](./installation.md)
   * [System Requirements](./system-requirements.md)
   * [Pre-Built Binaries](./installation-binaries.md)
@@ -13,6 +10,9 @@
   * [Cross-Compiling](./cross-compiling.md)
   * [Homebrew](./homebrew.md)
   * [Update Priorities](./installation-priorities.md)
+* [Run a Node](./run_a_node.md)
+* [Become a Validator](./mainnet-validator.md)
+  * [Become a Testnet Validator](./testnet-validator.md)
 * [Key Management](./key-management.md)
   * [Create a wallet](./wallet-create.md)
   * [Create a validator](./validator-create.md)
@@ -46,6 +46,7 @@
   * [Pre-Releases](./advanced-pre-releases.md)
   * [Release Candidates](./advanced-release-candidates.md)
   * [MEV and Lighthouse](./builders.md)
+  * [Merge Migration](./merge-migration.md)
 * [Contributing](./contributing.md)
   * [Development Environment](./setup.md)
 * [FAQs](./faq.md)
diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md
index d9c8080b4..c1ba6a2dc 100644
--- a/book/src/api-lighthouse.md
+++ b/book/src/api-lighthouse.md
@@ -62,6 +62,43 @@
curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq
 ```
 
+### `/lighthouse/ui/health`
+
+```bash
+curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: application/json" | jq
+```
+
+```json
+{
+  "data": {
+    "total_memory": 16443219968,
+    "free_memory": 1283739648,
+    "used_memory": 5586264064,
+    "sys_loadavg_1": 0.59,
+    "sys_loadavg_5": 1.13,
+    "sys_loadavg_15": 2.41,
+    "cpu_cores": 4,
+    "cpu_threads": 8,
+    "global_cpu_frequency": 3.4,
+    "disk_bytes_total": 502390845440,
+    "disk_bytes_free": 9981386752,
+    "network_name": "wlp0s20f3",
+    "network_bytes_total_received": 14105556611,
+    "network_bytes_total_transmit": 3649489389,
+    "nat_open": true,
+    "connected_peers": 80,
+    "sync_state": "Synced",
+    "system_uptime": 660706,
+    "app_uptime": 105,
+    "system_name": "Arch Linux",
+    "kernel_version": "5.19.13-arch1-1",
+    "os_version": "Linux rolling Arch Linux",
+    "host_name": "Computer1"
+  }
+}
+```
+
 ### `/lighthouse/syncing`
 
 ```bash
diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md
index 9aedf6e24..76cffc0e4 100644
--- a/book/src/api-vc-endpoints.md
+++ b/book/src/api-vc-endpoints.md
@@ -6,6 +6,7 @@
 HTTP Path | Description |
 | --- | -- |
 [`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version.
 [`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine.
+[`GET /lighthouse/ui/health`](#get-lighthouseuihealth) | Get information about the host machine, tailored for UI applications.
 [`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator.
 [`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token.
 [`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators.
@@ -77,6 +78,45 @@
 }
 ```
 
+## `GET /lighthouse/ui/health`
+
+Returns information regarding the health of the host machine.
+ +### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/ui/health` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | + +### Example Response Body + +```json +{ + "data": { + "total_memory": 16443219968, + "free_memory": 1283739648, + "used_memory": 5586264064, + "sys_loadavg_1": 0.59, + "sys_loadavg_5": 1.13, + "sys_loadavg_15": 2.41, + "cpu_cores": 4, + "cpu_threads": 8, + "global_cpu_frequency": 3.4, + "disk_bytes_total": 502390845440, + "disk_bytes_free": 9981386752, + "system_uptime": 660706, + "app_uptime": 105, + "system_name": "Arch Linux", + "kernel_version": "5.19.13-arch1-1", + "os_version": "Linux rolling Arch Linux", + "host_name": "Computer1" + } +} +``` + ## `GET /lighthouse/spec` Returns the Ethereum proof-of-stake consensus specification loaded for this validator. diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index c31e373b4..2b0ac836a 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -24,6 +24,8 @@ validator client or the slasher**. | v2.5.0 | Aug 2022 | v11 | yes | | v3.0.0 | Aug 2022 | v11 | yes | | v3.1.0 | Sep 2022 | v12 | yes | +| v3.2.0 | Oct 2022 | v12 | yes | +| v3.3.0 | TBD | v13 | yes | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). diff --git a/book/src/intro.md b/book/src/intro.md index fca075892..ef16913d6 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -18,6 +18,7 @@ We implement the specification as defined in the You may read this book from start to finish, or jump to some of these topics: - Follow the [Installation Guide](./installation.md) to install Lighthouse. +- Run your very [own beacon node](./run_a_node.md). - Learn about [becoming a mainnet validator](./mainnet-validator.md). - Get hacking with the [Development Environment Guide](./setup.md). - Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 8596cd942..08f1b51e4 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -1,9 +1,8 @@ # Merge Migration -This document provides detail for users who want to run a merge-ready Lighthouse node. +This document provides detail for users who want to run a Lighthouse node on post-merge Ethereum. -> The merge is occurring on mainnet in September. You _must_ have a merge-ready setup by September 6 -> 2022. +> The merge occurred on mainnet in September 2022. ## Necessary Configuration @@ -27,12 +26,9 @@ engine to a merge-ready version. You must configure your node to be merge-ready before the Bellatrix fork occurs on the network on which your node is operating. -* **Mainnet**: the Bellatrix fork is scheduled for epoch 144896, September 6 2022 11:34 UTC. - You must ensure your node configuration is updated before then in order to continue following - the chain. We recommend updating your configuration now. - -* **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has already occurred. - You must have a merge-ready configuration right now. +* **Gnosis**: the Bellatrix fork has not yet been scheduled. +* **Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has + already occurred. You must have a merge-ready configuration right now. 
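+
+If you are unsure what your chosen network has scheduled, you can read `BELLATRIX_FORK_EPOCH`
+directly from its `config.yaml`. Below is a minimal sketch of such a check (it assumes the
+`serde_yaml` crate and a local copy of the config file; it is not a required step):
+
+```rust
+use std::fs;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Path to a network's config.yaml; adjust for your network.
+    let raw = fs::read_to_string("config.yaml")?;
+    let config: serde_yaml::Value = serde_yaml::from_str(&raw)?;
+    // An epoch of 2^64 - 1 (u64::MAX) is the convention for "not yet scheduled".
+    match config["BELLATRIX_FORK_EPOCH"].as_u64() {
+        Some(epoch) if epoch != u64::MAX => println!("Bellatrix scheduled at epoch {epoch}"),
+        _ => println!("Bellatrix not yet scheduled"),
+    }
+    Ok(())
+}
+```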
 ## Connecting to an execution engine
@@ -65,6 +61,7 @@
 the relevant page for your execution engine for the required flags:
 
 - [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients)
 - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge)
 - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/)
+- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer)
 
 Once you have configured your execution engine to open up the engine API (usually on port 8551) you
 should add the URL to your `lighthouse bn` flags with `--execution-endpoint <URL>`, as well as
diff --git a/book/src/redundancy.md b/book/src/redundancy.md
index dae7ac51f..dcd2ecdea 100644
--- a/book/src/redundancy.md
+++ b/book/src/redundancy.md
@@ -55,42 +55,27 @@
 In our previous example, we listed `http://192.168.1.1:5052` as a redundant
 node. Apart from having sufficient resources, the backup node should have the
 following flags:
 
-- `--staking`: starts the HTTP API server and ensures the execution chain is synced.
+- `--http`: starts the HTTP API server.
 - `--http-address 0.0.0.0`: this allows *any* external IP address to access the
   HTTP server (a firewall should be configured to deny unauthorized access to port
   `5052`). This is only required if your backup node is on a different host.
-- `--subscribe-all-subnets`: ensures that the beacon node subscribes to *all*
-  subnets, not just on-demand requests from validators.
-- `--import-all-attestations`: ensures that the beacon node performs
-  aggregation on all seen attestations.
+- `--execution-endpoint`: see [Merge Migration](./merge-migration.md).
+- `--execution-jwt`: see [Merge Migration](./merge-migration.md).
 
-Subsequently, one could use the following command to provide a backup beacon
-node:
+For example, one could use the following command to provide a backup beacon node:
 
 ```bash
 lighthouse bn \
-  --staking \
+  --http \
   --http-address 0.0.0.0 \
-  --subscribe-all-subnets \
-  --import-all-attestations
+  --execution-endpoint http://localhost:8551 \
+  --execution-jwt /secrets/jwt.hex
 ```
 
-### Resource usage of redundant Beacon Nodes
-
-The `--subscribe-all-subnets` and `--import-all-attestations` flags typically
-cause a significant increase in resource consumption. A doubling in CPU
-utilization and RAM consumption is expected.
-
-The increase in resource consumption is due to the fact that the beacon node is
-now processing, validating, aggregating and forwarding *all* attestations,
-whereas previously it was likely only doing a fraction of this work. Without
-these flags, subscription to attestation subnets and aggregation of
-attestations is only performed for validators which [explicitly request
-subscriptions][subscribe-api].
-
-There are 64 subnets and each validator will result in a subscription to *at
-least* one subnet. So, using the two aforementioned flags will result in
-resource consumption akin to running 64+ validators.
+Prior to v3.2.0, fallback beacon nodes also required the `--subscribe-all-subnets` and
+`--import-all-attestations` flags. These flags are no longer required as the validator client will
+now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour
+can be disabled using the `--disable-run-on-all` flag for `lighthouse vc`.
 ## Redundant execution nodes
diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md
new file mode 100644
index 000000000..5ce42aa63
--- /dev/null
+++ b/book/src/run_a_node.md
@@ -0,0 +1,171 @@
+# Run a Node
+
+This document provides detail for users who want to run a Lighthouse beacon node.
+You should have completed one [Installation](./installation.md) method of your choice before continuing with the following steps:
+
+1. Set up an [execution node](#step-1-set-up-an-execution-node);
+1. Enable [checkpoint sync](#step-2-choose-a-checkpoint-sync-provider);
+1. Run [Lighthouse](#step-3-run-lighthouse);
+1. [Check logs](#step-4-check-logs); and
+1. [Further readings](#step-5-further-readings).
+
+Checkpoint sync is *optional*; however, we recommend it since it is substantially faster
+than syncing from genesis while still providing the same functionality.
+
+## Step 1: Set up an execution node
+
+The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions
+present in blocks. Two flags are used to configure this connection:
+
+- `--execution-endpoint`: the *URL* of the execution engine API. Often this will be
+  `http://localhost:8551`.
+- `--execution-jwt`: the *path* to the file containing the JWT secret shared by Lighthouse and the
+  execution engine. This is a mandatory form of authentication that ensures that Lighthouse
+has authority to control the execution engine.
+
+Each execution engine has its own flags for configuring the engine API and JWT.
+Please consult the relevant page of your execution engine for the required flags:
+
+- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients)
+- [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge)
+- [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/)
+- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer)
+
+The execution engine connection must be *exclusive*, i.e. you must have one execution node
+per beacon node. The reason for this is that the beacon node _controls_ the execution node.
+
+## Step 2: Choose a checkpoint sync provider
+
+Lighthouse supports fast sync from a recent finalized checkpoint.
+The checkpoint sync is done using a [public endpoint](#use-a-community-checkpoint-sync-endpoint)
+provided by the Ethereum community.
+
+In [step 3](#step-3-run-lighthouse), when running Lighthouse,
+we will enable checkpoint sync by providing the URL to the `--checkpoint-sync-url` flag.
+
+### Use a community checkpoint sync endpoint
+
+The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the URL.
+
+For example, the URL for Sigma Prime's checkpoint sync server for mainnet is `https://mainnet.checkpoint.sigp.io`,
+which we will use in [step 3](#step-3-run-lighthouse).
+
+## Step 3: Run Lighthouse
+
+To run Lighthouse, we use the three flags from the steps above:
+- `--execution-endpoint`;
+- `--execution-jwt`; and
+- `--checkpoint-sync-url`.
+
+Additionally, we run Lighthouse with the `--network` flag, which selects a network (a short
+sketch of how this flag is resolved internally follows the list):
+
+- `lighthouse` (no flag): Mainnet.
+- `lighthouse --network mainnet`: Mainnet.
+- `lighthouse --network goerli`: Goerli (testnet).
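+
+Under the hood, the `--network` flag selects one of the network configurations baked into
+the binary and derives the chain specification from it. A minimal sketch of that resolution,
+using this repository's own `eth2_network_config` and `types` crates (illustrative only; the
+exact API may drift between releases):
+
+```rust
+use eth2_network_config::Eth2NetworkConfig;
+use types::MainnetEthSpec;
+
+fn main() {
+    // Resolve the hard-coded config matching the name passed to --network.
+    let net = Eth2NetworkConfig::constant("goerli")
+        .expect("name is well formed")
+        .expect("network is baked into the binary");
+    // Goerli uses the mainnet preset, so decode its spec with MainnetEthSpec.
+    let spec = net.chain_spec::<MainnetEthSpec>().expect("valid spec");
+    println!("Bellatrix fork epoch: {:?}", spec.bellatrix_fork_epoch);
+}
+```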
+
+Using the correct `--network` flag is very important; using the wrong flag can
+result in penalties, slashings or lost deposits. As a rule of thumb, *always*
+provide a `--network` flag instead of relying on the default.
+
+For the testnets we support [Goerli](https://goerli.net/) (`--network goerli`),
+[Sepolia](https://sepolia.dev/) (`--network sepolia`), and [Gnosis chain](https://www.gnosis.io/) (`--network gnosis`).
+
+The exact flags depend on whether you want to run your node while [staking](#staking) or [non-staking](#non-staking).
+In the following, we provide examples of what a Lighthouse setup could look like.
+
+### Staking
+
+```
+lighthouse bn \
+  --network mainnet \
+  --execution-endpoint http://localhost:8551 \
+  --execution-jwt /secrets/jwt.hex \
+  --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \
+  --http
+```
+
+A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag.
+The default listen address is `127.0.0.1:5052`.
+The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys.
+
+### Non-staking
+
+```
+lighthouse bn \
+  --network mainnet \
+  --execution-endpoint http://localhost:8551 \
+  --execution-jwt /secrets/jwt.hex \
+  --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \
+  --disable-deposit-contract-sync
+```
+
+Since we are not staking, we can use the `--disable-deposit-contract-sync` flag.
+
+---
+
+Once Lighthouse is running, we can monitor the logs to see whether it is syncing correctly.
+
+## Step 4: Check logs
+Several logs help you identify whether Lighthouse is running correctly.
+
+### Logs - Checkpoint sync
+Lighthouse will print a message to indicate that checkpoint sync is being used:
+
+```
+INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon
+```
+
+After a short time (usually less than a minute), it will log the details of the checkpoint
+loaded from the remote beacon node:
+
+```
+INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon
+```
+
+Once the checkpoint is loaded, Lighthouse will sync forwards to the head of the chain.
+
+If a validator client is connected to the node then it will be able to start completing its duties
+as soon as forwards sync completes.
+
+> **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint
+> against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/),
+> a friend's node, or a block explorer.
+
+#### Backfilling Blocks
+
+Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks
+from the checkpoint back to genesis.
+
+The beacon node will log messages similar to the following each minute while it completes backfill
+sync:
+
+```
+INFO Downloading historical blocks est_time: 5 hrs 0 mins, speed: 111.96 slots/sec, distance: 2020451 slots (40 weeks 0 days), service: slot_notifier
+```
+
+Once backfill is complete, an `INFO Historical block download complete` log will be emitted.
+
+Check out the [FAQ](./checkpoint-sync.md#faq) for more information on checkpoint sync.
+
+### Logs - Syncing
+
+You should see that Lighthouse remains in sync and marks blocks
+as `verified` indicating that they have been processed successfully by the execution engine:
+
+```
+INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78
+```
+
+## Step 5: Further readings
+
+Several other resources are logical next steps to explore after running your beacon node:
+
+- Learn how to [become a validator](./mainnet-validator.md);
+- Explore how to [manage your keys](./key-management.md);
+- Research [validator management](./validator-management.md);
+- Dig into the [APIs](./api.md) that the beacon node and validator client provide;
+- Study even more about [checkpoint sync](./checkpoint-sync.md); or
+- Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md).
+
+Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help!
\ No newline at end of file
diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml
index 4f4c18ac8..1dedabe4a 100644
--- a/boot_node/Cargo.toml
+++ b/boot_node/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "boot_node"
-version = "3.2.1"
+version = "3.3.0"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"
diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs
index 4df7a5f23..b7a66cbbd 100644
--- a/boot_node/src/config.rs
+++ b/boot_node/src/config.rs
@@ -1,9 +1,11 @@
 use beacon_node::{get_data_dir, set_network_config};
 use clap::ArgMatches;
 use eth2_network_config::Eth2NetworkConfig;
+use lighthouse_network::discv5::enr::EnrBuilder;
+use lighthouse_network::discv5::IpMode;
 use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr};
 use lighthouse_network::{
-    discovery::{create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr},
+    discovery::{load_enr_from_disk, use_or_load_enr},
     load_private_key, CombinedKeyExt, NetworkConfig,
 };
 use serde_derive::{Deserialize, Serialize};
@@ -70,6 +72,15 @@ impl<T: EthSpec> BootNodeConfig<T> {
         // the address to listen on
         let listen_socket =
             SocketAddr::new(network_config.listen_address, network_config.discovery_port);
+        if listen_socket.is_ipv6() {
+            // create ipv6 sockets and enable ipv4 mapped addresses.
+            network_config.discv5_config.ip_mode = IpMode::Ip6 {
+                enable_mapped_addresses: true,
+            };
+        } else {
+            // Set explicitly as ipv4 otherwise
+            network_config.discv5_config.ip_mode = IpMode::Ip4;
+        }
 
         let private_key = load_private_key(&network_config, &logger);
         let local_key = CombinedKey::from_libp2p(&private_key)?;
@@ -104,7 +115,29 @@
 
         // Build the local ENR
         let mut local_enr = {
-            let mut builder = create_enr_builder_from_config(&network_config, false);
+            let mut builder = EnrBuilder::new("v4");
+            // Set the ENR address if specified, and also the port.
+            // NOTE: if the port is specified but the address is not, the port won't be
+            // set since it can't be known if it's an ipv6 or ipv4 udp port.
+            if let Some(enr_address) = network_config.enr_address {
+                match enr_address {
+                    std::net::IpAddr::V4(ipv4_addr) => {
+                        builder.ip4(ipv4_addr);
+                        if let Some(port) = network_config.enr_udp_port {
+                            builder.udp4(port);
+                        }
+                    }
+                    std::net::IpAddr::V6(ipv6_addr) => {
+                        builder.ip6(ipv6_addr);
+                        if let Some(port) = network_config.enr_udp_port {
+                            builder.udp6(port);
+                            // We are enabling mapped addresses in the boot node in this case,
+                            // so advertise a udp4 port as well.
+ builder.udp4(port); + } + } + } + }; // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index c4bf887e9..8f38fb300 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -9,53 +9,63 @@ use slog::info; use types::EthSpec; pub async fn run(config: BootNodeConfig, log: slog::Logger) { + let BootNodeConfig { + listen_socket, + boot_nodes, + local_enr, + local_key, + discv5_config, + .. + } = config; + // Print out useful information about the generated ENR - let enr_socket = config - .local_enr - .udp4_socket() - .expect("Enr has a UDP socket"); - let eth2_field = config - .local_enr + let enr_v4_socket = local_enr.udp4_socket(); + let enr_v6_socket = local_enr.udp6_socket(); + let eth2_field = local_enr .eth2() .map(|fork_id| hex::encode(fork_id.fork_digest)) .unwrap_or_default(); - info!(log, "Configuration parameters"; "listening_address" => format!("{}:{}", config.listen_socket.ip(), config.listen_socket.port()), "broadcast_address" => format!("{}:{}",enr_socket.ip(), enr_socket.port()), "eth2" => eth2_field); + let pretty_v4_socket = enr_v4_socket.as_ref().map(|addr| addr.to_string()); + let pretty_v6_socket = enr_v6_socket.as_ref().map(|addr| addr.to_string()); + info!( + log, "Configuration parameters"; + "listening_address" => %listen_socket, + "advertised_v4_address" => ?pretty_v4_socket, + "advertised_v6_address" => ?pretty_v6_socket, + "eth2" => eth2_field + ); - info!(log, "Identity established"; "peer_id" => config.local_enr.peer_id().to_string(), "node_id" => config.local_enr.node_id().to_string()); + info!(log, "Identity established"; "peer_id" => %local_enr.peer_id(), "node_id" => %local_enr.node_id()); // build the contactable multiaddr list, adding the p2p protocol - info!(log, "Contact information"; "enr" => config.local_enr.to_base64()); - info!(log, "Contact information"; "multiaddrs" => format!("{:?}", config.local_enr.multiaddr_p2p())); + info!(log, "Contact information"; "enr" => local_enr.to_base64()); + info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p()); // construct the discv5 server - let mut discv5 = Discv5::new( - config.local_enr.clone(), - config.local_key, - config.discv5_config, - ) - .unwrap(); + let mut discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap(); // If there are any bootnodes add them to the routing table - for enr in config.boot_nodes { + for enr in boot_nodes { info!( log, "Adding bootnode"; - "address" => ?enr.udp4_socket(), - "peer_id" => enr.peer_id().to_string(), - "node_id" => enr.node_id().to_string() + "ipv4_address" => ?enr.udp4_socket(), + "ipv6_address" => ?enr.udp6_socket(), + "peer_id" => ?enr.peer_id(), + "node_id" => ?enr.node_id() ); - if enr != config.local_enr { + if enr != local_enr { if let Err(e) = discv5.add_enr(enr) { - slog::warn!(log, "Failed adding ENR"; "error" => e.to_string()); + slog::warn!(log, "Failed adding ENR"; "error" => ?e); } } } // start the server - if let Err(e) = discv5.start(config.listen_socket).await { - slog::crit!(log, "Could not start discv5 server"; "error" => e.to_string()); + if let Err(e) = discv5.start(listen_socket).await { + slog::crit!(log, "Could not start discv5 server"; "error" => %e); return; } @@ -72,7 +82,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { let mut event_stream = match discv5.event_stream().await { Ok(stream) => stream, Err(e) => { - slog::crit!(log, "Failed to obtain event 
stream"; "error" => e.to_string()); + slog::crit!(log, "Failed to obtain event stream"; "error" => %e); return; } }; @@ -81,9 +91,35 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { loop { tokio::select! { _ = metric_interval.tick() => { + // Get some ipv4/ipv6 stats to add in the metrics. + let mut ipv4_only_reachable: usize = 0; + let mut ipv6_only_reachable: usize= 0; + let mut ipv4_ipv6_reachable: usize = 0; + let mut unreachable_nodes: usize = 0; + for enr in discv5.kbuckets().iter_ref().filter_map(|entry| entry.status.is_connected().then_some(entry.node.value)) { + let declares_ipv4 = enr.udp4_socket().is_some(); + let declares_ipv6 = enr.udp6_socket().is_some(); + match (declares_ipv4, declares_ipv6) { + (true, true) => ipv4_ipv6_reachable += 1, + (true, false) => ipv4_only_reachable += 1, + (false, true) => ipv6_only_reachable += 1, + (false, false) => unreachable_nodes += 1, + } + } + // display server metrics let metrics = discv5.metrics(); - info!(log, "Server metrics"; "connected_peers" => discv5.connected_peers(), "active_sessions" => metrics.active_sessions, "requests/s" => format!("{:.2}", metrics.unsolicited_requests_per_second)); + info!( + log, "Server metrics"; + "connected_peers" => discv5.connected_peers(), + "active_sessions" => metrics.active_sessions, + "requests/s" => format_args!("{:.2}", metrics.unsolicited_requests_per_second), + "ipv4_nodes" => ipv4_only_reachable, + "ipv6_nodes" => ipv6_only_reachable, + "ipv6_and_ipv4_nodes" => ipv4_ipv6_reachable, + "unreachable_nodes" => unreachable_nodes, + ); + } Some(event) = event_stream.recv() => { match event { @@ -95,7 +131,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { Discv5Event::TalkRequest(_) => {} // Ignore Discv5Event::NodeInserted { .. } => {} // Ignore Discv5Event::SocketUpdated(socket_addr) => { - info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr)); + info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr); } Discv5Event::SessionEstablished{ .. } => {} // Ignore } diff --git a/bors.toml b/bors.toml index 6edf55bfa..dbe92c68f 100644 --- a/bors.toml +++ b/bors.toml @@ -23,7 +23,8 @@ status = [ "check-msrv", "slasher-tests", "syncing-simulator-ubuntu", - "disallowed-from-async-lint" + "disallowed-from-async-lint", + "compile-with-beta-compiler" ] use_squash_merge = true timeout_sec = 10800 diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index a89f64162..fcfff7284 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -518,6 +518,29 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET beacon/states/{state_id}/randao?epoch` + pub async fn get_beacon_states_randao( + &self, + state_id: StateId, + epoch: Option, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("randao"); + + if let Some(epoch) = epoch { + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + } + + self.get_opt(path).await + } + /// `GET beacon/states/{state_id}/validators/{validator_id}` /// /// Returns `Ok(None)` on a 404 error. 
@@ -657,6 +680,17 @@ impl BeaconNodeHttpClient {
         Ok(path)
     }
 
+    /// Path for `v1/beacon/blinded_blocks/{block_id}`
+    pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result<Url, Error> {
+        let mut path = self.eth_path(V1)?;
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("beacon")
+            .push("blinded_blocks")
+            .push(&block_id.to_string());
+        Ok(path)
+    }
+
     /// `GET v2/beacon/blocks`
     ///
     /// Returns `Ok(None)` on a 404 error.
@@ -701,6 +735,51 @@
         }))
     }
 
+    /// `GET v1/beacon/blinded_blocks/{block_id}`
+    ///
+    /// Returns `Ok(None)` on a 404 error.
+    pub async fn get_beacon_blinded_blocks<T: EthSpec>(
+        &self,
+        block_id: BlockId,
+    ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<SignedBlindedBeaconBlock<T>>>, Error>
+    {
+        let path = self.get_beacon_blinded_blocks_path(block_id)?;
+        let response = match self.get_response(path, |b| b).await.optional()? {
+            Some(res) => res,
+            None => return Ok(None),
+        };
+
+        // If present, use the fork provided in the headers to decode the block. Gracefully handle
+        // missing and malformed fork names by falling back to regular deserialisation.
+        let (block, version, execution_optimistic) = match response.fork_name_from_header() {
+            Ok(Some(fork_name)) => {
+                let (data, (version, execution_optimistic)) =
+                    map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, {
+                        let ExecutionOptimisticForkVersionedResponse {
+                            version,
+                            execution_optimistic,
+                            data,
+                        } = response.json().await?;
+                        (data, (version, execution_optimistic))
+                    });
+                (data, version, execution_optimistic)
+            }
+            Ok(None) | Err(_) => {
+                let ExecutionOptimisticForkVersionedResponse {
+                    version,
+                    execution_optimistic,
+                    data,
+                } = response.json().await?;
+                (data, version, execution_optimistic)
+            }
+        };
+        Ok(Some(ExecutionOptimisticForkVersionedResponse {
+            version,
+            execution_optimistic,
+            data: block,
+        }))
+    }
+
     /// `GET v1/beacon/blocks` (LEGACY)
     ///
     /// Returns `Ok(None)` on a 404 error.
@@ -735,6 +814,24 @@
             .transpose()
     }
 
+    /// `GET beacon/blinded_blocks/{block_id}` as SSZ
+    ///
+    /// Returns `Ok(None)` on a 404 error.
+    pub async fn get_beacon_blinded_blocks_ssz<T: EthSpec>(
+        &self,
+        block_id: BlockId,
+        spec: &ChainSpec,
+    ) -> Result<Option<SignedBlindedBeaconBlock<T>>, Error> {
+        let path = self.get_beacon_blinded_blocks_path(block_id)?;
+
+        self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_beacon_blocks_ssz)
+            .await?
+            .map(|bytes| {
+                SignedBlindedBeaconBlock::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)
+            })
+            .transpose()
+    }
+
     /// `GET beacon/blocks/{block_id}/root`
     ///
     /// Returns `Ok(None)` on a 404 error.
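The new blinded-block getters mirror the existing full-block methods. A minimal usage sketch
(assumptions: a beacon node on localhost, and the `eth2`, `sensitive_url`, `types` and `tokio`
crates; the URL and timeout are placeholders):

```rust
use eth2::{types::BlockId, BeaconNodeHttpClient, Timeouts};
use sensitive_url::SensitiveUrl;
use std::time::Duration;
use types::MainnetEthSpec;

#[tokio::main]
async fn main() -> Result<(), String> {
    let client = BeaconNodeHttpClient::new(
        SensitiveUrl::parse("http://localhost:5052").map_err(|e| format!("{e:?}"))?,
        Timeouts::set_all(Duration::from_secs(12)),
    );
    // Fetch the blinded block at the head of the chain; `Ok(None)` means a 404.
    if let Some(resp) = client
        .get_beacon_blinded_blocks::<MainnetEthSpec>(BlockId::Head)
        .await
        .map_err(|e| format!("{e:?}"))?
    {
        println!("head slot: {}", resp.data.message().slot());
    }
    Ok(())
}
```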
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs
index 71e5b40e0..4b7ae5539 100644
--- a/common/eth2/src/types.rs
+++ b/common/eth2/src/types.rs
@@ -455,6 +455,11 @@ pub struct SyncCommitteesQuery {
     pub epoch: Option<Epoch>,
 }

+#[derive(Serialize, Deserialize)]
+pub struct RandaoQuery {
+    pub epoch: Option<Epoch>,
+}
+
 #[derive(Serialize, Deserialize)]
 pub struct AttestationPoolQuery {
     pub slot: Option<Slot>,
@@ -486,6 +491,11 @@ pub struct SyncCommitteeByValidatorIndices {
     pub validator_aggregates: Vec<SyncSubcommittee>,
 }

+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct RandaoMix {
+    pub randao: Hash256,
+}
+
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(transparent)]
 pub struct SyncSubcommittee {
diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml
index 7987899c3..d55ef3f3b 100644
--- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml
+++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml
@@ -6,8 +6,8 @@ PRESET_BASE: 'gnosis'

 # Transition
 # ---------------------------------------------------------------
-# TBD, 2**256-2**10 is a placeholder
-TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912
+# Estimated on Dec 5, 2022
+TERMINAL_TOTAL_DIFFICULTY: 8626000000000000000000058750000000000000000000
 # By default, don't use these params
 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
@@ -35,7 +35,7 @@ ALTAIR_FORK_VERSION: 0x01000064
 ALTAIR_FORK_EPOCH: 512
 # Merge
 BELLATRIX_FORK_VERSION: 0x02000064
-BELLATRIX_FORK_EPOCH: 18446744073709551615
+BELLATRIX_FORK_EPOCH: 385536
 # Sharding
 SHARDING_FORK_VERSION: 0x03000064
 SHARDING_FORK_EPOCH: 18446744073709551615
diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs
index 2bfd00326..7aef78437 100644
--- a/common/eth2_network_config/src/lib.rs
+++ b/common/eth2_network_config/src/lib.rs
@@ -226,7 +226,7 @@ mod tests {
     use super::*;
     use ssz::Encode;
     use tempfile::Builder as TempBuilder;
-    use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec, GNOSIS};
+    use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec};

     type E = MainnetEthSpec;

@@ -250,6 +250,13 @@ mod tests {
         assert_eq!(spec, config.chain_spec::<E>().unwrap());
     }

+    #[test]
+    fn gnosis_config_eq_chain_spec() {
+        let config = Eth2NetworkConfig::from_hardcoded_net(&GNOSIS).unwrap();
+        let spec = ChainSpec::gnosis();
+        assert_eq!(spec, config.chain_spec::<GnosisEthSpec>().unwrap());
+    }
+
     #[test]
     fn mainnet_genesis_state() {
         let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap();
@@ -270,7 +277,7 @@ mod tests {
             .unwrap_or_else(|_| panic!("{:?}", net.name));

         // Ensure we can parse the YAML config to a chain spec.
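Reviewer note: `RandaoQuery` above is the server-side mirror of the client's `?epoch=` handling: an absent `Option` simply omits the pair. A sketch of that round-trip using the `serde_urlencoded` crate (an assumption for illustration; the client in this diff builds the pair by hand via `query_pairs_mut`):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct RandaoQuery {
    epoch: Option<u64>, // stand-in for `Option<Epoch>`
}

fn main() {
    // `Some` serialises to a pair, `None` is omitted entirely.
    let with_epoch = serde_urlencoded::to_string(RandaoQuery { epoch: Some(194_048) }).unwrap();
    assert_eq!(with_epoch, "epoch=194048");
    let without = serde_urlencoded::to_string(RandaoQuery { epoch: None }).unwrap();
    assert_eq!(without, "");

    // And back again, as a query-string extractor would do it server-side.
    let parsed: RandaoQuery = serde_urlencoded::from_str("epoch=194048").unwrap();
    assert_eq!(parsed, RandaoQuery { epoch: Some(194_048) });
}
```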
-        if net.name == GNOSIS {
+        if net.name == types::GNOSIS {
             config.chain_spec::<GnosisEthSpec>().unwrap();
         } else {
             config.chain_spec::<MainnetEthSpec>().unwrap();
         }
diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs
index a48ba211d..afcbae513 100644
--- a/common/lighthouse_version/src/lib.rs
+++ b/common/lighthouse_version/src/lib.rs
@@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
         // NOTE: using --match instead of --exclude for compatibility with old Git
         "--match=thiswillnevermatchlol"
     ],
-    prefix = "Lighthouse/v3.2.1-",
-    fallback = "Lighthouse/v3.2.1"
+    prefix = "Lighthouse/v3.3.0-",
+    fallback = "Lighthouse/v3.3.0"
 );

 /// Returns `VERSION`, but with platform information appended to the end.
diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml
new file mode 100644
index 000000000..0956710b8
--- /dev/null
+++ b/common/system_health/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "system_health"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+lighthouse_network = { path = "../../beacon_node/lighthouse_network" }
+types = { path = "../../consensus/types" }
+sysinfo = "0.26.5"
+serde = "1.0.116"
+serde_derive = "1.0.116"
+serde_json = "1.0.58"
+parking_lot = "0.12.0"
diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs
new file mode 100644
index 000000000..d10540e50
--- /dev/null
+++ b/common/system_health/src/lib.rs
@@ -0,0 +1,241 @@
+use lighthouse_network::{types::SyncState, NetworkGlobals};
+use parking_lot::RwLock;
+use serde::{Deserialize, Serialize};
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+use sysinfo::{CpuExt, DiskExt, NetworkExt, NetworksExt, System, SystemExt};
+use types::EthSpec;
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct SystemHealth {
+    /// Total memory of the system.
+    pub total_memory: u64,
+    /// Total free memory available to the system.
+    pub free_memory: u64,
+    /// Total used memory.
+    pub used_memory: u64,
+
+    /// System load average over 1 minute.
+    pub sys_loadavg_1: f64,
+    /// System load average over 5 minutes.
+    pub sys_loadavg_5: f64,
+    /// System load average over 15 minutes.
+    pub sys_loadavg_15: f64,
+
+    /// Total cpu cores.
+    pub cpu_cores: usize,
+    /// Total cpu threads.
+    pub cpu_threads: usize,
+    /// The global cpu frequency.
+    pub global_cpu_frequency: f32,
+
+    /// Total capacity of disk.
+    pub disk_bytes_total: u64,
+    /// Free space on disk.
+    pub disk_bytes_free: u64,
+
+    /// System uptime.
+    pub system_uptime: u64,
+    /// Application uptime.
+    pub app_uptime: u64,
+    /// The system name.
+    pub system_name: String,
+    /// Kernel version.
+    pub kernel_version: String,
+    /// OS version.
+    pub os_version: String,
+    /// Hostname.
+    pub host_name: String,
+}
+
+/// System related health, specific to the UI for the validator client.
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct SystemHealthVC {
+    #[serde(flatten)]
+    pub system_health: SystemHealth,
+}
+
+/// System related health, specific to the UI for the Beacon Node.
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct SystemHealthBN {
+    #[serde(flatten)]
+    pub system_health: SystemHealth,
+    /// The name of the network that uses the most traffic.
+    pub network_name: String,
+    /// Total bytes received over the main interface.
+    pub network_bytes_total_received: u64,
+    /// Total bytes sent over the main interface.
+    pub network_bytes_total_transmit: u64,
+
+    /// The current NAT status.
+    pub nat_open: bool,
+    /// The current number of connected peers.
+    pub connected_peers: usize,
+    /// The current syncing state of the consensus node.
+    pub sync_state: SyncState,
+}
+
+/// Populates the system health.
+fn observe_system_health(
+    sysinfo: Arc<RwLock<System>>,
+    data_dir: PathBuf,
+    app_uptime: u64,
+) -> SystemHealth {
+    let sysinfo = sysinfo.read();
+    let loadavg = sysinfo.load_average();
+
+    let cpus = sysinfo.cpus();
+
+    let disks = sysinfo.disks();
+
+    let system_uptime = sysinfo.uptime();
+
+    // Helper functions to extract specific data
+
+    // Find the fs associated with the data dir location and report on it
+    let (disk_bytes_total, disk_bytes_free) = {
+        // There is no clean way to find this in an OS-agnostic way. We take a simple approach,
+        // which is to attempt to match the mount_point to the data_dir. If this cannot be done,
+        // we just fall back to the root fs.
+
+        let mut root_fs_disk = None;
+        let mut other_matching_fs = None;
+
+        for disk in disks.iter() {
+            if disk.mount_point() == Path::new("/")
+                || disk.mount_point() == Path::new("C:\\")
+                || disk.mount_point() == Path::new("/System/Volumes/Data")
+            {
+                // Found the usual default root_fs
+                root_fs_disk = Some(disk);
+                continue;
+            }
+
+            // If we have other file systems, compare these to the data_dir of Lighthouse and
+            // prioritize these.
+            if data_dir
+                .to_str()
+                .map(|path| {
+                    if let Some(mount_str) = disk.mount_point().to_str() {
+                        path.contains(mount_str)
+                    } else {
+                        false
+                    }
+                })
+                .unwrap_or(false)
+            {
+                other_matching_fs = Some(disk);
+                break; // Don't bother finding other competing fs.
+            }
+        }
+
+        // If we found a file system other than the root, report this, otherwise just report the
+        // root fs
+        let fs = other_matching_fs.or(root_fs_disk);
+
+        match fs {
+            Some(fs) => (fs.total_space(), fs.available_space()),
+            None => {
+                // If we can't find a known partition, just add them all up
+                disks.iter().fold((0, 0), |mut current_sizes, disk| {
+                    current_sizes.0 += disk.total_space();
+                    current_sizes.1 += disk.available_space();
+                    current_sizes
+                })
+            }
+        }
+    };
+
+    // Attempt to get the clock speed from the name of the CPU
+    let cpu_frequency_from_name = cpus.iter().next().and_then(|cpu| {
+        cpu.brand()
+            .split_once("GHz")
+            .and_then(|(result, _)| result.trim().rsplit_once(' '))
+            .and_then(|(_, result)| result.parse::<f32>().ok())
+    });
+
+    let global_cpu_frequency = match cpu_frequency_from_name {
+        Some(freq) => freq,
+        None => {
+            // Get the frequency from the average of the measured frequencies
+            let global_cpu_frequency: f32 =
+                cpus.iter().map(|cpu| cpu.frequency()).sum::<u64>() as f32 / cpus.len() as f32;
+            // Shift to GHz, rounded to 1 decimal place
+            (global_cpu_frequency / 100.0).round() / 10.0
+        }
+    };
+
+    SystemHealth {
+        total_memory: sysinfo.total_memory(),
+        free_memory: sysinfo.free_memory(),
+        used_memory: sysinfo.used_memory(),
+        sys_loadavg_1: loadavg.one,
+        sys_loadavg_5: loadavg.five,
+        sys_loadavg_15: loadavg.fifteen,
+        cpu_cores: sysinfo.physical_core_count().unwrap_or(0),
+        cpu_threads: cpus.len(),
+        global_cpu_frequency,
+        disk_bytes_total,
+        disk_bytes_free,
+        system_uptime,
+        app_uptime,
+        system_name: sysinfo.name().unwrap_or_else(|| String::from("")),
+        kernel_version: sysinfo.kernel_version().unwrap_or_else(|| "".into()),
+        os_version: sysinfo.long_os_version().unwrap_or_else(|| "".into()),
+        host_name: sysinfo.host_name().unwrap_or_else(|| "".into()),
+    }
+}
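Reviewer note: the brand-string parse in `observe_system_health` above, in miniature: take the token immediately before "GHz", e.g. "Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz" yields 2.6; when no such token exists, the caller falls back to averaging the measured per-core MHz and rounding to one decimal place. A self-contained sketch:

```rust
fn ghz_from_brand(brand: &str) -> Option<f32> {
    brand
        .split_once("GHz")
        .and_then(|(before, _)| before.trim().rsplit_once(' '))
        .and_then(|(_, token)| token.parse::<f32>().ok())
}

fn main() {
    assert_eq!(
        ghz_from_brand("Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz"),
        Some(2.6)
    );
    // Many AMD brand strings omit the frequency; the fallback path handles these.
    assert_eq!(ghz_from_brand("AMD Ryzen 9 5950X 16-Core Processor"), None);

    // The fallback: average MHz across cores, shifted to GHz at one decimal place.
    let cores_mhz = [2600u64, 2600, 2594, 2606];
    let avg = cores_mhz.iter().sum::<u64>() as f32 / cores_mhz.len() as f32;
    assert_eq!((avg / 100.0).round() / 10.0, 2.6);
}
```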
+
+/// Observes the Validator client system health.
+pub fn observe_system_health_vc(
+    sysinfo: Arc<RwLock<System>>,
+    data_dir: PathBuf,
+    app_uptime: u64,
+) -> SystemHealthVC {
+    SystemHealthVC {
+        system_health: observe_system_health(sysinfo, data_dir, app_uptime),
+    }
+}
+
+/// Observes the Beacon Node system health.
+pub fn observe_system_health_bn<E: EthSpec>(
+    sysinfo: Arc<RwLock<System>>,
+    data_dir: PathBuf,
+    app_uptime: u64,
+    network_globals: Arc<NetworkGlobals<E>>,
+) -> SystemHealthBN {
+    let system_health = observe_system_health(sysinfo.clone(), data_dir, app_uptime);
+
+    // Find the network with the most traffic and assume this is the main network
+    let sysinfo = sysinfo.read();
+    let networks = sysinfo.networks();
+    let (network_name, network_bytes_total_received, network_bytes_total_transmit) = networks
+        .iter()
+        .max_by_key(|(_name, network)| network.total_received())
+        .map(|(name, network)| {
+            (
+                name.clone(),
+                network.total_received(),
+                network.total_transmitted(),
+            )
+        })
+        .unwrap_or_else(|| (String::from("None"), 0, 0));
+
+    // Determine if the NAT is open or not.
+    let nat_open = lighthouse_network::metrics::NAT_OPEN
+        .as_ref()
+        .map(|v| v.get())
+        .unwrap_or(0)
+        != 0;
+
+    SystemHealthBN {
+        system_health,
+        network_name,
+        network_bytes_total_received,
+        network_bytes_total_transmit,
+        nat_open,
+        connected_peers: network_globals.connected_peers(),
+        sync_state: network_globals.sync_state(),
+    }
+}
diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs
index b0cf4551e..0539cc7d2 100644
--- a/consensus/ssz_types/src/bitfield.rs
+++ b/consensus/ssz_types/src/bitfield.rs
@@ -22,7 +22,7 @@ pub trait BitfieldBehaviour: Clone {}
 /// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`.
 ///
 /// See the [`Bitfield`](struct.Bitfield.html) docs for usage.
-#[derive(Clone, PartialEq, Debug)]
+#[derive(Clone, PartialEq, Eq, Debug)]
 pub struct Variable<N> {
     _phantom: PhantomData<N>,
 }

@@ -30,7 +30,7 @@ pub struct Variable<N> {
 /// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`.
 ///
 /// See the [`Bitfield`](struct.Bitfield.html) docs for usage.
-#[derive(Clone, PartialEq, Debug)]
+#[derive(Clone, PartialEq, Eq, Debug)]
 pub struct Fixed<N> {
     _phantom: PhantomData<N>,
 }
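Reviewer note: the "main interface" heuristic in `observe_system_health_bn` above, reduced to plain std types: pick the interface with the most bytes received, with a default when none are known.

```rust
use std::collections::HashMap;

// (bytes received, bytes transmitted) per interface name.
fn busiest(networks: &HashMap<String, (u64, u64)>) -> (String, u64, u64) {
    networks
        .iter()
        .max_by_key(|(_name, (received, _))| *received)
        .map(|(name, (received, transmitted))| (name.clone(), *received, *transmitted))
        .unwrap_or_else(|| (String::from("None"), 0, 0))
}

fn main() {
    let mut networks = HashMap::new();
    networks.insert("lo".to_string(), (4_096, 4_096));
    networks.insert("eth0".to_string(), (1_000_000, 250_000));
    assert_eq!(busiest(&networks), ("eth0".to_string(), 1_000_000, 250_000));
    assert_eq!(busiest(&HashMap::new()).0, "None");
}
```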
@@ -96,7 +96,7 @@ pub type BitVector<N> = Bitfield<Fixed<N>>;
 /// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest
 /// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set.
 #[derive(Clone, Debug, Derivative)]
-#[derivative(PartialEq, Hash(bound = ""))]
+#[derivative(PartialEq, Eq, Hash(bound = ""))]
 pub struct Bitfield<T> {
     bytes: SmallVec<[u8; SMALLVEC_LEN]>,
     len: usize,
diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs
index 121a9eccb..ccf8cefb6 100644
--- a/consensus/state_processing/src/consensus_context.rs
+++ b/consensus/state_processing/src/consensus_context.rs
@@ -1,8 +1,11 @@
+use crate::common::get_indexed_attestation;
+use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
+use std::collections::{hash_map::Entry, HashMap};
 use std::marker::PhantomData;
 use tree_hash::TreeHash;
 use types::{
-    AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256,
-    SignedBeaconBlock, Slot,
+    AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList,
+    ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot,
 };

 #[derive(Debug)]
@@ -13,6 +16,9 @@ pub struct ConsensusContext<T: EthSpec> {
     proposer_index: Option<u64>,
     /// Block root of the block at `slot`.
     current_block_root: Option<Hash256>,
+    /// Cache of indexed attestations constructed during block processing.
+    indexed_attestations:
+        HashMap<(AttestationData, BitList<T::MaxValidatorsPerCommittee>), IndexedAttestation<T>>,
     _phantom: PhantomData<T>,
 }

@@ -20,6 +26,7 @@ pub struct ConsensusContext<T: EthSpec> {
 pub enum ContextError {
     BeaconState(BeaconStateError),
     SlotMismatch { slot: Slot, expected: Slot },
+    EpochMismatch { epoch: Epoch, expected: Epoch },
 }

 impl From<BeaconStateError> for ContextError {
@@ -34,6 +41,7 @@ impl<T: EthSpec> ConsensusContext<T> {
             slot,
             proposer_index: None,
             current_block_root: None,
+            indexed_attestations: HashMap::new(),
             _phantom: PhantomData,
         }
     }
@@ -43,13 +51,39 @@ impl<T: EthSpec> ConsensusContext<T> {
         self
     }

+    /// Strict method for fetching the proposer index.
+    ///
+    /// Gets the proposer index for `self.slot` while ensuring that it matches `state.slot()`. This
+    /// method should be used in block processing and almost everywhere the proposer index is
+    /// required. If the slot check is too restrictive, see `get_proposer_index_from_epoch_state`.
     pub fn get_proposer_index(
         &mut self,
         state: &BeaconState<T>,
         spec: &ChainSpec,
     ) -> Result<u64, ContextError> {
         self.check_slot(state.slot())?;
+        self.get_proposer_index_no_checks(state, spec)
+    }
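Reviewer note: the `indexed_attestations` cache added above is a plain memoisation pattern: compute once per distinct `(data, aggregation_bits)` key, then hand out references into the map. The same shape with simplified key/value types:

```rust
use std::collections::{hash_map::Entry, HashMap};

struct Cache {
    // Key plays the role of `(AttestationData, BitList)`, value of `IndexedAttestation`.
    computed: HashMap<(u64, Vec<u8>), Vec<u64>>,
}

impl Cache {
    fn get_or_compute(&mut self, key: (u64, Vec<u8>)) -> &Vec<u64> {
        match self.computed.entry(key) {
            // Already computed: hand back a reference into the map.
            Entry::Occupied(occupied) => occupied.into_mut(),
            // First sighting: do the (expensive) work exactly once.
            Entry::Vacant(vacant) => vacant.insert(vec![1, 2, 3]),
        }
    }
}

fn main() {
    let mut cache = Cache { computed: HashMap::new() };
    cache.get_or_compute((7, vec![0b1011]));
    cache.get_or_compute((7, vec![0b1011])); // hits the Occupied arm
    assert_eq!(cache.computed.len(), 1);
}
```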
+
+    /// More liberal method for fetching the proposer index.
+    ///
+    /// Fetches the proposer index for `self.slot` but does not require the state to be from an
+    /// exactly matching slot (merely a matching epoch). This is useful in batch verification where
+    /// we want to extract the proposer index from a single state for every slot in the epoch.
+    pub fn get_proposer_index_from_epoch_state(
+        &mut self,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> Result<u64, ContextError> {
+        self.check_epoch(state.current_epoch())?;
+        self.get_proposer_index_no_checks(state, spec)
+    }
+
+    fn get_proposer_index_no_checks(
+        &mut self,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> Result<u64, ContextError> {
         if let Some(proposer_index) = self.proposer_index {
             return Ok(proposer_index);
         }
@@ -89,4 +123,39 @@ impl<T: EthSpec> ConsensusContext<T> {
         })
     }
+
+    fn check_epoch(&self, epoch: Epoch) -> Result<(), ContextError> {
+        let expected = self.slot.epoch(T::slots_per_epoch());
+        if epoch == expected {
+            Ok(())
+        } else {
+            Err(ContextError::EpochMismatch { epoch, expected })
+        }
+    }
+
+    pub fn get_indexed_attestation(
+        &mut self,
+        state: &BeaconState<T>,
+        attestation: &Attestation<T>,
+    ) -> Result<&IndexedAttestation<T>, BlockOperationError<AttestationInvalid>> {
+        let key = (
+            attestation.data.clone(),
+            attestation.aggregation_bits.clone(),
+        );
+
+        match self.indexed_attestations.entry(key) {
+            Entry::Occupied(occupied) => Ok(occupied.into_mut()),
+            Entry::Vacant(vacant) => {
+                let committee =
+                    state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
+                let indexed_attestation =
+                    get_indexed_attestation(committee.committee, attestation)?;
+                Ok(vacant.insert(indexed_attestation))
+            }
+        }
+    }
+
+    pub fn num_cached_indexed_attestations(&self) -> usize {
+        self.indexed_attestations.len()
+    }
 }
diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs
index d1c4cf12a..526992226 100644
--- a/consensus/state_processing/src/per_block_processing.rs
+++ b/consensus/state_processing/src/per_block_processing.rs
@@ -120,16 +120,13 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
     let verify_signatures = match block_signature_strategy {
         BlockSignatureStrategy::VerifyBulk => {
             // Verify all signatures in the block at once.
-            let block_root = Some(ctxt.get_current_block_root(signed_block)?);
-            let proposer_index = Some(ctxt.get_proposer_index(state, spec)?);
             block_verify!(
                 BlockSignatureVerifier::verify_entire_block(
                     state,
                     |i| get_pubkey_from_state(state, i),
                     |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned),
                     signed_block,
-                    block_root,
-                    proposer_index,
+                    ctxt,
                     spec
                 )
                 .is_ok(),
@@ -352,6 +349,7 @@ pub fn get_new_eth1_data(
 /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload
 pub fn partially_verify_execution_payload<'payload, T: EthSpec, Payload: AbstractExecPayload<T>>(
     state: &BeaconState<T>,
+    block_slot: Slot,
     payload: Payload::Ref<'payload>,
     spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
@@ -372,7 +370,7 @@ pub fn partially_verify_execution_payload<'payload, T: EthSpec, Payload: AbstractExecPayload<T>>(
         }
     );

-    let timestamp = compute_timestamp_at_slot(state, spec)?;
+    let timestamp = compute_timestamp_at_slot(state, block_slot, spec)?;
     block_verify!(
         payload.timestamp() == timestamp,
         BlockProcessingError::ExecutionInvalidTimestamp {
@@ -396,7 +394,7 @@ pub fn process_execution_payload<'payload, T: EthSpec, Payload: AbstractExecPayload<T>>(
     payload: Payload::Ref<'payload>,
     spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
-    partially_verify_execution_payload::<T, Payload>(state, payload, spec)?;
+    partially_verify_execution_payload::<T, Payload>(state, state.slot(), payload, spec)?;

     match state.latest_execution_payload_header_mut()?
{ ExecutionPayloadHeaderRefMut::Merge(header_mut) => { @@ -459,9 +457,10 @@ pub fn is_execution_enabled>( /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot pub fn compute_timestamp_at_slot( state: &BeaconState, + block_slot: Slot, spec: &ChainSpec, ) -> Result { - let slots_since_genesis = state.slot().as_u64().safe_sub(spec.genesis_slot.as_u64())?; + let slots_since_genesis = block_slot.as_u64().safe_sub(spec.genesis_slot.as_u64())?; slots_since_genesis .safe_mul(spec.seconds_per_slot) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index a8d0acc55..fe654fcaa 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,8 +1,8 @@ #![allow(clippy::integer_arithmetic)] use super::signature_sets::{Error as SignatureSetError, *}; -use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; +use crate::{ConsensusContext, ContextError}; use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; @@ -28,6 +28,8 @@ pub enum Error { IncorrectBlockProposer { block: u64, local_shuffling: u64 }, /// Failed to load a signature set. The block may be invalid or we failed to process it. SignatureSetError(SignatureSetError), + /// Error related to the consensus context, likely the proposer index or block root calc. + ContextError(ContextError), } impl From for Error { @@ -36,6 +38,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ContextError) -> Error { + Error::ContextError(e) + } +} + impl From for Error { fn from(e: SignatureSetError) -> Error { match e { @@ -122,12 +130,11 @@ where get_pubkey: F, decompressor: D, block: &'a SignedBeaconBlock, - block_root: Option, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, spec: &'a ChainSpec, ) -> Result<()> { let mut verifier = Self::new(state, get_pubkey, decompressor, spec); - verifier.include_all_signatures(block, block_root, verified_proposer_index)?; + verifier.include_all_signatures(block, ctxt)?; verifier.verify() } @@ -135,11 +142,14 @@ where pub fn include_all_signatures>( &mut self, block: &'a SignedBeaconBlock, - block_root: Option, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, ) -> Result<()> { + let block_root = Some(ctxt.get_current_block_root(block)?); + let verified_proposer_index = + Some(ctxt.get_proposer_index_from_epoch_state(self.state, self.spec)?); + self.include_block_proposal(block, block_root, verified_proposer_index)?; - self.include_all_signatures_except_proposal(block, verified_proposer_index)?; + self.include_all_signatures_except_proposal(block, ctxt)?; Ok(()) } @@ -149,12 +159,14 @@ where pub fn include_all_signatures_except_proposal>( &mut self, block: &'a SignedBeaconBlock, - verified_proposer_index: Option, + ctxt: &mut ConsensusContext, ) -> Result<()> { + let verified_proposer_index = + Some(ctxt.get_proposer_index_from_epoch_state(self.state, self.spec)?); self.include_randao_reveal(block, verified_proposer_index)?; self.include_proposer_slashings(block)?; self.include_attester_slashings(block)?; - self.include_attestations(block)?; + 
self.include_attestations(block, ctxt)?; // Deposits are not included because they can legally have invalid signatures. self.include_exits(block)?; self.include_sync_aggregate(block)?; @@ -262,7 +274,8 @@ where pub fn include_attestations>( &mut self, block: &'a SignedBeaconBlock, - ) -> Result>> { + ctxt: &mut ConsensusContext, + ) -> Result<()> { self.sets .sets .reserve(block.message().body().attestations().len()); @@ -272,28 +285,18 @@ where .body() .attestations() .iter() - .try_fold( - Vec::with_capacity(block.message().body().attestations().len()), - |mut vec, attestation| { - let committee = self - .state - .get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = - get_indexed_attestation(committee.committee, attestation)?; + .try_for_each(|attestation| { + let indexed_attestation = ctxt.get_indexed_attestation(self.state, attestation)?; - self.sets.push(indexed_attestation_signature_set( - self.state, - self.get_pubkey.clone(), - &attestation.signature, - &indexed_attestation, - self.spec, - )?); - - vec.push(indexed_attestation); - - Ok(vec) - }, - ) + self.sets.push(indexed_attestation_signature_set( + self.state, + self.get_pubkey.clone(), + &attestation.signature, + indexed_attestation, + self.spec, + )?); + Ok(()) + }) .map_err(Error::into) } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 32e36c6ce..d0e855b7a 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -63,8 +63,14 @@ pub mod base { // Verify and apply each attestation. for (i, attestation) in attestations.iter().enumerate() { - verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) - .map_err(|e| e.into_with_index(i))?; + verify_attestation_for_block_inclusion( + state, + attestation, + ctxt, + verify_signatures, + spec, + ) + .map_err(|e| e.into_with_index(i))?; let pending_attestation = PendingAttestation { aggregation_bits: attestation.aggregation_bits.clone(), @@ -100,19 +106,11 @@ pub mod altair { ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - let proposer_index = ctxt.get_proposer_index(state, spec)?; attestations .iter() .enumerate() .try_for_each(|(i, attestation)| { - process_attestation( - state, - attestation, - i, - proposer_index, - verify_signatures, - spec, - ) + process_attestation(state, attestation, i, ctxt, verify_signatures, spec) }) } @@ -120,16 +118,24 @@ pub mod altair { state: &mut BeaconState, attestation: &Attestation, att_index: usize, - proposer_index: u64, + ctxt: &mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; - let indexed_attestation = - verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) - .map_err(|e| e.into_with_index(att_index))?; + let proposer_index = ctxt.get_proposer_index(state, spec)?; + + let attesting_indices = &verify_attestation_for_block_inclusion( + state, + attestation, + ctxt, + verify_signatures, + spec, + ) + .map_err(|e| e.into_with_index(att_index))? 
+ .attesting_indices; // Matching roots, participation flag indices let data = &attestation.data; @@ -141,7 +147,7 @@ pub mod altair { let total_active_balance = state.get_total_active_balance()?; let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; let mut proposer_reward_numerator = 0; - for index in &indexed_attestation.attesting_indices { + for index in attesting_indices { let index = *index as usize; for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 5d8113af4..303a6e391 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -1,7 +1,7 @@ use super::errors::{AttestationInvalid as Invalid, BlockOperationError}; use super::VerifySignatures; -use crate::common::get_indexed_attestation; use crate::per_block_processing::is_valid_indexed_attestation; +use crate::ConsensusContext; use safe_arith::SafeArith; use types::*; @@ -15,12 +15,13 @@ fn error(reason: Invalid) -> BlockOperationError { /// to `state`. Otherwise, returns a descriptive `Err`. /// /// Optionally verifies the aggregate signature, depending on `verify_signatures`. -pub fn verify_attestation_for_block_inclusion( +pub fn verify_attestation_for_block_inclusion<'ctxt, T: EthSpec>( state: &BeaconState, attestation: &Attestation, + ctxt: &'ctxt mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result> { +) -> Result<&'ctxt IndexedAttestation> { let data = &attestation.data; verify!( @@ -39,7 +40,7 @@ pub fn verify_attestation_for_block_inclusion( } ); - verify_attestation_for_state(state, attestation, verify_signatures, spec) + verify_attestation_for_state(state, attestation, ctxt, verify_signatures, spec) } /// Returns `Ok(())` if `attestation` is a valid attestation to the chain that precedes the given @@ -49,12 +50,13 @@ pub fn verify_attestation_for_block_inclusion( /// prior blocks in `state`. 
 ///
 /// Spec v0.12.1
-pub fn verify_attestation_for_state<T: EthSpec>(
+pub fn verify_attestation_for_state<'ctxt, T: EthSpec>(
     state: &BeaconState<T>,
     attestation: &Attestation<T>,
+    ctxt: &'ctxt mut ConsensusContext<T>,
     verify_signatures: VerifySignatures,
     spec: &ChainSpec,
-) -> Result<IndexedAttestation<T>> {
+) -> Result<&'ctxt IndexedAttestation<T>> {
     let data = &attestation.data;

     verify!(
@@ -66,9 +68,8 @@ pub fn verify_attestation_for_state<'ctxt, T: EthSpec>(
     verify_casper_ffg_vote(attestation, state)?;

     // Check signature and bitfields
-    let committee = state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
-    let indexed_attestation = get_indexed_attestation(committee.committee, attestation)?;
-    is_valid_indexed_attestation(state, &indexed_attestation, verify_signatures, spec)?;
+    let indexed_attestation = ctxt.get_indexed_attestation(state, attestation)?;
+    is_valid_indexed_attestation(state, indexed_attestation, verify_signatures, spec)?;

     Ok(indexed_attestation)
 }
diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml
index c787a7a87..b3ef3ae38 100644
--- a/consensus/types/Cargo.toml
+++ b/consensus/types/Cargo.toml
@@ -10,6 +10,7 @@ harness = false

 [dependencies]
 serde-big-array = {version = "0.3.2", features = ["const-generics"]}
+merkle_proof = { path = "../../consensus/merkle_proof" }
 bls = { path = "../../crypto/bls" }
 compare_fields = { path = "../../common/compare_fields" }
 compare_fields_derive = { path = "../../common/compare_fields_derive" }
diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs
index 000e6f671..48a83f94f 100644
--- a/consensus/types/src/beacon_state.rs
+++ b/consensus/types/src/beacon_state.rs
@@ -125,6 +125,8 @@ pub enum Error {
         current_epoch: Epoch,
         epoch: Epoch,
     },
+    IndexNotSupported(usize),
+    MerkleTreeError(merkle_proof::MerkleTreeError),
 }

 /// Control whether an epoch-indexed field can be indexed at the next epoch or not.
@@ -1735,6 +1737,57 @@ impl<T: EthSpec> BeaconState<T> {
         };
         Ok(sync_committee)
     }
+
+    pub fn compute_merkle_proof(
+        &mut self,
+        generalized_index: usize,
+    ) -> Result<Vec<Hash256>, Error> {
+        // 1. Convert generalized index to field index.
+        let field_index = match generalized_index {
+            light_client_update::CURRENT_SYNC_COMMITTEE_INDEX
+            | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => {
+                // Sync committees are top-level fields, subtract off the generalized indices
+                // for the internal nodes. Result should be 22 or 23, the field offset of the committee
+                // in the `BeaconState`:
+                // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate
+                generalized_index
+                    .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES)
+                    .ok_or(Error::IndexNotSupported(generalized_index))?
+            }
+            light_client_update::FINALIZED_ROOT_INDEX => {
+                // Finalized root is the right child of `finalized_checkpoint`, divide by two to get
+                // the generalized index of `state.finalized_checkpoint`.
+                let finalized_checkpoint_generalized_index = generalized_index / 2;
+                // Subtract off the internal nodes. Result should be 105/2 - 32 = 20, which matches
+                // the position of `finalized_checkpoint` in `BeaconState`.
+                finalized_checkpoint_generalized_index
+                    .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES)
+                    .ok_or(Error::IndexNotSupported(generalized_index))?
+            }
+            _ => return Err(Error::IndexNotSupported(generalized_index)),
+        };
+
+        // 2. Get all `BeaconState` leaves.
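Reviewer note: the index arithmetic in `compute_merkle_proof` above, spelled out: with the `BeaconState` tree-hashed over 32 top-level leaves, field `i` sits at generalized index `32 + i`, and a node's parent is `g / 2`. The constants below are the light-client spec values the comments above refer to.

```rust
const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32;

fn field_index(generalized_index: usize) -> Option<usize> {
    generalized_index.checked_sub(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES)
}

fn main() {
    // Sync committees are top-level fields 22 and 23 of the (Altair) BeaconState.
    assert_eq!(field_index(54), Some(22)); // CURRENT_SYNC_COMMITTEE_INDEX
    assert_eq!(field_index(55), Some(23)); // NEXT_SYNC_COMMITTEE_INDEX
    // FINALIZED_ROOT_INDEX (105) addresses the right child of `finalized_checkpoint`,
    // so step up to the parent first: 105 / 2 = 52, and 52 - 32 = 20.
    assert_eq!(field_index(105 / 2), Some(20));
}
```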
+ let mut cache = self + .tree_hash_cache_mut() + .take() + .ok_or(Error::TreeHashCacheNotInitialized)?; + let leaves = cache.recalculate_tree_hash_leaves(self)?; + self.tree_hash_cache_mut().restore(cache); + + // 3. Make deposit tree. + // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). + let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN; + let tree = merkle_proof::MerkleTree::create(&leaves, depth); + let (_, mut proof) = tree.generate_proof(field_index, depth)?; + + // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof. + if generalized_index == light_client_update::FINALIZED_ROOT_INDEX { + proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); + } + + Ok(proof) + } } impl From for Error { @@ -1767,6 +1820,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: merkle_proof::MerkleTreeError) -> Error { + Error::MerkleTreeError(e) + } +} + impl From for Error { fn from(e: ArithError) -> Error { Error::ArithError(e) diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index e50265e66..30dd9f8d6 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -18,7 +18,7 @@ use tree_hash::{mix_in_length, MerkleHasher, TreeHash}; /// /// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the /// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.** -const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; +pub const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; /// The number of nodes in the Merkle tree of a validator record. const NODES_PER_VALIDATOR: usize = 15; @@ -210,6 +210,90 @@ impl BeaconTreeHashCacheInner { } } + pub fn recalculate_tree_hash_leaves( + &mut self, + state: &BeaconState, + ) -> Result, Error> { + let mut leaves = vec![ + // Genesis data leaves. + state.genesis_time().tree_hash_root(), + state.genesis_validators_root().tree_hash_root(), + // Current fork data leaves. + state.slot().tree_hash_root(), + state.fork().tree_hash_root(), + state.latest_block_header().tree_hash_root(), + // Roots leaves. + state + .block_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?, + state + .state_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?, + state + .historical_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?, + // Eth1 Data leaves. + state.eth1_data().tree_hash_root(), + self.eth1_data_votes.recalculate_tree_hash_root(state)?, + state.eth1_deposit_index().tree_hash_root(), + // Validator leaves. 
+ self.validators + .recalculate_tree_hash_root(state.validators())?, + state + .balances() + .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?, + state + .randao_mixes() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?, + state + .slashings() + .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, + ]; + // Participation + if let BeaconState::Base(state) = state { + leaves.push(state.previous_epoch_attestations.tree_hash_root()); + leaves.push(state.current_epoch_attestations.tree_hash_root()); + } else { + leaves.push( + self.previous_epoch_participation + .recalculate_tree_hash_root(&ParticipationList::new( + state.previous_epoch_participation()?, + ))?, + ); + leaves.push( + self.current_epoch_participation + .recalculate_tree_hash_root(&ParticipationList::new( + state.current_epoch_participation()?, + ))?, + ); + } + // Checkpoint leaves + leaves.push(state.justification_bits().tree_hash_root()); + leaves.push(state.previous_justified_checkpoint().tree_hash_root()); + leaves.push(state.current_justified_checkpoint().tree_hash_root()); + leaves.push(state.finalized_checkpoint().tree_hash_root()); + // Inactivity & light-client sync committees (Altair and later). + if let Ok(inactivity_scores) = state.inactivity_scores() { + leaves.push( + self.inactivity_scores + .recalculate_tree_hash_root(inactivity_scores)?, + ); + } + if let Ok(current_sync_committee) = state.current_sync_committee() { + leaves.push(current_sync_committee.tree_hash_root()); + } + + if let Ok(next_sync_committee) = state.next_sync_committee() { + leaves.push(next_sync_committee.tree_hash_root()); + } + + // Execution payload (merge and later). + if let Ok(payload_header) = state.latest_execution_payload_header() { + leaves.push(payload_header.tree_hash_root()); + } + Ok(leaves) + } + /// Updates the cache and returns the tree hash root for the given `state`. /// /// The provided `state` should be a descendant of the last `state` given to this function, or @@ -246,121 +330,9 @@ impl BeaconTreeHashCacheInner { let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES); - hasher.write(state.genesis_time().tree_hash_root().as_bytes())?; - hasher.write(state.genesis_validators_root().tree_hash_root().as_bytes())?; - hasher.write(state.slot().tree_hash_root().as_bytes())?; - hasher.write(state.fork().tree_hash_root().as_bytes())?; - hasher.write(state.latest_block_header().tree_hash_root().as_bytes())?; - hasher.write( - state - .block_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)? - .as_bytes(), - )?; - hasher.write( - state - .state_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)? - .as_bytes(), - )?; - hasher.write( - state - .historical_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)? - .as_bytes(), - )?; - hasher.write(state.eth1_data().tree_hash_root().as_bytes())?; - hasher.write( - self.eth1_data_votes - .recalculate_tree_hash_root(state)? - .as_bytes(), - )?; - hasher.write(state.eth1_deposit_index().tree_hash_root().as_bytes())?; - hasher.write( - self.validators - .recalculate_tree_hash_root(state.validators())? - .as_bytes(), - )?; - hasher.write( - state - .balances() - .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)? - .as_bytes(), - )?; - hasher.write( - state - .randao_mixes() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)? 
- .as_bytes(), - )?; - hasher.write( - state - .slashings() - .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)? - .as_bytes(), - )?; - - // Participation - if let BeaconState::Base(state) = state { - hasher.write( - state - .previous_epoch_attestations - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(state.current_epoch_attestations.tree_hash_root().as_bytes())?; - } else { - hasher.write( - self.previous_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.previous_epoch_participation()?, - ))? - .as_bytes(), - )?; - hasher.write( - self.current_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.current_epoch_participation()?, - ))? - .as_bytes(), - )?; - } - - hasher.write(state.justification_bits().tree_hash_root().as_bytes())?; - hasher.write( - state - .previous_justified_checkpoint() - .tree_hash_root() - .as_bytes(), - )?; - hasher.write( - state - .current_justified_checkpoint() - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(state.finalized_checkpoint().tree_hash_root().as_bytes())?; - - // Inactivity & light-client sync committees (Altair and later). - if let Ok(inactivity_scores) = state.inactivity_scores() { - hasher.write( - self.inactivity_scores - .recalculate_tree_hash_root(inactivity_scores)? - .as_bytes(), - )?; - } - - if let Ok(current_sync_committee) = state.current_sync_committee() { - hasher.write(current_sync_committee.tree_hash_root().as_bytes())?; - } - - if let Ok(next_sync_committee) = state.next_sync_committee() { - hasher.write(next_sync_committee.tree_hash_root().as_bytes())?; - } - - // Execution payload (merge and later). - if let Ok(payload_header) = state.latest_execution_payload_header() { - hasher.write(payload_header.tree_hash_root().as_bytes())?; + let leaves = self.recalculate_tree_hash_leaves(state)?; + for leaf in leaves { + hasher.write(leaf.as_bytes())?; } // Withdrawal indices (Capella and later). diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ee108b6cf..9a8c4bb6f 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -844,7 +844,7 @@ impl ChainSpec { domain_sync_committee_selection_proof: 8, domain_contribution_and_proof: 9, altair_fork_version: [0x01, 0x00, 0x00, 0x64], - altair_fork_epoch: Some(Epoch::new(256)), + altair_fork_epoch: Some(Epoch::new(512)), /* * Merge hard fork params @@ -855,14 +855,11 @@ impl ChainSpec { .expect("pow does not overflow"), proportional_slashing_multiplier_bellatrix: 3, bellatrix_fork_version: [0x02, 0x00, 0x00, 0x64], - bellatrix_fork_epoch: None, - terminal_total_difficulty: Uint256::MAX - .checked_sub(Uint256::from(2u64.pow(10))) - .expect("subtraction does not overflow") - // Add 1 since the spec declares `2**256 - 2**10` and we use - // `Uint256::MAX` which is `2*256- 1`. 
- .checked_add(Uint256::one()) - .expect("addition does not overflow"), + bellatrix_fork_epoch: Some(Epoch::new(385536)), + terminal_total_difficulty: Uint256::from_dec_str( + "8626000000000000000000058750000000000000000000", + ) + .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 406136d54..d2a46c04a 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -21,17 +21,15 @@ pub struct LightClientBootstrap { } impl LightClientBootstrap { - pub fn from_beacon_state(beacon_state: BeaconState) -> Result { + pub fn from_beacon_state(beacon_state: &mut BeaconState) -> Result { let mut header = beacon_state.latest_block_header().clone(); header.state_root = beacon_state.tree_hash_root(); + let current_sync_committee_branch = + beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?; Ok(LightClientBootstrap { header, current_sync_committee: beacon_state.current_sync_committee()?.clone(), - /// TODO(Giulio2002): Generate Merkle Proof, this is just empty hashes - current_sync_committee_branch: FixedVector::new(vec![ - Hash256::zero(); - CURRENT_SYNC_COMMITTEE_PROOF_LEN - ])?, + current_sync_committee_branch: FixedVector::new(current_sync_committee_branch)?, }) } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index c93d15a1a..fe26c0fa3 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -31,7 +31,7 @@ impl LightClientFinalityUpdate { chain_spec: ChainSpec, beacon_state: BeaconState, block: BeaconBlock, - attested_state: BeaconState, + attested_state: &mut BeaconState, finalized_block: BeaconBlock, ) -> Result { let altair_fork_epoch = chain_spec @@ -60,11 +60,12 @@ impl LightClientFinalityUpdate { if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { return Err(Error::InvalidFinalizedBlock); } - // TODO(Giulio2002): compute proper merkle proofs. + + let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { attested_header: attested_header, finalized_header: finalized_header, - finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), }) diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 38609cf1b..7d01f39bf 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -77,7 +77,7 @@ impl LightClientUpdate { chain_spec: ChainSpec, beacon_state: BeaconState, block: BeaconBlock, - attested_state: BeaconState, + attested_state: &mut BeaconState, finalized_block: BeaconBlock, ) -> Result { let altair_fork_epoch = chain_spec @@ -114,16 +114,15 @@ impl LightClientUpdate { if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { return Err(Error::InvalidFinalizedBlock); } - // TODO(Giulio2002): compute proper merkle proofs. 
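Reviewer note: the Gnosis TTD set in the `chain_spec.rs` hunk above no longer fits in `u64`/`u128`, hence the decimal-string constructor. A sketch with the `primitive-types` crate (assuming Lighthouse's `Uint256` is the usual 256-bit `U256` re-export, which this snippet does not verify):

```rust
use primitive_types::U256;

fn main() {
    let ttd = U256::from_dec_str("8626000000000000000000058750000000000000000000")
        .expect("terminal_total_difficulty is a valid integer");
    // Unlike the old 2**256 - 2**10 placeholder, this is a value a real chain can reach.
    let old_placeholder = U256::MAX - U256::from(2u64).pow(U256::from(10)) + U256::one();
    assert!(ttd < old_placeholder);
    println!("{ttd}");
}
```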
+ let next_sync_committee_branch = + attested_state.compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?; + let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { attested_header, next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(vec![ - Hash256::zero(); - NEXT_SYNC_COMMITTEE_PROOF_LEN - ])?, + next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, finalized_header, - finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), }) diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index c0023f350..33accfc05 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -98,10 +98,9 @@ fn parse_client_config( cli_args: &ArgMatches, _env: &Environment, ) -> Result { - let mut client_config = ClientConfig { - data_dir: get_data_dir(cli_args), - ..Default::default() - }; + let mut client_config = ClientConfig::default(); + + client_config.set_data_dir(get_data_dir(cli_args)); if let Some(freezer_dir) = clap_utils::parse_optional(cli_args, "freezer-dir")? { client_config.freezer_db_path = Some(freezer_dir); @@ -289,7 +288,7 @@ pub fn prune_payloads( } /// Run the database manager, returning an error string if the operation did not succeed. -pub fn run(cli_args: &ArgMatches<'_>, mut env: Environment) -> Result<(), String> { +pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result<(), String> { let client_config = parse_client_config(cli_args, &env)?; let context = env.core_context(); let log = context.log().clone(); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index b4f630ae1..638ab46bf 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.2.1" +version = "3.3.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index 7631872c5..a47b48a30 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -37,7 +37,7 @@ use types::{EthSpec, FullPayload, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(5); -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let spec = &T::default_spec(); let executor = env.core_context().executor; diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 80bcff909..34144cd86 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -13,7 +13,7 @@ use types::EthSpec; pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); pub fn run( - mut env: Environment, + env: Environment, testnet_dir: PathBuf, matches: &ArgMatches<'_>, ) -> Result<(), String> { diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 8b233d847..9d548b049 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -789,6 +789,7 @@ fn run( max_log_size: 0, max_log_number: 0, compression: false, + is_restricted: true, }) .map_err(|e| format!("should start logger: {:?}", e))? 
.build() diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 8bd9af99a..49d1dd424 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -59,7 +59,7 @@ use types::{BeaconState, CloneConfig, EthSpec, Hash256}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let spec = &T::default_spec(); let executor = env.core_context().executor; diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index b25cec81b..44a1772cc 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -94,7 +94,7 @@ struct Config { exclude_post_block_thc: bool, } -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let spec = &T::default_spec(); let executor = env.core_context().executor; @@ -339,6 +339,10 @@ fn do_transition( .map_err(|e| format!("Unable to build caches: {:?}", e))?; debug!("Build all caches (again): {:?}", t.elapsed()); + let mut ctxt = ConsensusContext::new(pre_state.slot()) + .set_current_block_root(block_root) + .set_proposer_index(block.message().proposer_index()); + if !config.no_signature_verification { let get_pubkey = move |validator_index| { validator_pubkey_cache @@ -359,18 +363,20 @@ fn do_transition( get_pubkey, decompressor, &block, - Some(block_root), - Some(block.message().proposer_index()), + &mut ctxt, spec, ) .map_err(|e| format!("Invalid block signature: {:?}", e))?; debug!("Batch verify block signatures: {:?}", t.elapsed()); + + // Signature verification should prime the indexed attestation cache. + assert_eq!( + ctxt.num_cached_indexed_attestations(), + block.message().body().attestations().len() + ); } let t = Instant::now(); - let mut ctxt = ConsensusContext::new(pre_state.slot()) - .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); per_block_processing( &mut pre_state, &block, diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 7864b7e82..3b4dd5753 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.2.1" +version = "3.3.0" authors = ["Sigma Prime "] edition = "2021" autotests = false diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 49163b96f..fad7edeb1 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -55,6 +55,7 @@ pub struct LoggerConfig { pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, + pub is_restricted: bool, } impl Default for LoggerConfig { fn default() -> Self { @@ -68,6 +69,7 @@ impl Default for LoggerConfig { max_log_size: 200, max_log_number: 5, compression: false, + is_restricted: true, } } } @@ -257,7 +259,7 @@ impl EnvironmentBuilder { .rotate_size(config.max_log_size) .rotate_keep(config.max_log_number) .rotate_compress(config.compression) - .restrict_permissions(true) + .restrict_permissions(config.is_restricted) .build() .map_err(|e| format!("Unable to build file logger: {}", e))?; @@ -380,7 +382,7 @@ impl Environment { } /// Returns a `Context` where no "service" has been added to the logger output. 
- pub fn core_context(&mut self) -> RuntimeContext { + pub fn core_context(&self) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), @@ -395,7 +397,7 @@ impl Environment { } /// Returns a `Context` where the `service_name` is added to the logger output. - pub fn service_context(&mut self, service_name: String) -> RuntimeContext { + pub fn service_context(&self, service_name: String) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 9dc0902e0..da72204f9 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -129,6 +129,15 @@ fn main() { to store old logs.") .global(true), ) + .arg( + Arg::with_name("logfile-no-restricted-perms") + .long("logfile-no-restricted-perms") + .help( + "If present, log files will be generated as world-readable meaning they can be read by \ + any user on the machine. Note that logs can often contain sensitive information \ + about your validator and so this flag should be used with caution.") + .global(true), + ) .arg( Arg::with_name("log-format") .long("log-format") @@ -407,6 +416,8 @@ fn run( let logfile_compress = matches.is_present("logfile-compress"); + let logfile_restricted = !matches.is_present("logfile-no-restricted-perms"); + // Construct the path to the log file. let mut log_path: Option = clap_utils::parse_optional(matches, "logfile")?; if log_path.is_none() { @@ -446,6 +457,7 @@ fn run( max_log_size: logfile_max_size * 1_024 * 1_024, max_log_number: logfile_max_number, compression: logfile_compress, + is_restricted: logfile_restricted, }; let builder = environment_builder.initialize_logger(logger_config.clone())?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index b1498f109..d39235cb1 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -56,7 +56,9 @@ impl CommandLineTestExec for CommandLineTest { fn datadir_flag() { CommandLineTest::new() .run_with_zero_port() - .with_config_and_dir(|config, dir| assert_eq!(config.data_dir, dir.path().join("beacon"))); + .with_config_and_dir(|config, dir| { + assert_eq!(*config.data_dir(), dir.path().join("beacon")) + }); } #[test] @@ -132,6 +134,25 @@ fn fork_choice_before_proposal_timeout_zero() { .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0)); } +#[test] +fn checkpoint_sync_url_timeout_flag() { + CommandLineTest::new() + .flag("checkpoint-sync-url-timeout", Some("300")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.checkpoint_sync_url_timeout, 300); + }); +} + +#[test] +fn checkpoint_sync_url_timeout_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.checkpoint_sync_url_timeout, 60); + }); +} + #[test] fn paranoid_block_proposal_default() { CommandLineTest::new() @@ -1527,6 +1548,23 @@ fn enabled_disable_log_timestamp_flag() { assert!(config.logger_config.disable_log_timestamp); }); } +#[test] +fn logfile_restricted_perms_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.is_restricted); + }); +} +#[test] +fn logfile_no_restricted_perms_flag() { + CommandLineTest::new() + .flag("logfile-no-restricted-perms", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.logger_config.is_restricted == false); + }); +} #[test] fn sync_eth1_chain_default() { @@ -1561,3 
+1599,29 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { .run_with_zero_port() .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); } + +#[test] +fn light_client_server_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.network.enable_light_client_server, false)); +} + +#[test] +fn light_client_server_enabled() { + CommandLineTest::new() + .flag("light-client-server", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.network.enable_light_client_server, true)); +} + +#[test] +fn gui_flag() { + CommandLineTest::new() + .flag("gui", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.http_api.enabled); + assert!(config.validator_monitor_auto); + }); +} diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 49bc920dd..04f45ac5d 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -39,10 +39,10 @@ excluded_paths = [ "tests/.*/.*/ssz_static/LightClientOptimistic", # LightClientFinalityUpdate "tests/.*/.*/ssz_static/LightClientFinalityUpdate", - # Merkle-proof tests for light clients - "tests/.*/.*/merkle/single_proof", # Eip4844 tests are disabled for now. "tests/.*/eip4844", + # Capella tests are disabled for now. + "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index ae70f1e07..216912a4f 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -18,6 +18,7 @@ mod fork; mod fork_choice; mod genesis_initialization; mod genesis_validity; +mod merkle_proof_validity; mod operations; mod rewards; mod sanity_blocks; @@ -41,6 +42,7 @@ pub use epoch_processing::*; pub use fork::ForkTest; pub use genesis_initialization::*; pub use genesis_validity::*; +pub use merkle_proof_validity::*; pub use operations::*; pub use rewards::RewardsTest; pub use sanity_blocks::*; diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 8faf4db82..039efb368 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, CountUnrealized, + BeaconChainTypes, CachedHead, CountUnrealized, NotifyExecutionLayer, }; use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -388,6 +388,7 @@ impl Tester { block_root, block.clone(), CountUnrealized::False, + NotifyExecutionLayer::Yes, ))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs new file mode 100644 index 000000000..a57abc2e0 --- /dev/null +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -0,0 +1,87 @@ +use super::*; +use crate::decode::{ssz_decode_state, yaml_decode_file}; +use serde_derive::Deserialize; +use std::path::Path; +use tree_hash::Hash256; +use types::{BeaconState, EthSpec, ForkName}; + +#[derive(Debug, Clone, Deserialize)] +pub struct Metadata { + #[serde(rename(deserialize = "description"))] + _description: String, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct 
diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs
new file mode 100644
index 000000000..a57abc2e0
--- /dev/null
+++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs
@@ -0,0 +1,87 @@
+use super::*;
+use crate::decode::{ssz_decode_state, yaml_decode_file};
+use serde_derive::Deserialize;
+use std::path::Path;
+use tree_hash::Hash256;
+use types::{BeaconState, EthSpec, ForkName};
+
+#[derive(Debug, Clone, Deserialize)]
+pub struct Metadata {
+    #[serde(rename(deserialize = "description"))]
+    _description: String,
+}
+
+#[derive(Debug, Clone, Deserialize)]
+pub struct MerkleProof {
+    pub leaf: Hash256,
+    pub leaf_index: usize,
+    pub branch: Vec<Hash256>,
+}
+
+#[derive(Debug, Clone, Deserialize)]
+#[serde(bound = "E: EthSpec")]
+pub struct MerkleProofValidity<E: EthSpec> {
+    pub metadata: Option<Metadata>,
+    pub state: BeaconState<E>,
+    pub merkle_proof: MerkleProof,
+}
+
+impl<E: EthSpec> LoadCase for MerkleProofValidity<E> {
+    fn load_from_dir(path: &Path, fork_name: ForkName) -> Result<Self, Error> {
+        let spec = &testing_spec::<E>(fork_name);
+        let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?;
+        let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?;
+        // Metadata does not exist in these tests, but it is handled here just in case.
+        let meta_path = path.join("meta.yaml");
+        let metadata = if meta_path.exists() {
+            Some(yaml_decode_file(&meta_path)?)
+        } else {
+            None
+        };
+
+        Ok(Self {
+            metadata,
+            state,
+            merkle_proof,
+        })
+    }
+}
+
+impl<E: EthSpec> Case for MerkleProofValidity<E> {
+    fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> {
+        let mut state = self.state.clone();
+        state.initialize_tree_hash_cache();
+        let proof = match state.compute_merkle_proof(self.merkle_proof.leaf_index) {
+            Ok(proof) => proof,
+            Err(_) => {
+                return Err(Error::FailedToParseTest(
+                    "Could not retrieve merkle proof".to_string(),
+                ))
+            }
+        };
+        let proof_len = proof.len();
+        let branch_len = self.merkle_proof.branch.len();
+        if proof_len != branch_len {
+            return Err(Error::NotEqual(format!(
+                "Branches not equal in length. Computed: {}, expected: {}",
+                proof_len, branch_len
+            )));
+        }
+
+        for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) {
+            let expected_leaf = self.merkle_proof.branch[i];
+            if *proof_leaf != expected_leaf {
+                return Err(Error::NotEqual(format!(
+                    "Leaves not equal in merkle proof. Computed: {}, expected: {}",
+                    hex::encode(proof_leaf),
+                    hex::encode(expected_leaf)
+                )));
+            }
+        }
+
+        // The tree hash cache should still be initialized (not dropped).
+        assert!(state.tree_hash_cache().is_initialized());
+
+        Ok(())
+    }
+}
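The new case only checks that the locally computed branch matches the expected one; a consumer of such a proof would instead verify it by hashing from the leaf back up to the root. An illustrative, self-contained sketch (using the `sha2` crate for brevity; Lighthouse has its own hashing utilities, and `index` here is the leaf's position within its layer rather than the generalized index):

    use sha2::{Digest, Sha256};

    type H256 = [u8; 32];

    fn hash_pair(a: &H256, b: &H256) -> H256 {
        let mut hasher = Sha256::new();
        hasher.update(a);
        hasher.update(b);
        hasher.finalize().into()
    }

    /// Recompute the root from `leaf`, its position within the bottom layer, and
    /// the bottom-up list of sibling hashes in `branch`.
    fn merkle_root_from_branch(leaf: H256, branch: &[H256], mut index: usize) -> H256 {
        let mut node = leaf;
        for sibling in branch {
            // The low bit of the index says whether the node is a left or right child.
            node = if index % 2 == 0 {
                hash_pair(&node, sibling)
            } else {
                hash_pair(sibling, &node)
            };
            index /= 2;
        }
        node
    }

A proof is valid when the recomputed value equals the state's tree-hash root.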
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs
index 9e3562bc7..e99728ed2 100644
--- a/testing/ef_tests/src/cases/operations.rs
+++ b/testing/ef_tests/src/cases/operations.rs
@@ -91,7 +91,6 @@ impl<E: EthSpec> Operation<E> for Attestation<E> {
         _: &Operations<E, Self>,
     ) -> Result<(), BlockProcessingError> {
         let mut ctxt = ConsensusContext::new(state.slot());
-        let proposer_index = ctxt.get_proposer_index(state, spec)?;
         match state {
             BeaconState::Base(_) => base::process_attestations(
                 state,
@@ -103,14 +102,9 @@ impl<E: EthSpec> Operation<E> for Attestation<E> {
             BeaconState::Altair(_)
             | BeaconState::Merge(_)
             | BeaconState::Capella(_)
-            | BeaconState::Eip4844(_) => altair::process_attestation(
-                state,
-                self,
-                0,
-                proposer_index,
-                VerifySignatures::True,
-                spec,
-            ),
+            | BeaconState::Eip4844(_) => {
+                altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec)
+            }
         }
     }
 }
diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs
index ed376af44..66f81616b 100644
--- a/testing/ef_tests/src/handler.rs
+++ b/testing/ef_tests/src/handler.rs
@@ -624,6 +624,30 @@ impl<E: EthSpec + TypeName> Handler for GenesisInitializationHandler<E> {
     }
 }
 
+#[derive(Derivative)]
+#[derivative(Default(bound = ""))]
+pub struct MerkleProofValidityHandler<E>(PhantomData<E>);
+
+impl<E: EthSpec + TypeName> Handler for MerkleProofValidityHandler<E> {
+    type Case = cases::MerkleProofValidity<E>;
+
+    fn config_name() -> &'static str {
+        E::name()
+    }
+
+    fn runner_name() -> &'static str {
+        "light_client"
+    }
+
+    fn handler_name(&self) -> String {
+        "single_merkle_proof".into()
+    }
+
+    fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool {
+        fork_name != ForkName::Base
+    }
+}
+
 #[derive(Derivative)]
 #[derivative(Default(bound = ""))]
 pub struct OperationsHandler<E, O>(PhantomData<(E, O)>);
diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs
index 338a56b9f..f84be64da 100644
--- a/testing/ef_tests/tests/tests.rs
+++ b/testing/ef_tests/tests/tests.rs
@@ -507,6 +507,11 @@ fn genesis_validity() {
     // Note: there are no genesis validity tests for mainnet
 }
 
+#[test]
+fn merkle_proof_validity() {
+    MerkleProofValidityHandler::<MainnetEthSpec>::default().run();
+}
+
 #[test]
 fn rewards() {
     for handler in &["basic", "leak", "random"] {
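Given `runner_name()` = "light_client" and `handler_name()` = "single_merkle_proof", the handler resolves consensus-spec-tests cases from directories of the form (assumed to follow the same layout the other ef_tests handlers use):

    tests/<config>/<fork>/light_client/single_merkle_proof/<suite>/<case>/

`is_enabled_for_fork` excludes `ForkName::Base`, since light-client data only exists from Altair onwards.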
diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs
index 1fe7bf0f0..f643fbd5f 100644
--- a/testing/execution_engine_integration/src/nethermind.rs
+++ b/testing/execution_engine_integration/src/nethermind.rs
@@ -8,7 +8,10 @@ use std::process::{Child, Command, Output};
 use tempfile::TempDir;
 use unused_port::unused_tcp_port;
 
-const NETHERMIND_BRANCH: &str = "master";
+/// We've pinned the Nethermind version since our method of using the `master` branch to
+/// find the latest tag isn't working. It appears Nethermind doesn't always tag on `master`.
+/// We should fix this so we always pull the latest version of Nethermind.
+const NETHERMIND_BRANCH: &str = "release/1.14.6";
 const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind";
 
 fn build_result(repo_dir: &Path) -> Output {
diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs
index 1b280d148..944e2fef6 100644
--- a/testing/execution_engine_integration/src/test_rig.rs
+++ b/testing/execution_engine_integration/src/test_rig.rs
@@ -18,7 +18,7 @@ use types::{
     Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ForkName, FullPayload,
     Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256,
 };
-const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20);
+const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30);
 
 struct ExecutionPair<E, T: EthSpec> {
     /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP.
diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs
index 0933bff4c..d0a4ef949 100644
--- a/testing/node_test_rig/src/lib.rs
+++ b/testing/node_test_rig/src/lib.rs
@@ -48,7 +48,7 @@ impl<E: EthSpec> LocalBeaconNode<E> {
             .tempdir()
             .expect("should create temp directory for client datadir");
 
-        client_config.data_dir = datadir.path().into();
+        client_config.set_data_dir(datadir.path().into());
         client_config.network.network_dir = PathBuf::from(datadir.path()).join("network");
 
         ProductionBeaconNode::new(context, client_config)
diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs
index 3d59013f2..8284bff60 100644
--- a/testing/simulator/src/eth1_sim.rs
+++ b/testing/simulator/src/eth1_sim.rs
@@ -67,6 +67,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
             max_log_size: 0,
             max_log_number: 0,
             compression: false,
+            is_restricted: true,
         })?
         .multi_threaded_tokio_runtime()?
         .build()?;
diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs
index 06f9e9a4f..53c4447da 100644
--- a/testing/simulator/src/no_eth1_sim.rs
+++ b/testing/simulator/src/no_eth1_sim.rs
@@ -52,6 +52,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
             max_log_size: 0,
             max_log_number: 0,
             compression: false,
+            is_restricted: true,
         })?
         .multi_threaded_tokio_runtime()?
         .build()?;
diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs
index 00e439e4c..1c8b41f05 100644
--- a/testing/simulator/src/sync_sim.rs
+++ b/testing/simulator/src/sync_sim.rs
@@ -56,6 +56,7 @@ fn syncing_sim(
             max_log_size: 0,
             max_log_number: 0,
             compression: false,
+            is_restricted: true,
         })?
         .multi_threaded_tokio_runtime()?
         .build()?;
diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml
index 8a3c8303a..ada023f8c 100644
--- a/validator_client/Cargo.toml
+++ b/validator_client/Cargo.toml
@@ -59,3 +59,6 @@ task_executor = { path = "../common/task_executor" }
 reqwest = { version = "0.11.0", features = ["json","stream"] }
 url = "2.2.2"
 malloc_utils = { path = "../common/malloc_utils" }
+sysinfo = "0.26.5"
+system_health = { path = "../common/system_health" }
+
diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs
index e9c7bf69d..df5d0c606 100644
--- a/validator_client/src/http_api/mod.rs
+++ b/validator_client/src/http_api/mod.rs
@@ -16,6 +16,7 @@ use eth2::lighthouse_vc::{
     types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes},
 };
 use lighthouse_version::version_with_platform;
+use parking_lot::RwLock;
 use serde::{Deserialize, Serialize};
 use slog::{crit, info, warn, Logger};
 use slot_clock::SlotClock;
@@ -24,6 +25,8 @@ use std::marker::PhantomData;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::path::PathBuf;
 use std::sync::Arc;
+use sysinfo::{System, SystemExt};
+use system_health::observe_system_health_vc;
 use task_executor::TaskExecutor;
 use types::{ChainSpec, ConfigAndPreset, EthSpec};
 use validator_dir::Builder as ValidatorDirBuilder;
@@ -183,6 +186,35 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
     let api_token_path_inner = api_token_path.clone();
     let api_token_path_filter = warp::any().map(move || api_token_path_inner.clone());
 
+    // Create a `warp` filter that provides access to local system information.
+    let system_info = Arc::new(RwLock::new(sysinfo::System::new()));
+    {
+        // Grab write access for initialisation.
+        let mut system_info = system_info.write();
+        system_info.refresh_disks_list();
+        system_info.refresh_networks_list();
+    } // end lock
+
+    let system_info_filter =
+        warp::any()
+            .map(move || system_info.clone())
+            .map(|sysinfo: Arc<RwLock<System>>| {
+                {
+                    // Refresh stats.
+                    let mut sysinfo_lock = sysinfo.write();
+                    sysinfo_lock.refresh_memory();
+                    sysinfo_lock.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything());
+                    sysinfo_lock.refresh_cpu();
+                    sysinfo_lock.refresh_system();
+                    sysinfo_lock.refresh_networks();
+                    sysinfo_lock.refresh_disks();
+                } // end lock
+                sysinfo
+            });
+
+    let app_start = std::time::Instant::now();
+    let app_start_filter = warp::any().map(move || app_start);
+
     // GET lighthouse/version
     let get_node_version = warp::path("lighthouse")
         .and(warp::path("version"))
@@ -279,6 +311,24 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
         },
     );
 
+    // GET lighthouse/ui/health
+    let get_lighthouse_ui_health = warp::path("lighthouse")
+        .and(warp::path("ui"))
+        .and(warp::path("health"))
+        .and(warp::path::end())
+        .and(system_info_filter)
+        .and(app_start_filter)
+        .and(validator_dir_filter.clone())
+        .and(signer.clone())
+        .and_then(|sysinfo, app_start: std::time::Instant, val_dir, signer| {
+            blocking_signed_json_task(signer, move || {
+                let app_uptime = app_start.elapsed().as_secs() as u64;
+                Ok(api_types::GenericResponse::from(observe_system_health_vc(
+                    sysinfo, val_dir, app_uptime,
+                )))
+            })
+        });
+
     // POST lighthouse/validators/
     let post_validators = warp::path("lighthouse")
         .and(warp::path("validators"))
@@ -894,6 +944,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
         .or(get_lighthouse_spec)
         .or(get_lighthouse_validators)
         .or(get_lighthouse_validators_pubkey)
+        .or(get_lighthouse_ui_health)
         .or(get_fee_recipient)
         .or(get_gas_limit)
         .or(get_std_keystores)
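The new `GET lighthouse/ui/health` endpoint samples host metrics on every request. Note the split in the filter above: the expensive device enumeration (`refresh_disks_list`, `refresh_networks_list`) runs once at startup, while the per-request refreshes only update counters. A standalone sketch of that pattern, against the same `sysinfo` 0.26 API used by the diff:

    use parking_lot::RwLock;
    use std::sync::Arc;
    use sysinfo::{System, SystemExt};

    fn main() {
        let system = Arc::new(RwLock::new(System::new()));

        // One-off at startup: enumerate disks and network interfaces.
        {
            let mut sys = system.write();
            sys.refresh_disks_list();
            sys.refresh_networks_list();
        }

        // Per observation: update the counters without re-enumerating devices.
        {
            let mut sys = system.write();
            sys.refresh_memory();
            sys.refresh_cpu();
        }

        // Units depend on the sysinfo version in use.
        println!("total memory: {}", system.read().total_memory());
    }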
diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs
index 5b9594530..3647396ed 100644
--- a/validator_client/src/sync_committee_service.rs
+++ b/validator_client/src/sync_committee_service.rs
@@ -174,39 +174,40 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> {
             return Ok(());
         }
 
-        // Fetch `block_root` and `execution_optimistic` for `SyncCommitteeContribution`.
+        // Fetch `block_root` with non-optimistic execution for `SyncCommitteeContribution`.
         let response = self
             .beacon_nodes
-            .first_success(RequireSynced::Yes, OfflineOnFailure::Yes,|beacon_node| async move {
-                beacon_node.get_beacon_blocks_root(BlockId::Head).await
-            })
-            .await
-            .map_err(|e| e.to_string())?
-            .ok_or_else(|| format!("No block root found for slot {}", slot))?;
+            .first_success(
+                RequireSynced::Yes,
+                OfflineOnFailure::Yes,
+                |beacon_node| async move {
+                    match beacon_node.get_beacon_blocks_root(BlockId::Head).await {
+                        Ok(Some(block)) if block.execution_optimistic == Some(false) => {
+                            Ok(block)
+                        }
+                        Ok(Some(_)) => {
+                            Err(format!("To sign sync committee messages for slot {slot} a non-optimistic head block is required"))
+                        }
+                        Ok(None) => Err(format!("No block root found for slot {}", slot)),
+                        Err(e) => Err(e.to_string()),
+                    }
+                },
+            )
+            .await;
 
-        let block_root = response.data.root;
-        if let Some(execution_optimistic) = response.execution_optimistic {
-            if execution_optimistic {
+        let block_root = match response {
+            Ok(block) => block.data.root,
+            Err(errs) => {
                 warn!(
                     log,
-                    "Refusing to sign sync committee messages for optimistic head block";
+                    "Refusing to sign sync committee messages for an optimistic head block or \
+                    a head block with unknown optimistic status";
+                    "errors" => errs.to_string(),
                     "slot" => slot,
                 );
                 return Ok(());
             }
-        } else if let Some(bellatrix_fork_epoch) = self.duties_service.spec.bellatrix_fork_epoch {
-            // If the slot is post Bellatrix, do not sign messages when we cannot verify the
-            // optimistic status of the head block.
-            if slot.epoch(E::slots_per_epoch()) > bellatrix_fork_epoch {
-                warn!(
-                    log,
-                    "Refusing to sign sync committee messages for a head block with an unknown \
-                    optimistic status";
-                    "slot" => slot,
-                );
-                return Ok(());
-            }
-        }
+        };
 
         // Spawn one task to publish all of the sync committee signatures.
         let validator_duties = slot_duties.duties;
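The net effect of the rewrite: a sync-committee message is only signed when the head block is known to be non-optimistic. Both an optimistic head and an unknown status (previously tolerated for slots up to the Bellatrix fork epoch) are now refused uniformly, with the error surfaced in the log. Condensed to its decision rule (illustration only, not code from the diff):

    // The head's status arrives as Option<bool>: Some(false) = verified,
    // Some(true) = optimistic, None = unknown. Only a verified head is signable.
    fn head_is_signable(execution_optimistic: Option<bool>) -> bool {
        matches!(execution_optimistic, Some(false))
    }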