Merge branch 'sigp:stable' into stable

commit f70b3ef11d
@@ -6,4 +6,4 @@ end_of_line=lf
 charset=utf-8
 trim_trailing_whitespace=true
 max_line_length=100
-insert_final_newline=false
+insert_final_newline=true
.github/workflows/book.yml (vendored, 4 changes)
@@ -5,6 +5,10 @@ on:
     branches:
       - unstable
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build-and-upload-to-s3:
     runs-on: ubuntu-20.04
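The four added lines above are a workflow-level `concurrency` group keyed on the workflow name and the triggering ref, with `cancel-in-progress: true`: when a new run starts for the same branch, GitHub cancels the run still in flight. This native mechanism is what allows the dedicated cancellation workflow deleted below to go away. A minimal sketch of the pattern (the workflow name and job are illustrative, not part of this commit):

```yaml
# Illustrative workflow; only the `concurrency` stanza mirrors this commit.
name: example
on:
  push:
    branches:
      - unstable

# One group per (workflow, ref); starting a second run in the same
# group cancels the run already in progress.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - run: echo "any superseded run for this ref was cancelled"
```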
.github/workflows/cancel-previous-runs.yml (vendored, 14 changes, file deleted)
@@ -1,14 +0,0 @@
-name: cancel previous runs
-on: [push]
-jobs:
-  cancel:
-    name: 'Cancel Previous Runs'
-    runs-on: ubuntu-latest
-    timeout-minutes: 3
-    steps:
-      # https://github.com/styfle/cancel-workflow-action/releases
-      - uses: styfle/cancel-workflow-action@514c783324374c6940d1b92bfb962d0763d22de3 # 0.7.0
-        with:
-          # https://api.github.com/repos/sigp/lighthouse/actions/workflows
-          workflow_id: 697364,2434944,4462424,308241,2883401,316
-          access_token: ${{ github.token }}
.github/workflows/docker-antithesis.yml (vendored, 31 changes, file deleted)
@@ -1,31 +0,0 @@
-name: docker antithesis
-
-on:
-    push:
-        branches:
-            - unstable
-
-env:
-    ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }}
-    ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }}
-    ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }}
-    REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }}
-    IMAGE_NAME: lighthouse
-    TAG: libvoidstar
-
-jobs:
-    build-docker:
-        runs-on: ubuntu-22.04
-        steps:
-            - uses: actions/checkout@v3
-            - name: Update Rust
-              run: rustup update stable
-            - name: Dockerhub login
-              run: |
-                  echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin
-            - name: Build AMD64 dockerfile (with push)
-              run: |
-                  docker build \
-                      --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \
-                      --file ./testing/antithesis/Dockerfile.libvoidstar .
-                  docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG}
.github/workflows/docker.yml (vendored, 65 changes)
@@ -8,11 +8,17 @@ on:
         tags:
             - v*
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 env:
     DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
     DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
     IMAGE_NAME: ${{ github.repository_owner}}/lighthouse
     LCLI_IMAGE_NAME: ${{ github.repository_owner }}/lcli
+    # Enable self-hosted runners for the sigp repo only.
+    SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
 
 jobs:
     # Extract the VERSION which is either `latest` or `vX.Y.Z`, and the VERSION_SUFFIX
@@ -44,7 +50,8 @@ jobs:
             VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
     build-docker-single-arch:
         name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }}
-        runs-on: ubuntu-22.04
+        # Use self-hosted runners only on the sigp repo.
+        runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04'  }}
         strategy:
             matrix:
                 binary: [aarch64,
@@ -60,14 +67,13 @@ jobs:
 
         needs: [extract-version]
         env:
-            # We need to enable experimental docker features in order to use `docker buildx`
-            DOCKER_CLI_EXPERIMENTAL: enabled
             VERSION: ${{ needs.extract-version.outputs.VERSION }}
             VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
             FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
         steps:
             - uses: actions/checkout@v3
             - name: Update Rust
+              if: env.SELF_HOSTED_RUNNERS == 'false'
               run: rustup update stable
             - name: Dockerhub login
               run: |
@@ -76,16 +82,14 @@ jobs:
               run: |
                   cargo install cross
                   env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }}
+            - name: Make bin dir
+              run: mkdir ./bin
             - name: Move cross-built binary into Docker scope (if ARM)
               if: startsWith(matrix.binary, 'aarch64')
-              run: |
-                  mkdir ./bin;
-                  mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin;
+              run: mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin
             - name: Move cross-built binary into Docker scope (if x86_64)
               if: startsWith(matrix.binary, 'x86_64')
-              run: |
-                  mkdir ./bin;
-                  mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin;
+              run: mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin
             - name: Map aarch64 to arm64 short arch
               if: startsWith(matrix.binary, 'aarch64')
               run: echo "SHORT_ARCH=arm64" >> $GITHUB_ENV
@@ -95,17 +99,24 @@ jobs:
             - name: Set modernity suffix
               if: endsWith(matrix.binary, '-portable') != true
               run: echo "MODERNITY_SUFFIX=-modern" >> $GITHUB_ENV;
-            # Install dependencies for emulation. Have to create a new builder to pick up emulation support.
-            - name: Build Dockerfile and push
-              run: |
-                  docker run --privileged --rm tonistiigi/binfmt --install ${SHORT_ARCH}
-                  docker buildx create --use --name cross-builder
-                  docker buildx build \
-                      --platform=linux/${SHORT_ARCH} \
-                      --file ./Dockerfile.cross . \
-                      --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \
-                      --provenance=false \
-                      --push
+
+            - name: Install QEMU
+              if: env.SELF_HOSTED_RUNNERS == 'false'
+              run: sudo apt-get update && sudo apt-get install -y qemu-user-static
+
+            - name: Set up Docker Buildx
+              if: env.SELF_HOSTED_RUNNERS == 'false'
+              uses: docker/setup-buildx-action@v2
+
+            - name: Build and push
+              uses: docker/build-push-action@v4
+              with:
+                file: ./Dockerfile.cross
+                context: .
+                platforms: linux/${{ env.SHORT_ARCH }}
+                push: true
+                tags: ${{ env.IMAGE_NAME }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}${{ env.MODERNITY_SUFFIX }}${{ env.FEATURE_SUFFIX }}
+
     build-docker-multiarch:
         name: build-docker-multiarch${{ matrix.modernity }}
         runs-on: ubuntu-22.04
@@ -114,20 +125,22 @@ jobs:
             matrix:
                 modernity: ["", "-modern"]
         env:
-            # We need to enable experimental docker features in order to use `docker manifest`
-            DOCKER_CLI_EXPERIMENTAL: enabled
             VERSION: ${{ needs.extract-version.outputs.VERSION }}
             VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
         steps:
+            - name: Set up Docker Buildx
+              uses: docker/setup-buildx-action@v2
+
             - name: Dockerhub login
               run: |
                   echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
+
             - name: Create and push multiarch manifest
               run: |
-                  docker manifest create ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \
-                      --amend ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \
-                      --amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }};
-                  docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }}
+                  docker buildx imagetools create -t ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \
+                      ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \
+                      ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }};
+
     build-docker-lcli:
         runs-on: ubuntu-22.04
        needs: [extract-version]
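The new `runs-on` expression relies on the `&&`/`||` chaining idiom of GitHub Actions expressions, which behaves like a ternary as long as the middle operand is truthy: on `sigp/lighthouse` it yields the `fromJson(...)` array of self-hosted runner labels, while forks fall back to the hosted image, so fork CI keeps working. The same file also swaps the hand-rolled binfmt/`docker buildx create` bootstrap for `docker/setup-buildx-action` and `docker/build-push-action`, and assembles the multiarch tag with a single `docker buildx imagetools create -t` call instead of `docker manifest create`/`push`. A reduced sketch of the runner-selection idiom (job and step names are illustrative):

```yaml
# Illustrative job; only the `runs-on` expression mirrors this commit.
jobs:
  build:
    # `cond && a || b` selects `a` when cond is true (and `a` is truthy),
    # otherwise `b`; fromJson() turns the JSON string into a label array.
    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04' }}
    steps:
      - run: echo "running on the selected runner"
```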
.github/workflows/linkcheck.yml (vendored, 4 changes)
@@ -9,6 +9,10 @@ on:
       - 'book/**'
   merge_group:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   linkcheck:
     name: Check broken links
.github/workflows/local-testnet.yml (vendored, 8 changes)
@@ -8,6 +8,10 @@ on:
   pull_request:
   merge_group:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   run-local-testnet:
     strategy:
@@ -21,10 +25,6 @@ jobs:
 
       - name: Get latest version of stable Rust
         run: rustup update stable
-      - name: Install Protoc
-        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-        with:
-            repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install geth (ubuntu)
         if: matrix.os == 'ubuntu-22.04'
         run: |
.github/workflows/publish-crate.yml (vendored, 66 changes, file deleted)
@@ -1,66 +0,0 @@
-name:   Publish Crate
-
-on:
-    push:
-        tags:
-            - tree-hash-v*
-            - tree-hash-derive-v*
-            - eth2-ssz-v*
-            - eth2-ssz-derive-v*
-            - eth2-ssz-types-v*
-            - eth2-serde-util-v*
-            - eth2-hashing-v*
-
-env:
-    CARGO_API_TOKEN: ${{ secrets.CARGO_API_TOKEN }}
-
-jobs:
-    extract-tag:
-        runs-on: ubuntu-latest
-        steps:
-            - name: Extract tag
-              run: echo "TAG=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT
-              id: extract_tag
-        outputs:
-            TAG: ${{ steps.extract_tag.outputs.TAG }}
-
-    publish-crate:
-        runs-on: ubuntu-latest
-        needs: [extract-tag]
-        env:
-            TAG: ${{ needs.extract-tag.outputs.TAG }}
-        steps:
-            - uses: actions/checkout@v3
-            - name: Update Rust
-              run: rustup update stable
-            - name: Cargo login
-              run: |
-                  echo "${CARGO_API_TOKEN}" | cargo login
-            - name: publish eth2 ssz derive
-              if: startsWith(env.TAG, 'eth2-ssz-derive-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/ssz_derive eth2_ssz_derive "$TAG"
-            - name: publish eth2 ssz
-              if: startsWith(env.TAG, 'eth2-ssz-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/ssz eth2_ssz "$TAG"
-            - name: publish eth2 hashing
-              if: startsWith(env.TAG, 'eth2-hashing-v')
-              run: |
-                ./scripts/ci/publish.sh crypto/eth2_hashing eth2_hashing "$TAG"
-            - name: publish tree hash derive
-              if: startsWith(env.TAG, 'tree-hash-derive-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/tree_hash_derive tree_hash_derive "$TAG"
-            - name: publish tree hash
-              if: startsWith(env.TAG, 'tree-hash-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/tree_hash tree_hash "$TAG"
-            - name: publish ssz types
-              if: startsWith(env.TAG, 'eth2-ssz-types-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/ssz_types eth2_ssz_types "$TAG"
-            - name: publish serde util
-              if: startsWith(env.TAG, 'eth2-serde-util-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/serde_utils eth2_serde_utils "$TAG"
.github/workflows/release.yml (vendored, 46 changes)
@@ -5,11 +5,17 @@ on:
         tags:
             - v*
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 env:
     DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
     DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
     REPO_NAME: ${{ github.repository_owner }}/lighthouse
     IMAGE_NAME: ${{ github.repository_owner }}/lighthouse
+    # Enable self-hosted runners for the sigp repo only.
+    SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
 
 jobs:
     extract-version:
@@ -34,36 +40,37 @@ jobs:
                        x86_64-windows-portable]
                 include:
                     -   arch: aarch64-unknown-linux-gnu
-                        platform: ubuntu-latest
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest'  }}
                         profile: maxperf
                     -   arch: aarch64-unknown-linux-gnu-portable
-                        platform: ubuntu-latest
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest'  }}
                         profile: maxperf
                     -   arch: x86_64-unknown-linux-gnu
-                        platform: ubuntu-latest
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest'  }}
                         profile: maxperf
                     -   arch: x86_64-unknown-linux-gnu-portable
-                        platform: ubuntu-latest
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest'  }}
                         profile: maxperf
                     -   arch: x86_64-apple-darwin
-                        platform: macos-latest
+                        runner: macos-latest
                         profile: maxperf
                     -   arch: x86_64-apple-darwin-portable
-                        platform: macos-latest
+                        runner: macos-latest
                         profile: maxperf
                     -   arch: x86_64-windows
-                        platform: windows-2019
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019'  }}
                         profile: maxperf
                     -   arch: x86_64-windows-portable
-                        platform: windows-2019
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019'  }}
                         profile: maxperf
 
-        runs-on:    ${{ matrix.platform }}
+        runs-on:    ${{ matrix.runner }}
         needs: extract-version
         steps:
             - name: Checkout sources
               uses: actions/checkout@v3
             - name: Get latest version of stable Rust
+              if: env.SELF_HOSTED_RUNNERS == 'false'
               run: rustup update stable
 
             # ==============================
@@ -71,7 +78,7 @@ jobs:
             # ==============================
 
             - uses: KyleMayes/install-llvm-action@v1
-              if: startsWith(matrix.arch, 'x86_64-windows')
+              if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows')
               with:
                 version: "15.0"
                 directory: ${{ runner.temp }}/llvm
@@ -79,15 +86,6 @@ jobs:
               if: startsWith(matrix.arch, 'x86_64-windows')
               run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
 
-            # ==============================
-            # Windows & Mac dependencies
-            # ==============================
-            - name: Install Protoc
-              if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows')
-              uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-              with:
-                repo-token: ${{ secrets.GITHUB_TOKEN }}
-
             # ==============================
             #       Builds
             # ==============================
@@ -134,17 +132,11 @@ jobs:
 
             - name: Build Lighthouse for Windows portable
               if:   matrix.arch == 'x86_64-windows-portable'
-              # NOTE: profile set to release until this rustc issue is fixed:
-              #
-              # https://github.com/rust-lang/rust/issues/107781
-              #
-              # tracked at: https://github.com/sigp/lighthouse/issues/3964
-              run:  cargo install --path lighthouse --force --locked --features portable,gnosis --profile release
+              run:  cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }}
 
             - name: Build Lighthouse for Windows modern
               if:   matrix.arch == 'x86_64-windows'
-              # NOTE: profile set to release (see above)
-              run:  cargo install --path lighthouse --force --locked --features modern,gnosis --profile release
+              run:  cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }}
 
             - name: Configure GPG and create artifacts
               if: startsWith(matrix.arch, 'x86_64-windows') != true
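Rather than one `runs-on` for the whole job, each matrix `include` entry now carries its own `runner` expression, so Linux and Windows release builds move to self-hosted hardware on `sigp/lighthouse` while the macOS targets and all forks stay on hosted runners; the rustup and LLVM steps gated on `SELF_HOSTED_RUNNERS` are skipped where the toolchain is pre-provisioned. A trimmed sketch of the shape (the two arch values are shortened for illustration):

```yaml
# Illustrative matrix; per-entry runner selection as used in release.yml.
jobs:
  build:
    strategy:
      matrix:
        arch: [linux-example, windows-example]
        include:
          - arch: linux-example
            runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }}
          - arch: windows-example
            runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }}
    # Each matrix job runs on the runner chosen for its arch.
    runs-on: ${{ matrix.runner }}
    steps:
      - run: echo "building ${{ matrix.arch }}"
```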
.github/workflows/test-suite.yml (vendored, 127 changes)
@@ -9,6 +9,11 @@ on:
       - 'pr/*'
   pull_request:
   merge_group:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 env:
   # Deny warnings in CI
   # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
@@ -17,6 +22,10 @@ env:
   PINNED_NIGHTLY: nightly-2023-04-16
   # Prevent Github API rate limiting.
   LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  # Enable self-hosted runners for the sigp repo only.
+  SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
+  # Self-hosted runners need to reference a different host for `./watch` tests.
+  WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }}
 jobs:
   target-branch-check:
     name: target-branch-check
@@ -48,41 +57,37 @@ jobs:
       run: make cargo-fmt
   release-tests-ubuntu:
     name: release-tests-ubuntu
-    runs-on: ubuntu-latest
+    # Use self-hosted runners only on the sigp repo.
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest'  }}
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
       with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
     - name: Run tests in release
       run: make test-release
   release-tests-windows:
     name: release-tests-windows
-    runs-on: windows-2019
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019'  }}
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Use Node.js
-      uses: actions/setup-node@v2
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
       with:
-        node-version: '14'
-    - name: Install windows build tools
-      run: |
-        choco install python protoc visualstudio2019-workload-vctools -y
-        npm config set msvs_version 2019
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
     - name: Install make
       run: choco install -y make
     - uses: KyleMayes/install-llvm-action@v1
+      if: env.SELF_HOSTED_RUNNERS == false
       with:
         version: "15.0"
         directory: ${{ runner.temp }}/llvm
@@ -92,16 +97,14 @@ jobs:
       run: make test-release
   beacon-chain-tests:
     name: beacon-chain-tests
-    runs-on: ubuntu-latest
+    # Use self-hosted runners only on the sigp repo.
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest'  }}
     needs: cargo-fmt
     steps:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Run beacon_chain tests for all known forks
      run: make test-beacon-chain
   op-pool-tests:
@@ -112,10 +115,6 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Run operation_pool tests for all known forks
      run: make test-op-pool
   slasher-tests:
@@ -130,18 +129,18 @@ jobs:
       run: make test-slasher
   debug-tests-ubuntu:
     name: debug-tests-ubuntu
-    runs-on: ubuntu-22.04
+    # Use self-hosted runners only on the sigp repo.
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest'  }}
     needs: cargo-fmt
     steps:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
    - name: Run tests in debug
      run: make test-debug
   state-transition-vectors-ubuntu:
@@ -152,24 +151,18 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Run state_transition_vectors in release.
      run: make run-state-transition-tests
   ef-tests-ubuntu:
     name: ef-tests-ubuntu
-    runs-on: ubuntu-latest
+    # Use self-hosted runners only on the sigp repo.
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest'  }}
     needs: cargo-fmt
     steps:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Run consensus-spec-tests with blst, milagro and fake_crypto
      run: make test-ef
   dockerfile-ubuntu:
@@ -192,12 +185,10 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
    - name: Run the beacon chain sim that starts from an eth1 contract
      run: cargo run --release --bin simulator eth1-sim
   merge-transition-ubuntu:
@@ -208,12 +199,10 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
    - name: Run the beacon chain sim and go through the merge transition
      run: cargo run --release --bin simulator eth1-sim --post-merge
   no-eth1-simulator-ubuntu:
@@ -224,10 +213,6 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Run the beacon chain sim without an eth1 connection
      run: cargo run --release --bin simulator no-eth1-sim
   syncing-simulator-ubuntu:
@@ -238,12 +223,10 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
    - name: Run the syncing simulator
      run: cargo run --release --bin simulator syncing-sim
   doppelganger-protection-test:
@@ -254,10 +237,6 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Install geth
      run: |
          sudo add-apt-repository -y ppa:ethereum/ethereum
@@ -289,10 +268,6 @@ jobs:
        dotnet-version: '6.0.201'
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Run exec engine integration tests in release
      run: make test-exec-engine
   check-benchmarks:
@@ -303,10 +278,6 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Typecheck benchmark code without running it
      run: make check-benches
   clippy:
@@ -317,10 +288,6 @@ jobs:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Lint code for quality and style with Clippy
      run: make lint
    - name: Certify Cargo.lock freshness
@@ -333,10 +300,6 @@ jobs:
    - uses: actions/checkout@v3
    - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
      run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Run cargo check
      run: cargo check --workspace
   arbitrary-check:
@@ -375,10 +338,6 @@ jobs:
    - uses: actions/checkout@v3
    - name: Install Rust (${{ env.PINNED_NIGHTLY }})
      run: rustup toolchain install $PINNED_NIGHTLY
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Install cargo-udeps
      run: cargo install cargo-udeps --locked --force
    - name: Create Cargo config dir
@@ -396,7 +355,7 @@ jobs:
     steps:
    - uses: actions/checkout@v3
    - name: Install dependencies
-      run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
+      run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
    - name: Use Rust beta
      run: rustup override set beta
    - name: Run make
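Three edits recur throughout this file: every `arduino/setup-protoc` step is dropped (the tree no longer needs `protoc`; see the Dockerfile and cross-compilation config hunks below), the ad-hoc `cargo install ... anvil` step becomes a pinned `foundry-rs/foundry-toolchain` action, and toolchain setup is gated on the new `SELF_HOSTED_RUNNERS` variable. One caveat worth flagging: `env` context values are strings, so the unquoted `if: env.SELF_HOSTED_RUNNERS == false` used in this file is not the same check as the quoted `== 'false'` used in `docker.yml` and `release.yml`; given GitHub's type coercion between strings and booleans, only the quoted form reliably matches. A sketch of the quoted form (workflow and job names are illustrative):

```yaml
# Illustrative workflow; the env gate mirrors the pattern in this commit.
name: example-tests
on: push

env:
  # Stored as the string 'true' or 'false', not a boolean.
  SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}

jobs:
  tests:
    runs-on: ubuntu-latest
    steps:
      - name: Get latest version of stable Rust
        # Hosted runners update rustup; self-hosted machines are assumed
        # to be pre-provisioned, so the step is skipped there.
        if: env.SELF_HOSTED_RUNNERS == 'false'
        run: rustup update stable
```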
Cargo.lock (generated, 3761 changes)

(File diff suppressed because it is too large.)
Cargo.toml (145 changes)
@@ -4,6 +4,7 @@ members = [
 
     "beacon_node",
     "beacon_node/beacon_chain",
+    "beacon_node/beacon_processor",
     "beacon_node/builder_client",
     "beacon_node/client",
     "beacon_node/eth1",
@@ -35,6 +36,7 @@ members = [
     "common/lru_cache",
     "common/malloc_utils",
     "common/oneshot_broadcast",
+    "common/pretty_reqwest_error",
     "common/sensitive_url",
     "common/slot_clock",
     "common/system_health",
@@ -81,21 +83,144 @@ members = [
     "validator_client",
     "validator_client/slashing_protection",
 
+    "validator_manager",
+
     "watch",
 ]
 resolver = "2"
 
-[patch]
-[patch.crates-io]
-warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
-arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" }
+[workspace.package]
+edition = "2021"
 
-[patch."https://github.com/ralexstokes/mev-rs"]
-mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" }
-[patch."https://github.com/ralexstokes/ethereum-consensus"]
-ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" }
-[patch."https://github.com/ralexstokes/ssz-rs"]
-ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" }
+[workspace.dependencies]
+arbitrary = { version = "1", features = ["derive"] }
+bincode = "1"
+bitvec = "1"
+byteorder = "1"
+bytes = "1"
+clap = "2"
+compare_fields_derive = { path = "common/compare_fields_derive" }
+criterion = "0.3"
+delay_map = "0.3"
+derivative = "2"
+dirs = "3"
+discv5 = { version = "0.3", features = ["libp2p"] }
+env_logger = "0.9"
+error-chain = "0.12"
+ethereum-types = "0.14"
+ethereum_hashing = "1.0.0-beta.2"
+ethereum_serde_utils = "0.5"
+ethereum_ssz = "0.5"
+ethereum_ssz_derive = "0.5"
+ethers-core = "1"
+ethers-providers = { version = "1", default-features = false }
+exit-future = "0.2"
+fnv = "1"
+fs2 = "0.4"
+futures = "0.3"
+hex = "0.4"
+hyper = "0.14"
+itertools = "0.10"
+lazy_static = "1"
+libsecp256k1 = "0.7"
+log = "0.4"
+lru = "0.7"
+maplit = "1"
+num_cpus = "1"
+parking_lot = "0.12"
+paste = "1"
+quickcheck = "1"
+quickcheck_macros = "1"
+quote = "1"
+r2d2 = "0.8"
+rand = "0.8"
+rayon = "1.7"
+regex = "1"
+reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls"] }
+ring = "0.16"
+rusqlite = { version = "0.28", features = ["bundled"] }
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
+serde_repr = "0.1"
+serde_yaml = "0.8"
+sha2 = "0.9"
+slog = { version = "2", features = ["max_level_trace", "release_max_level_trace"] }
+slog-async = "2"
+slog-term = "2"
+sloggers = { version = "2", features = ["json"] }
+smallvec = "1"
+snap = "1"
+ssz_types = "0.5"
+strum = { version = "0.24", features = ["derive"] }
+superstruct = "0.6"
+syn = "1"
+sysinfo = "0.26"
+tempfile = "3"
+tokio = { version = "1", features = ["rt-multi-thread", "sync"] }
+tokio-stream = { version = "0.1", features = ["sync"] }
+tokio-util = { version = "0.6", features = ["codec", "compat", "time"] }
+tree_hash = "0.5"
+tree_hash_derive = "0.5"
+url = "2"
+uuid = { version = "0.8", features = ["serde", "v4"] }
+# TODO update to warp 0.3.6 after released.
+warp = { git = "https://github.com/seanmonstar/warp.git", default-features = false, features = ["tls"] }
+zeroize = { version = "1", features = ["zeroize_derive"] }
+zip = "0.6"
+
+# Local crates.
+account_utils = { path = "common/account_utils" }
+beacon_chain = { path = "beacon_node/beacon_chain" }
+beacon_node = { path = "beacon_node" }
+beacon_processor =  { path = "beacon_node/beacon_processor" }
+bls = { path = "crypto/bls" }
+cached_tree_hash = { path = "consensus/cached_tree_hash" }
+clap_utils = { path = "common/clap_utils" }
+compare_fields = { path = "common/compare_fields" }
+deposit_contract = { path = "common/deposit_contract" }
+directory = { path = "common/directory" }
+environment = { path = "lighthouse/environment" }
+eth1 = { path = "beacon_node/eth1" }
+eth1_test_rig = { path = "testing/eth1_test_rig" }
+eth2 = { path = "common/eth2" }
+eth2_config = { path = "common/eth2_config" }
+eth2_key_derivation = { path = "crypto/eth2_key_derivation" }
+eth2_keystore = { path = "crypto/eth2_keystore" }
+eth2_network_config = { path = "common/eth2_network_config" }
+eth2_wallet = { path = "crypto/eth2_wallet" }
+execution_layer = { path = "beacon_node/execution_layer" }
+filesystem = { path = "common/filesystem" }
+fork_choice = { path = "consensus/fork_choice" }
+genesis = { path = "beacon_node/genesis" }
+http_api = { path = "beacon_node/http_api" }
+int_to_bytes = { path = "consensus/int_to_bytes" }
+lighthouse_metrics = { path = "common/lighthouse_metrics" }
+lighthouse_network = { path = "beacon_node/lighthouse_network" }
+lighthouse_version = { path = "common/lighthouse_version" }
+lockfile = { path = "common/lockfile" }
+logging = { path = "common/logging" }
+lru_cache = { path = "common/lru_cache" }
+malloc_utils = { path = "common/malloc_utils" }
+merkle_proof = { path = "consensus/merkle_proof" }
+monitoring_api = { path = "common/monitoring_api" }
+network = { path = "beacon_node/network" }
+operation_pool = { path = "beacon_node/operation_pool" }
+pretty_reqwest_error = { path = "common/pretty_reqwest_error" }
+proto_array = { path = "consensus/proto_array" }
+safe_arith = {path = "consensus/safe_arith"}
+sensitive_url = { path = "common/sensitive_url" }
+slasher = { path = "slasher" }
+slashing_protection = { path = "validator_client/slashing_protection" }
+slot_clock = { path = "common/slot_clock" }
+state_processing = { path = "consensus/state_processing" }
+store = { path = "beacon_node/store" }
+swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" }
+task_executor = { path = "common/task_executor" }
+types = { path = "consensus/types" }
+unused_port = { path = "common/unused_port" }
+validator_client = { path = "validator_client/" }
+validator_dir = { path = "common/validator_dir" }
+warp_utils = { path = "common/warp_utils" }
 
 [profile.maxperf]
 inherits = "release"
@@ -1,5 +1,5 @@
 [target.x86_64-unknown-linux-gnu]
-dockerfile = './scripts/cross/Dockerfile'
+pre-build = ["apt-get install -y cmake clang-3.9"]
 
 [target.aarch64-unknown-linux-gnu]
-dockerfile = './scripts/cross/Dockerfile'
+pre-build = ["apt-get install -y cmake clang-3.9"]
@@ -1,5 +1,5 @@
-FROM rust:1.68.2-bullseye AS builder
-RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
+FROM rust:1.69.0-bullseye AS builder
+RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
 COPY . lighthouse
 ARG FEATURES
 ARG PROFILE=release
Makefile (21 changes)
@@ -71,13 +71,13 @@ install-lcli:
 # optimized CPU functions that may not be available on some systems. This
 # results in a more portable binary with ~20% slower BLS verification.
 build-x86_64:
-	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-x86_64-portable:
-	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-aarch64:
-	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-aarch64-portable:
-	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 
 # Create a `.tar.gz` containing a binary for a specific target.
 define tarball_release_binary
@@ -145,8 +145,9 @@ test-op-pool-%:
 
 # Run the tests in the `slasher` crate for all supported database backends.
 test-slasher:
-	cargo test --release -p slasher --features mdbx
-	cargo test --release -p slasher --no-default-features --features lmdb
+	cargo test --release -p slasher --features lmdb
+	cargo test --release -p slasher --no-default-features --features mdbx
+	cargo test --release -p slasher --features lmdb,mdbx # both backends enabled
 
 # Runs only the tests/state_transition_vectors tests.
 run-state-transition-tests:
@@ -169,7 +170,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
 lint:
-	cargo clippy --workspace --tests -- \
+	cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \
 		-D clippy::fn_to_numeric_cast_any \
 		-D warnings \
 		-A clippy::derive_partial_eq_without_eq \
@@ -179,6 +180,10 @@ lint:
 		-A clippy::question-mark \
 		-A clippy::uninlined-format-args
 
+# Lints the code using Clippy and automatically fix some simple compiler warnings.
+lint-fix:
+	EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint
+
 nightly-lint:
 	cp .github/custom/clippy.toml .
 	cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \
@@ -202,7 +207,7 @@ arbitrary-fuzz:
 # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
 audit:
 	cargo install --force cargo-audit
-	cargo audit --ignore RUSTSEC-2020-0071
+	cargo audit
 
 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
 vendor:
@@ -10,7 +10,7 @@ An open-source Ethereum consensus client, written in Rust and maintained by Sigma Prime
 [Book Link]: https://lighthouse-book.sigmaprime.io
 [stable]: https://github.com/sigp/lighthouse/tree/stable
 [unstable]: https://github.com/sigp/lighthouse/tree/unstable
-[blog]: https://lighthouse.sigmaprime.io
+[blog]: https://lighthouse-blog.sigmaprime.io
 
 [Documentation](https://lighthouse-book.sigmaprime.io)
 
@@ -1,29 +1,35 @@
 [package]
 name = "account_manager"
 version = "0.3.5"
-authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
-edition = "2021"
+authors = [
+    "Paul Hauner <paul@paulhauner.com>",
+    "Luke Anderson <luke@sigmaprime.io>",
+]
+edition = { workspace = true }
 
 [dependencies]
-bls = { path = "../crypto/bls" }
-clap = "2.33.3"
-types = { path = "../consensus/types" }
-environment = { path = "../lighthouse/environment" }
-eth2_network_config = { path = "../common/eth2_network_config" }
-clap_utils = { path = "../common/clap_utils" }
-directory = { path = "../common/directory" }
-eth2_wallet = { path = "../crypto/eth2_wallet" }
+bls = { workspace = true }
+clap = { workspace = true }
+types = { workspace = true }
+environment = { workspace = true }
+eth2_network_config = { workspace = true }
+clap_utils = { workspace = true }
+directory = { workspace = true }
+eth2_wallet = { workspace = true }
 eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
-validator_dir = { path = "../common/validator_dir" }
-tokio = { version = "1.14.0", features = ["full"] }
-eth2_keystore = { path = "../crypto/eth2_keystore" }
-account_utils = { path = "../common/account_utils" }
-slashing_protection = { path = "../validator_client/slashing_protection" }
-eth2 = {path = "../common/eth2"}
-safe_arith = {path = "../consensus/safe_arith"}
-slot_clock = { path = "../common/slot_clock" }
-filesystem = { path = "../common/filesystem" }
-sensitive_url = { path = "../common/sensitive_url" }
+validator_dir = { workspace = true }
+tokio = { workspace = true }
+eth2_keystore = { workspace = true }
+account_utils = { workspace = true }
+slashing_protection = { workspace = true }
+eth2 = { workspace = true }
+safe_arith = { workspace = true }
+slot_clock = { workspace = true }
+filesystem = { workspace = true }
+sensitive_url = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+slog = { workspace = true }
 
 [dev-dependencies]
-tempfile = "3.1.0"
+tempfile = { workspace = true }
| @ -1,55 +1,7 @@ | ||||
| use account_utils::PlainText; | ||||
| use account_utils::{read_input_from_user, strip_off_newlines}; | ||||
| use eth2_wallet::bip39::{Language, Mnemonic}; | ||||
| use std::fs; | ||||
| use std::path::PathBuf; | ||||
| use std::str::from_utf8; | ||||
| use std::thread::sleep; | ||||
| use std::time::Duration; | ||||
| use account_utils::read_input_from_user; | ||||
| 
 | ||||
| pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:"; | ||||
| pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:"; | ||||
| 
 | ||||
| pub fn read_mnemonic_from_cli( | ||||
|     mnemonic_path: Option<PathBuf>, | ||||
|     stdin_inputs: bool, | ||||
| ) -> Result<Mnemonic, String> { | ||||
|     let mnemonic = match mnemonic_path { | ||||
|         Some(path) => fs::read(&path) | ||||
|             .map_err(|e| format!("Unable to read {:?}: {:?}", path, e)) | ||||
|             .and_then(|bytes| { | ||||
|                 let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into(); | ||||
|                 let phrase = from_utf8(bytes_no_newlines.as_ref()) | ||||
|                     .map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?; | ||||
|                 Mnemonic::from_phrase(phrase, Language::English).map_err(|e| { | ||||
|                     format!( | ||||
|                         "Unable to derive mnemonic from string {:?}: {:?}", | ||||
|                         phrase, e | ||||
|                     ) | ||||
|                 }) | ||||
|             })?, | ||||
|         None => loop { | ||||
|             eprintln!(); | ||||
|             eprintln!("{}", MNEMONIC_PROMPT); | ||||
| 
 | ||||
|             let mnemonic = read_input_from_user(stdin_inputs)?; | ||||
| 
 | ||||
|             match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) { | ||||
|                 Ok(mnemonic_m) => { | ||||
|                     eprintln!("Valid mnemonic provided."); | ||||
|                     eprintln!(); | ||||
|                     sleep(Duration::from_secs(1)); | ||||
|                     break mnemonic_m; | ||||
|                 } | ||||
|                 Err(_) => { | ||||
|                     eprintln!("Invalid mnemonic"); | ||||
|                 } | ||||
|             } | ||||
|         }, | ||||
|     }; | ||||
|     Ok(mnemonic) | ||||
| } | ||||
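For reference, a minimal sketch of how this helper can be driven from a subcommand. The function name is real (shown above); the surrounding function and its arguments are hypothetical:

    // Illustrative only: recover a mnemonic either from a file or interactively.
    fn recover_mnemonic(path: Option<PathBuf>, stdin_inputs: bool) -> Result<(), String> {
        let mnemonic = read_mnemonic_from_cli(path, stdin_inputs)?;
        eprintln!("Recovered a {}-word mnemonic", mnemonic.phrase().split(' ').count());
        Ok(())
    }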
|  | ||||
| /// Reads in a wallet name from the user. If the `--wallet-name` flag is provided, use it. Otherwise | ||||
| /// read from an interactive prompt using tty unless the `--stdin-inputs` flag is provided. | ||||
| pub fn read_wallet_name_from_cli( | ||||
|  | ||||
| @ -123,10 +123,8 @@ async fn publish_voluntary_exit<E: EthSpec>( | ||||
| ) -> Result<(), String> { | ||||
|     let genesis_data = get_geneisis_data(client).await?; | ||||
|     let testnet_genesis_root = eth2_network_config | ||||
|         .beacon_state::<E>() | ||||
|         .as_ref() | ||||
|         .expect("network should have valid genesis state") | ||||
|         .genesis_validators_root(); | ||||
|         .genesis_validators_root::<E>()? | ||||
|         .ok_or("Genesis state is unknown")?; | ||||
|  | ||||
|     // Verify that the beacon node and validator being exited are on the same network. | ||||
|     if genesis_data.genesis_validators_root != testnet_genesis_root { | ||||
|  | ||||
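The hunk above swaps the full `BeaconState` decode for the cheaper `genesis_validators_root::<E>()` accessor. A hedged sketch of the new calling convention, assuming the accessor returns `Result<Option<Hash256>, String>` as the `?` and `ok_or` combination suggests:

    // Sketch: compare the network's genesis root against the beacon node's.
    let testnet_genesis_root = eth2_network_config
        .genesis_validators_root::<E>()?
        .ok_or("Genesis state is unknown")?;
    if genesis_data.genesis_validators_root != testnet_genesis_root {
        return Err("Validator and beacon node appear to be on different networks".to_string());
    }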
| @ -4,8 +4,8 @@ use account_utils::{ | ||||
|     eth2_keystore::Keystore, | ||||
|     read_password_from_user, | ||||
|     validator_definitions::{ | ||||
|         recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions, | ||||
|         CONFIG_FILENAME, | ||||
|         recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition, | ||||
|         ValidatorDefinitions, CONFIG_FILENAME, | ||||
|     }, | ||||
|     ZeroizeString, | ||||
| }; | ||||
| @ -277,7 +277,9 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin | ||||
|         let suggested_fee_recipient = None; | ||||
|         let validator_def = ValidatorDefinition::new_keystore_with_password( | ||||
|             &dest_keystore, | ||||
|             password_opt, | ||||
|             password_opt | ||||
|                 .map(PasswordStorage::ValidatorDefinitions) | ||||
|                 .unwrap_or(PasswordStorage::None), | ||||
|             graffiti, | ||||
|             suggested_fee_recipient, | ||||
|             None, | ||||
|  | ||||
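The new `PasswordStorage` argument makes explicit where (or whether) the keystore password is persisted. The `map`/`unwrap_or` chain above is equivalent to this plain match, assuming the two variants shown in the import hunk:

    // Sketch: Option<ZeroizeString> -> PasswordStorage.
    let password_storage = match password_opt {
        Some(password) => PasswordStorage::ValidatorDefinitions(password),
        None => PasswordStorage::None,
    };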
| @ -1,10 +1,9 @@ | ||||
| use super::create::STORE_WITHDRAW_FLAG; | ||||
| use crate::common::read_mnemonic_from_cli; | ||||
| use crate::validator::create::COUNT_FLAG; | ||||
| use crate::wallet::create::STDIN_INPUTS_FLAG; | ||||
| use crate::SECRETS_DIR_FLAG; | ||||
| use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; | ||||
| use account_utils::random_password; | ||||
| use account_utils::{random_password, read_mnemonic_from_cli}; | ||||
| use clap::{App, Arg, ArgMatches}; | ||||
| use directory::ensure_dir_exists; | ||||
| use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; | ||||
|  | ||||
| @ -7,7 +7,7 @@ use slashing_protection::{ | ||||
| use std::fs::File; | ||||
| use std::path::PathBuf; | ||||
| use std::str::FromStr; | ||||
| use types::{BeaconState, Epoch, EthSpec, PublicKeyBytes, Slot}; | ||||
| use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; | ||||
|  | ||||
| pub const CMD: &str = "slashing-protection"; | ||||
| pub const IMPORT_CMD: &str = "import"; | ||||
| @ -81,20 +81,13 @@ pub fn cli_run<T: EthSpec>( | ||||
|     validator_base_dir: PathBuf, | ||||
| ) -> Result<(), String> { | ||||
|     let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME); | ||||
|  | ||||
|     let eth2_network_config = env | ||||
|         .eth2_network_config | ||||
|         .ok_or("Unable to get testnet configuration from the environment")?; | ||||
|  | ||||
|     let genesis_validators_root = eth2_network_config | ||||
|         .beacon_state::<T>() | ||||
|         .map(|state: BeaconState<T>| state.genesis_validators_root()) | ||||
|         .map_err(|e| { | ||||
|             format!( | ||||
|                 "Unable to get genesis state, has genesis occurred? Detail: {:?}", | ||||
|                 e | ||||
|             ) | ||||
|         })?; | ||||
|         .genesis_validators_root::<T>()? | ||||
|         .ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?; | ||||
|  | ||||
|     match matches.subcommand() { | ||||
|         (IMPORT_CMD, Some(matches)) => { | ||||
|  | ||||
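Note the `ok_or_else` here: unlike `ok_or`, it defers building the error `String` until the `None` case is actually hit. A standalone illustration (not Lighthouse code):

    fn genesis_root_or_err(root: Option<u64>) -> Result<u64, String> {
        // The closure only runs, and the String is only allocated, on failure.
        root.ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())
    }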
| @ -1,6 +1,6 @@ | ||||
| use crate::common::read_mnemonic_from_cli; | ||||
| use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG}; | ||||
| use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; | ||||
| use account_utils::read_mnemonic_from_cli; | ||||
| use clap::{App, Arg, ArgMatches}; | ||||
| use std::path::PathBuf; | ||||
|  | ||||
|  | ||||
| @ -1,8 +1,11 @@ | ||||
| [package] | ||||
| name = "beacon_node" | ||||
| version = "4.2.0" | ||||
| authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"] | ||||
| edition = "2021" | ||||
| version = "4.5.0" | ||||
| authors = [ | ||||
|     "Paul Hauner <paul@paulhauner.com>", | ||||
|     "Age Manning <Age@AgeManning.com", | ||||
| ] | ||||
| edition = { workspace = true } | ||||
|  | ||||
| [lib] | ||||
| name = "beacon_node" | ||||
| @ -12,33 +15,35 @@ path = "src/lib.rs" | ||||
| node_test_rig = { path = "../testing/node_test_rig" } | ||||
|  | ||||
| [features] | ||||
| write_ssz_files = ["beacon_chain/write_ssz_files"]  # Writes debugging .ssz files to /tmp during block processing. | ||||
| write_ssz_files = [ | ||||
|     "beacon_chain/write_ssz_files", | ||||
| ] # Writes debugging .ssz files to /tmp during block processing. | ||||
|  | ||||
| [dependencies] | ||||
| eth2_config = { path = "../common/eth2_config" } | ||||
| beacon_chain = { path = "beacon_chain" } | ||||
| types = { path = "../consensus/types" } | ||||
| store = { path = "./store" } | ||||
| eth2_config = { workspace = true } | ||||
| beacon_chain = { workspace = true } | ||||
| types = { workspace = true } | ||||
| store = { workspace = true } | ||||
| client = { path = "client" } | ||||
| clap = "2.33.3" | ||||
| slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } | ||||
| dirs = "3.0.1" | ||||
| directory = {path = "../common/directory"} | ||||
| futures = "0.3.7" | ||||
| environment = { path = "../lighthouse/environment" } | ||||
| task_executor = { path = "../common/task_executor" } | ||||
| genesis = { path = "genesis" } | ||||
| eth2_network_config = { path = "../common/eth2_network_config" } | ||||
| execution_layer = { path = "execution_layer" } | ||||
| lighthouse_network = { path = "./lighthouse_network" } | ||||
| serde = "1.0.116" | ||||
| clap_utils = { path = "../common/clap_utils" } | ||||
| hyper = "0.14.4" | ||||
| lighthouse_version = { path = "../common/lighthouse_version" } | ||||
| hex = "0.4.2" | ||||
| slasher = { path = "../slasher", default-features = false } | ||||
| monitoring_api = { path = "../common/monitoring_api" } | ||||
| sensitive_url = { path = "../common/sensitive_url" } | ||||
| http_api = { path = "http_api" } | ||||
| unused_port = { path = "../common/unused_port" } | ||||
| strum = "0.24.1" | ||||
| clap = { workspace = true } | ||||
| slog = { workspace = true } | ||||
| dirs = { workspace = true } | ||||
| directory = { workspace = true } | ||||
| futures = { workspace = true } | ||||
| environment = { workspace = true } | ||||
| task_executor = { workspace = true } | ||||
| genesis = { workspace = true } | ||||
| eth2_network_config = { workspace = true } | ||||
| execution_layer = { workspace = true } | ||||
| lighthouse_network = { workspace = true } | ||||
| serde = { workspace = true } | ||||
| clap_utils = { workspace = true } | ||||
| hyper = { workspace = true } | ||||
| lighthouse_version = { workspace = true } | ||||
| hex = { workspace = true } | ||||
| slasher = { workspace = true } | ||||
| monitoring_api = { workspace = true } | ||||
| sensitive_url = { workspace = true } | ||||
| http_api = { workspace = true } | ||||
| unused_port = { workspace = true } | ||||
| strum = { workspace = true } | ||||
|  | ||||
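These `{ workspace = true }` entries inherit their versions, paths and features from a single `[workspace.dependencies]` table in the root Cargo.toml (a Cargo 1.64+ feature). That root table is not part of this excerpt; a minimal sketch of what it would contain, with entries illustrative rather than copied from this diff:

    [workspace.dependencies]
    clap = "2.33.3"
    slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
    directory = { path = "common/directory" }

Member crates then opt in per dependency with `clap = { workspace = true }`, so version bumps happen in one place.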
| @ -2,7 +2,7 @@ | ||||
| name = "beacon_chain" | ||||
| version = "0.2.0" | ||||
| authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"] | ||||
| edition = "2021" | ||||
| edition = { workspace = true } | ||||
| autotests = false # using a single test binary compiles faster | ||||
|  | ||||
| [features] | ||||
| @ -12,59 +12,59 @@ participation_metrics = []  # Exposes validator participation metrics to Prometh | ||||
| fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable | ||||
|  | ||||
| [dev-dependencies] | ||||
| maplit = "1.0.2" | ||||
| environment = { path = "../../lighthouse/environment" } | ||||
| serde_json = "1.0.58" | ||||
| maplit = { workspace = true } | ||||
| environment = { workspace = true } | ||||
| serde_json = { workspace = true } | ||||
|  | ||||
| [dependencies] | ||||
| merkle_proof = { path = "../../consensus/merkle_proof" } | ||||
| store = { path = "../store" } | ||||
| parking_lot = "0.12.0" | ||||
| lazy_static = "1.4.0" | ||||
| smallvec = "1.6.1" | ||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics" } | ||||
| operation_pool = { path = "../operation_pool" } | ||||
| rayon = "1.4.1" | ||||
| serde = "1.0.116" | ||||
| serde_derive = "1.0.116" | ||||
| slog = { version = "2.5.2", features = ["max_level_trace"] } | ||||
| sloggers = { version = "2.1.1", features = ["json"] } | ||||
| slot_clock = { path = "../../common/slot_clock" } | ||||
| ethereum_hashing = "1.0.0-beta.2" | ||||
| ethereum_ssz = "0.5.0" | ||||
| ssz_types = "0.5.0" | ||||
| ethereum_ssz_derive = "0.5.0" | ||||
| state_processing = { path = "../../consensus/state_processing" } | ||||
| tree_hash = "0.5.0" | ||||
| types = { path = "../../consensus/types" } | ||||
| tokio = "1.14.0" | ||||
| tokio-stream = "0.1.3" | ||||
| eth1 = { path = "../eth1" } | ||||
| futures = "0.3.7" | ||||
| genesis = { path = "../genesis" } | ||||
| int_to_bytes = { path = "../../consensus/int_to_bytes" } | ||||
| rand = "0.8.5" | ||||
| proto_array = { path = "../../consensus/proto_array" } | ||||
| lru = "0.7.1" | ||||
| tempfile = "3.1.0" | ||||
| bitvec = "0.20.4" | ||||
| bls = { path = "../../crypto/bls" } | ||||
| safe_arith = { path = "../../consensus/safe_arith" } | ||||
| fork_choice = { path = "../../consensus/fork_choice" } | ||||
| task_executor = { path = "../../common/task_executor" } | ||||
| derivative = "2.1.1" | ||||
| itertools = "0.10.0" | ||||
| slasher = { path = "../../slasher", default-features = false } | ||||
| eth2 = { path = "../../common/eth2" } | ||||
| strum = { version = "0.24.0", features = ["derive"] } | ||||
| logging = { path = "../../common/logging" } | ||||
| execution_layer = { path = "../execution_layer" } | ||||
| sensitive_url = { path = "../../common/sensitive_url" } | ||||
| superstruct = "0.5.0" | ||||
| hex = "0.4.2" | ||||
| exit-future = "0.2.0" | ||||
| unused_port = {path = "../../common/unused_port"} | ||||
| oneshot_broadcast = { path = "../../common/oneshot_broadcast" } | ||||
| merkle_proof = { workspace = true } | ||||
| store = { workspace = true } | ||||
| parking_lot = { workspace = true } | ||||
| lazy_static = { workspace = true } | ||||
| smallvec = { workspace = true } | ||||
| lighthouse_metrics = { workspace = true } | ||||
| operation_pool = { workspace = true } | ||||
| rayon = { workspace = true } | ||||
| serde = { workspace = true } | ||||
| ethereum_serde_utils = { workspace = true } | ||||
| slog = { workspace = true } | ||||
| sloggers = { workspace = true } | ||||
| slot_clock = { workspace = true } | ||||
| ethereum_hashing = { workspace = true } | ||||
| ethereum_ssz = { workspace = true } | ||||
| ssz_types = { workspace = true } | ||||
| ethereum_ssz_derive = { workspace = true } | ||||
| state_processing = { workspace = true } | ||||
| tree_hash_derive = { workspace = true } | ||||
| tree_hash = { workspace = true } | ||||
| types = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| tokio-stream = { workspace = true } | ||||
| eth1 = { workspace = true } | ||||
| futures = { workspace = true } | ||||
| genesis = { workspace = true } | ||||
| int_to_bytes = { workspace = true } | ||||
| rand = { workspace = true } | ||||
| proto_array = { workspace = true } | ||||
| lru = { workspace = true } | ||||
| tempfile = { workspace = true } | ||||
| bitvec = { workspace = true } | ||||
| bls = { workspace = true } | ||||
| safe_arith = { workspace = true } | ||||
| fork_choice = { workspace = true } | ||||
| task_executor = { workspace = true } | ||||
| derivative = { workspace = true } | ||||
| itertools = { workspace = true } | ||||
| slasher = { workspace = true } | ||||
| eth2 = { workspace = true } | ||||
| strum = { workspace = true } | ||||
| logging = { workspace = true } | ||||
| execution_layer = { workspace = true } | ||||
| sensitive_url = { workspace = true } | ||||
| superstruct = { workspace = true } | ||||
| hex = { workspace = true } | ||||
| exit-future = { workspace = true } | ||||
| oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } | ||||
|  | ||||
| [[test]] | ||||
| name = "beacon_chain_tests" | ||||
|  | ||||
| @ -3,7 +3,8 @@ use eth2::lighthouse::attestation_rewards::{IdealAttestationRewards, TotalAttest | ||||
| use eth2::lighthouse::StandardAttestationRewards; | ||||
| use participation_cache::ParticipationCache; | ||||
| use safe_arith::SafeArith; | ||||
| use slog::{debug, Logger}; | ||||
| use serde_utils::quoted_u64::Quoted; | ||||
| use slog::debug; | ||||
| use state_processing::{ | ||||
|     common::altair::BaseRewardPerIncrement, | ||||
|     per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight}, | ||||
| @ -15,32 +16,111 @@ use store::consts::altair::{ | ||||
| }; | ||||
| use types::consts::altair::WEIGHT_DENOMINATOR; | ||||
|  | ||||
| use types::{Epoch, EthSpec}; | ||||
| use types::{BeaconState, Epoch, EthSpec}; | ||||
|  | ||||
| use eth2::types::ValidatorId; | ||||
| use state_processing::common::base::get_base_reward_from_effective_balance; | ||||
| use state_processing::per_epoch_processing::base::rewards_and_penalties::{ | ||||
|     get_attestation_component_delta, get_attestation_deltas_all, get_attestation_deltas_subset, | ||||
|     get_inactivity_penalty_delta, get_inclusion_delay_delta, | ||||
| }; | ||||
| use state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo; | ||||
| use state_processing::per_epoch_processing::base::{ | ||||
|     TotalBalances, ValidatorStatus, ValidatorStatuses, | ||||
| }; | ||||
|  | ||||
| impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|     pub fn compute_attestation_rewards( | ||||
|         &self, | ||||
|         epoch: Epoch, | ||||
|         validators: Vec<ValidatorId>, | ||||
|         log: Logger, | ||||
|     ) -> Result<StandardAttestationRewards, BeaconChainError> { | ||||
|         debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); | ||||
|         debug!(self.log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); | ||||
|  | ||||
|         // Get state | ||||
|         let spec = &self.spec; | ||||
|  | ||||
|         let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch()); | ||||
|  | ||||
|         let state_root = self | ||||
|             .state_root_at_slot(state_slot)? | ||||
|             .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; | ||||
|  | ||||
|         let mut state = self | ||||
|         let state = self | ||||
|             .get_state(&state_root, Some(state_slot))? | ||||
|             .ok_or(BeaconChainError::MissingBeaconState(state_root))?; | ||||
|  | ||||
|         match state { | ||||
|             BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators), | ||||
|             BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { | ||||
|                 self.compute_attestation_rewards_altair(state, validators) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     fn compute_attestation_rewards_base( | ||||
|         &self, | ||||
|         mut state: BeaconState<T::EthSpec>, | ||||
|         validators: Vec<ValidatorId>, | ||||
|     ) -> Result<StandardAttestationRewards, BeaconChainError> { | ||||
|         let spec = &self.spec; | ||||
|         let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; | ||||
|         validator_statuses.process_attestations(&state)?; | ||||
|  | ||||
|         let ideal_rewards = | ||||
|             self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?; | ||||
|  | ||||
|         let indices_to_attestation_delta = if validators.is_empty() { | ||||
|             get_attestation_deltas_all(&state, &validator_statuses, spec)? | ||||
|                 .into_iter() | ||||
|                 .enumerate() | ||||
|                 .collect() | ||||
|         } else { | ||||
|             let validator_indices = Self::validators_ids_to_indices(&mut state, validators)?; | ||||
|             get_attestation_deltas_subset(&state, &validator_statuses, &validator_indices, spec)? | ||||
|         }; | ||||
|  | ||||
|         let mut total_rewards = vec![]; | ||||
|  | ||||
|         for (index, delta) in indices_to_attestation_delta.into_iter() { | ||||
|             let head_delta = delta.head_delta; | ||||
|             let head = (head_delta.rewards as i64).safe_sub(head_delta.penalties as i64)?; | ||||
|  | ||||
|             let target_delta = delta.target_delta; | ||||
|             let target = (target_delta.rewards as i64).safe_sub(target_delta.penalties as i64)?; | ||||
|  | ||||
|             let source_delta = delta.source_delta; | ||||
|             let source = (source_delta.rewards as i64).safe_sub(source_delta.penalties as i64)?; | ||||
|  | ||||
|             // No penalties associated with inclusion delay | ||||
|             let inclusion_delay = delta.inclusion_delay_delta.rewards; | ||||
|             let inactivity = delta.inactivity_penalty_delta.penalties.wrapping_neg() as i64; | ||||
|  | ||||
|             let rewards = TotalAttestationRewards { | ||||
|                 validator_index: index as u64, | ||||
|                 head, | ||||
|                 target, | ||||
|                 source, | ||||
|                 inclusion_delay: Some(Quoted { | ||||
|                     value: inclusion_delay, | ||||
|                 }), | ||||
|                 inactivity, | ||||
|             }; | ||||
|  | ||||
|             total_rewards.push(rewards); | ||||
|         } | ||||
|  | ||||
|         Ok(StandardAttestationRewards { | ||||
|             ideal_rewards, | ||||
|             total_rewards, | ||||
|         }) | ||||
|     } | ||||
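Each per-validator component above is reported as a signed net value: rewards minus penalties, widened to `i64` so the subtraction cannot underflow. A worked example of the convention:

    // A delta of { rewards: 10, penalties: 25 } is reported as -15.
    let (rewards, penalties): (u64, u64) = (10, 25);
    let net = (rewards as i64) - (penalties as i64);
    assert_eq!(net, -15);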
|  | ||||
|     fn compute_attestation_rewards_altair( | ||||
|         &self, | ||||
|         mut state: BeaconState<T::EthSpec>, | ||||
|         validators: Vec<ValidatorId>, | ||||
|     ) -> Result<StandardAttestationRewards, BeaconChainError> { | ||||
|         let spec = &self.spec; | ||||
|  | ||||
|         // Calculate ideal_rewards | ||||
|         let participation_cache = ParticipationCache::new(&state, spec)?; | ||||
|  | ||||
| @ -71,7 +151,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|             let base_reward_per_increment = | ||||
|                 BaseRewardPerIncrement::new(total_active_balance, spec)?; | ||||
|  | ||||
|             for effective_balance_eth in 0..=32 { | ||||
|             for effective_balance_eth in 1..=self.max_effective_balance_increment_steps()? { | ||||
|                 let effective_balance = | ||||
|                     effective_balance_eth.safe_mul(spec.effective_balance_increment)?; | ||||
|                 let base_reward = | ||||
| @ -86,7 +166,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|                 let ideal_reward = reward_numerator | ||||
|                     .safe_div(active_increments)? | ||||
|                     .safe_div(WEIGHT_DENOMINATOR)?; | ||||
|                 if !state.is_in_inactivity_leak(previous_epoch, spec) { | ||||
|                 if !state.is_in_inactivity_leak(previous_epoch, spec)? { | ||||
|                     ideal_rewards_hashmap | ||||
|                         .insert((flag_index, effective_balance), (ideal_reward, penalty)); | ||||
|                 } else { | ||||
| @ -101,20 +181,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|         let validators = if validators.is_empty() { | ||||
|             participation_cache.eligible_validator_indices().to_vec() | ||||
|         } else { | ||||
|             validators | ||||
|                 .into_iter() | ||||
|                 .map(|validator| match validator { | ||||
|                     ValidatorId::Index(i) => Ok(i as usize), | ||||
|                     ValidatorId::PublicKey(pubkey) => state | ||||
|                         .get_validator_index(&pubkey)? | ||||
|                         .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), | ||||
|                 }) | ||||
|                 .collect::<Result<Vec<_>, _>>()? | ||||
|             Self::validators_ids_to_indices(&mut state, validators)? | ||||
|         }; | ||||
|  | ||||
|         for validator_index in &validators { | ||||
|             let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?; | ||||
|             let mut head_reward = 0u64; | ||||
|             let mut head_reward = 0i64; | ||||
|             let mut target_reward = 0i64; | ||||
|             let mut source_reward = 0i64; | ||||
|  | ||||
| @ -132,7 +204,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|                         .map_err(|_| BeaconChainError::AttestationRewardsError)?; | ||||
|                     if voted_correctly { | ||||
|                         if flag_index == TIMELY_HEAD_FLAG_INDEX { | ||||
|                             head_reward += ideal_reward; | ||||
|                             head_reward += *ideal_reward as i64; | ||||
|                         } else if flag_index == TIMELY_TARGET_FLAG_INDEX { | ||||
|                             target_reward += *ideal_reward as i64; | ||||
|                         } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { | ||||
| @ -152,6 +224,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|                 head: head_reward, | ||||
|                 target: target_reward, | ||||
|                 source: source_reward, | ||||
|                 inclusion_delay: None, | ||||
|                 // TODO: altair calculation logic needs to be updated to include inactivity penalty | ||||
|                 inactivity: 0, | ||||
|             }); | ||||
|         } | ||||
|  | ||||
| @ -173,6 +248,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|                             head: 0, | ||||
|                             target: 0, | ||||
|                             source: 0, | ||||
|                             inclusion_delay: None, | ||||
|                             // TODO: altair calculation logic needs to be updated to include inactivity penalty | ||||
|                             inactivity: 0, | ||||
|                         }); | ||||
|                     match *flag_index { | ||||
|                         TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward, | ||||
| @ -192,4 +270,126 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|             total_rewards, | ||||
|         }) | ||||
|     } | ||||
|  | ||||
|     fn max_effective_balance_increment_steps(&self) -> Result<u64, BeaconChainError> { | ||||
|         let spec = &self.spec; | ||||
|         let max_steps = spec | ||||
|             .max_effective_balance | ||||
|             .safe_div(spec.effective_balance_increment)?; | ||||
|         Ok(max_steps) | ||||
|     } | ||||
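With mainnet presets this evaluates to the same 32 buckets the old hard-coded `0..=32` range approximated, while also tracking non-mainnet specs and skipping the meaningless 0 Gwei bucket. The arithmetic, for illustration:

    // Mainnet: MAX_EFFECTIVE_BALANCE = 32 ETH, EFFECTIVE_BALANCE_INCREMENT = 1 ETH (in Gwei).
    let max_effective_balance: u64 = 32_000_000_000;
    let effective_balance_increment: u64 = 1_000_000_000;
    assert_eq!(max_effective_balance / effective_balance_increment, 32);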
|  | ||||
|     fn validators_ids_to_indices( | ||||
|         state: &mut BeaconState<T::EthSpec>, | ||||
|         validators: Vec<ValidatorId>, | ||||
|     ) -> Result<Vec<usize>, BeaconChainError> { | ||||
|         let indices = validators | ||||
|             .into_iter() | ||||
|             .map(|validator| match validator { | ||||
|                 ValidatorId::Index(i) => Ok(i as usize), | ||||
|                 ValidatorId::PublicKey(pubkey) => state | ||||
|                     .get_validator_index(&pubkey)? | ||||
|                     .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), | ||||
|             }) | ||||
|             .collect::<Result<Vec<_>, _>>()?; | ||||
|         Ok(indices) | ||||
|     } | ||||
|  | ||||
|     fn compute_ideal_rewards_base( | ||||
|         &self, | ||||
|         state: &BeaconState<T::EthSpec>, | ||||
|         total_balances: &TotalBalances, | ||||
|     ) -> Result<Vec<IdealAttestationRewards>, BeaconChainError> { | ||||
|         let spec = &self.spec; | ||||
|         let previous_epoch = state.previous_epoch(); | ||||
|         let finality_delay = previous_epoch | ||||
|             .safe_sub(state.finalized_checkpoint().epoch)? | ||||
|             .as_u64(); | ||||
|  | ||||
|         let ideal_validator_status = ValidatorStatus { | ||||
|             is_previous_epoch_attester: true, | ||||
|             is_slashed: false, | ||||
|             inclusion_info: Some(InclusionInfo { | ||||
|                 delay: 1, | ||||
|                 ..Default::default() | ||||
|             }), | ||||
|             ..Default::default() | ||||
|         }; | ||||
|  | ||||
|         let mut ideal_attestation_rewards_list = Vec::new(); | ||||
|  | ||||
|         for effective_balance_step in 1..=self.max_effective_balance_increment_steps()? { | ||||
|             let effective_balance = | ||||
|                 effective_balance_step.safe_mul(spec.effective_balance_increment)?; | ||||
|             let base_reward = get_base_reward_from_effective_balance::<T::EthSpec>( | ||||
|                 effective_balance, | ||||
|                 total_balances.current_epoch(), | ||||
|                 spec, | ||||
|             )?; | ||||
|  | ||||
|             // compute ideal head rewards | ||||
|             let head = get_attestation_component_delta( | ||||
|                 true, | ||||
|                 total_balances.previous_epoch_head_attesters(), | ||||
|                 total_balances, | ||||
|                 base_reward, | ||||
|                 finality_delay, | ||||
|                 spec, | ||||
|             )? | ||||
|             .rewards; | ||||
|  | ||||
|             // compute ideal target rewards | ||||
|             let target = get_attestation_component_delta( | ||||
|                 true, | ||||
|                 total_balances.previous_epoch_target_attesters(), | ||||
|                 total_balances, | ||||
|                 base_reward, | ||||
|                 finality_delay, | ||||
|                 spec, | ||||
|             )? | ||||
|             .rewards; | ||||
|  | ||||
|             // compute ideal source rewards | ||||
|             let source = get_attestation_component_delta( | ||||
|                 true, | ||||
|                 total_balances.previous_epoch_attesters(), | ||||
|                 total_balances, | ||||
|                 base_reward, | ||||
|                 finality_delay, | ||||
|                 spec, | ||||
|             )? | ||||
|             .rewards; | ||||
|  | ||||
|             // compute ideal inclusion delay rewards | ||||
|             let inclusion_delay = | ||||
|                 get_inclusion_delay_delta(&ideal_validator_status, base_reward, spec)? | ||||
|                     .0 | ||||
|                     .rewards; | ||||
|  | ||||
|             // compute inactivity penalty | ||||
|             let inactivity = get_inactivity_penalty_delta( | ||||
|                 &ideal_validator_status, | ||||
|                 base_reward, | ||||
|                 finality_delay, | ||||
|                 spec, | ||||
|             )? | ||||
|             .penalties | ||||
|             .wrapping_neg() as i64; | ||||
|  | ||||
|             let ideal_attestation_rewards = IdealAttestationRewards { | ||||
|                 effective_balance, | ||||
|                 head, | ||||
|                 target, | ||||
|                 source, | ||||
|                 inclusion_delay: Some(Quoted { | ||||
|                     value: inclusion_delay, | ||||
|                 }), | ||||
|                 inactivity, | ||||
|             }; | ||||
|  | ||||
|             ideal_attestation_rewards_list.push(ideal_attestation_rewards); | ||||
|         } | ||||
|  | ||||
|         Ok(ideal_attestation_rewards_list) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -35,10 +35,8 @@ | ||||
| mod batch; | ||||
|  | ||||
| use crate::{ | ||||
|     beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, | ||||
|     metrics, | ||||
|     observed_aggregates::ObserveOutcome, | ||||
|     observed_attesters::Error as ObservedAttestersError, | ||||
|     beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, | ||||
|     observed_aggregates::ObserveOutcome, observed_attesters::Error as ObservedAttestersError, | ||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, | ||||
| }; | ||||
| use bls::verify_signature_sets; | ||||
| @ -57,8 +55,8 @@ use std::borrow::Cow; | ||||
| use strum::AsRefStr; | ||||
| use tree_hash::TreeHash; | ||||
| use types::{ | ||||
|     Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, | ||||
|     SelectionProof, SignedAggregateAndProof, Slot, SubnetId, | ||||
|     Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, | ||||
|     IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, | ||||
| }; | ||||
|  | ||||
| pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; | ||||
| @ -117,14 +115,14 @@ pub enum Error { | ||||
|     /// | ||||
|     /// The peer has sent an invalid message. | ||||
|     AggregatorPubkeyUnknown(u64), | ||||
|     /// The attestation has been seen before; either in a block, on the gossip network or from a | ||||
|     /// local validator. | ||||
|     /// The attestation or a superset of this attestation's aggregation bits for the same data | ||||
|     /// has been seen before; either in a block, on the gossip network or from a local validator. | ||||
|     /// | ||||
|     /// ## Peer scoring | ||||
|     /// | ||||
|     /// It's unclear if this attestation is valid, however we have already observed it and do not | ||||
|     /// need to observe it again. | ||||
|     AttestationAlreadyKnown(Hash256), | ||||
|     AttestationSupersetKnown(Hash256), | ||||
|     /// There has already been an aggregation observed for this validator, we refuse to process a | ||||
|     /// second. | ||||
|     /// | ||||
| @ -268,7 +266,7 @@ enum CheckAttestationSignature { | ||||
| struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> { | ||||
|     signed_aggregate: &'a SignedAggregateAndProof<T::EthSpec>, | ||||
|     indexed_attestation: IndexedAttestation<T::EthSpec>, | ||||
|     attestation_root: Hash256, | ||||
|     attestation_data_root: Hash256, | ||||
| } | ||||
|  | ||||
| /// Wraps an `Attestation` that has been verified up until the point that an `IndexedAttestation` can | ||||
| @ -454,7 +452,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { | ||||
|         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). | ||||
|         // | ||||
|         // We do not queue future attestations for later processing. | ||||
|         verify_propagation_slot_range(&chain.slot_clock, attestation)?; | ||||
|         verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; | ||||
|  | ||||
|         // Check the attestation's epoch matches its target. | ||||
|         if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()) | ||||
| @ -467,14 +465,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { | ||||
|         } | ||||
|  | ||||
|         // Ensure the valid aggregated attestation has not already been seen locally. | ||||
|         let attestation_root = attestation.tree_hash_root(); | ||||
|         let attestation_data = &attestation.data; | ||||
|         let attestation_data_root = attestation_data.tree_hash_root(); | ||||
|  | ||||
|         if chain | ||||
|             .observed_attestations | ||||
|             .write() | ||||
|             .is_known(attestation, attestation_root) | ||||
|             .is_known_subset(attestation, attestation_data_root) | ||||
|             .map_err(|e| Error::BeaconChainError(e.into()))? | ||||
|         { | ||||
|             return Err(Error::AttestationAlreadyKnown(attestation_root)); | ||||
|             metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); | ||||
|             return Err(Error::AttestationSupersetKnown(attestation_data_root)); | ||||
|         } | ||||
|  | ||||
|         let aggregator_index = signed_aggregate.message.aggregator_index; | ||||
| @ -520,7 +521,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { | ||||
|         if attestation.aggregation_bits.is_zero() { | ||||
|             Err(Error::EmptyAggregationBitfield) | ||||
|         } else { | ||||
|             Ok(attestation_root) | ||||
|             Ok(attestation_data_root) | ||||
|         } | ||||
|     } | ||||
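The key change above: observations are now indexed by `attestation.data.tree_hash_root()` rather than the root of the whole attestation, so an aggregate is rejected when its aggregation bits are a subset of bits already seen for the same `AttestationData`. A standalone sketch of that containment test (the real `ObservedAttestations` logic is more involved):

    // `new` is redundant if every bit it sets is already set in `seen`.
    fn is_subset(new: &[bool], seen: &[bool]) -> bool {
        new.iter().zip(seen).all(|(&n, &s)| !n || s)
    }

    assert!(is_subset(&[true, false, true], &[true, true, true]));
    assert!(!is_subset(&[true, true, false], &[true, false, true]));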
|  | ||||
| @ -533,7 +534,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { | ||||
|  | ||||
|         let attestation = &signed_aggregate.message.aggregate; | ||||
|         let aggregator_index = signed_aggregate.message.aggregator_index; | ||||
|         let attestation_root = match Self::verify_early_checks(signed_aggregate, chain) { | ||||
|         let attestation_data_root = match Self::verify_early_checks(signed_aggregate, chain) { | ||||
|             Ok(root) => root, | ||||
|             Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)), | ||||
|         }; | ||||
| @ -568,7 +569,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { | ||||
|         Ok(IndexedAggregatedAttestation { | ||||
|             signed_aggregate, | ||||
|             indexed_attestation, | ||||
|             attestation_root, | ||||
|             attestation_data_root, | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| @ -577,7 +578,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { | ||||
|     /// Run the checks that happen after the indexed attestation and signature have been checked. | ||||
|     fn verify_late_checks( | ||||
|         signed_aggregate: &SignedAggregateAndProof<T::EthSpec>, | ||||
|         attestation_root: Hash256, | ||||
|         attestation_data_root: Hash256, | ||||
|         chain: &BeaconChain<T>, | ||||
|     ) -> Result<(), Error> { | ||||
|         let attestation = &signed_aggregate.message.aggregate; | ||||
| @ -587,13 +588,14 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { | ||||
|         // | ||||
|         // It's important to double check that the attestation is not already known, otherwise two | ||||
|         // attestations processed at the same time could be published. | ||||
|         if let ObserveOutcome::AlreadyKnown = chain | ||||
|         if let ObserveOutcome::Subset = chain | ||||
|             .observed_attestations | ||||
|             .write() | ||||
|             .observe_item(attestation, Some(attestation_root)) | ||||
|             .observe_item(attestation, Some(attestation_data_root)) | ||||
|             .map_err(|e| Error::BeaconChainError(e.into()))? | ||||
|         { | ||||
|             return Err(Error::AttestationAlreadyKnown(attestation_root)); | ||||
|             metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); | ||||
|             return Err(Error::AttestationSupersetKnown(attestation_data_root)); | ||||
|         } | ||||
|  | ||||
|         // Observe the aggregator so we don't process another aggregate from them. | ||||
| @ -653,7 +655,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { | ||||
|         let IndexedAggregatedAttestation { | ||||
|             signed_aggregate, | ||||
|             indexed_attestation, | ||||
|             attestation_root, | ||||
|             attestation_data_root, | ||||
|         } = signed_aggregate; | ||||
|  | ||||
|         match check_signature { | ||||
| @ -677,7 +679,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { | ||||
|             CheckAttestationSignature::No => (), | ||||
|         }; | ||||
| 
 | ||||
|         if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_root, chain) { | ||||
|         if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_data_root, chain) { | ||||
|             return Err(SignatureValid(indexed_attestation, e)); | ||||
|         } | ||||
|  | ||||
| @ -718,7 +720,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { | ||||
|         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). | ||||
|         // | ||||
|         // We do not queue future attestations for later processing. | ||||
|         verify_propagation_slot_range(&chain.slot_clock, attestation)?; | ||||
|         verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; | ||||
|  | ||||
|         // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one | ||||
|         // aggregation bit set. | ||||
| @ -1033,11 +1035,11 @@ fn verify_head_block_is_known<T: BeaconChainTypes>( | ||||
| pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>( | ||||
|     slot_clock: &S, | ||||
|     attestation: &Attestation<E>, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<(), Error> { | ||||
|     let attestation_slot = attestation.data.slot; | ||||
|  | ||||
|     let latest_permissible_slot = slot_clock | ||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) | ||||
|         .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) | ||||
|         .ok_or(BeaconChainError::UnableToReadSlot)?; | ||||
|     if attestation_slot > latest_permissible_slot { | ||||
|         return Err(Error::FutureSlot { | ||||
| @ -1048,7 +1050,7 @@ pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>( | ||||
|  | ||||
|     // Taking advantage of saturating subtraction on `Slot`. | ||||
|     let earliest_permissible_slot = slot_clock | ||||
|         .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) | ||||
|         .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) | ||||
|         .ok_or(BeaconChainError::UnableToReadSlot)? | ||||
|         - E::slots_per_epoch(); | ||||
|     if attestation_slot < earliest_permissible_slot { | ||||
|  | ||||
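Taken together, the two checks accept attestations whose slot lies in a window of roughly one epoch behind the clock up to the clock disparity ahead of it, with the disparity now read from the chain spec instead of the deleted `MAXIMUM_GOSSIP_CLOCK_DISPARITY` constant. Illustrative bounds, assuming 32-slot epochs:

    // With the clock at slot 101, acceptable attestation slots are
    // roughly 101 - 32 ..= 101 (plus or minus the clock disparity).
    let current_slot: u64 = 101;
    let slots_per_epoch: u64 = 32;
    assert_eq!(current_slot.saturating_sub(slots_per_epoch), 69);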
| @ -63,7 +63,6 @@ use execution_layer::{ | ||||
|     BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, | ||||
|     PayloadAttributes, PayloadStatus, | ||||
| }; | ||||
| pub use fork_choice::CountUnrealized; | ||||
| use fork_choice::{ | ||||
|     AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, | ||||
|     InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, | ||||
| @ -165,7 +164,7 @@ pub enum WhenSlotSkipped { | ||||
|     /// | ||||
|     /// This is how the HTTP API behaves. | ||||
|     None, | ||||
|     /// If the slot it a skip slot, return the previous non-skipped block. | ||||
|     /// If the slot is a skip slot, return the previous non-skipped block. | ||||
|     /// | ||||
|     /// This is generally how the specification behaves. | ||||
|     Prev, | ||||
| @ -198,6 +197,17 @@ pub struct PrePayloadAttributes { | ||||
|     pub parent_block_number: u64, | ||||
| } | ||||
|  | ||||
| /// Information about a state/block at a specific slot. | ||||
| #[derive(Debug, Clone, Copy)] | ||||
| pub struct FinalizationAndCanonicity { | ||||
|     /// True if the slot of the state or block is finalized. | ||||
|     /// | ||||
|     /// This alone DOES NOT imply that the state/block is finalized, use `self.is_finalized()`. | ||||
|     pub slot_is_finalized: bool, | ||||
|     /// True if the state or block is canonical at its slot. | ||||
|     pub canonical: bool, | ||||
| } | ||||
|  | ||||
| /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already | ||||
| /// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled. | ||||
| #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] | ||||
| @ -207,11 +217,6 @@ pub enum OverrideForkchoiceUpdate { | ||||
|     AlreadyApplied, | ||||
| } | ||||
|  | ||||
| /// The accepted clock drift for nodes gossiping blocks and attestations. See: | ||||
| /// | ||||
| /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration | ||||
| pub const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500); | ||||
|  | ||||
| #[derive(Debug, PartialEq)] | ||||
| pub enum AttestationProcessingOutcome { | ||||
|     Processed, | ||||
| @ -427,6 +432,12 @@ pub struct BeaconChain<T: BeaconChainTypes> { | ||||
|  | ||||
| type BeaconBlockAndState<T, Payload> = (BeaconBlock<T, Payload>, BeaconState<T>); | ||||
|  | ||||
| impl FinalizationAndCanonicity { | ||||
|     pub fn is_finalized(self) -> bool { | ||||
|         self.slot_is_finalized && self.canonical | ||||
|     } | ||||
| } | ||||
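Both flags must hold: a state whose slot is finalized but which sits on a pruned fork is not itself finalized. A hedged usage sketch:

    // Illustrative: finalized slot, non-canonical root => not finalized.
    let status = FinalizationAndCanonicity {
        slot_is_finalized: true,
        canonical: false,
    };
    assert!(!status.is_finalized());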
|  | ||||
| impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|     /// Checks if a block is finalized. | ||||
|     /// The finalization check is done with the block slot. The block root is used to verify that | ||||
| @ -456,16 +467,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|         state_root: &Hash256, | ||||
|         state_slot: Slot, | ||||
|     ) -> Result<bool, Error> { | ||||
|         self.state_finalization_and_canonicity(state_root, state_slot) | ||||
|             .map(FinalizationAndCanonicity::is_finalized) | ||||
|     } | ||||
|  | ||||
|     /// Fetch the finalization and canonicity status of the state with `state_root`. | ||||
|     pub fn state_finalization_and_canonicity( | ||||
|         &self, | ||||
|         state_root: &Hash256, | ||||
|         state_slot: Slot, | ||||
|     ) -> Result<FinalizationAndCanonicity, Error> { | ||||
|         let finalized_slot = self | ||||
|             .canonical_head | ||||
|             .cached_head() | ||||
|             .finalized_checkpoint() | ||||
|             .epoch | ||||
|             .start_slot(T::EthSpec::slots_per_epoch()); | ||||
|         let is_canonical = self | ||||
|         let slot_is_finalized = state_slot <= finalized_slot; | ||||
|         let canonical = self | ||||
|             .state_root_at_slot(state_slot)? | ||||
|             .map_or(false, |canonical_root| state_root == &canonical_root); | ||||
|         Ok(state_slot <= finalized_slot && is_canonical) | ||||
|         Ok(FinalizationAndCanonicity { | ||||
|             slot_is_finalized, | ||||
|             canonical, | ||||
|         }) | ||||
|     } | ||||
|  | ||||
|     /// Persists the head tracker and fork choice. | ||||
| @ -784,10 +809,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|     /// | ||||
|     /// May return a database error. | ||||
|     pub fn state_root_at_slot(&self, request_slot: Slot) -> Result<Option<Hash256>, Error> { | ||||
|         if request_slot > self.slot()? { | ||||
|             return Ok(None); | ||||
|         } else if request_slot == self.spec.genesis_slot { | ||||
|         if request_slot == self.spec.genesis_slot { | ||||
|             return Ok(Some(self.genesis_state_root)); | ||||
|         } else if request_slot > self.slot()? { | ||||
|             return Ok(None); | ||||
|         } | ||||
|  | ||||
|         // Check limits w.r.t historic state bounds. | ||||
| @ -864,10 +889,10 @@ | ||||
|     /// | ||||
|     /// May return a database error. | ||||
|     fn block_root_at_slot_skips_none(&self, request_slot: Slot) -> Result<Option<Hash256>, Error> { | ||||
|         if request_slot > self.slot()? { | ||||
|             return Ok(None); | ||||
|         } else if request_slot == self.spec.genesis_slot { | ||||
|         if request_slot == self.spec.genesis_slot { | ||||
|             return Ok(Some(self.genesis_block_root)); | ||||
|         } else if request_slot > self.slot()? { | ||||
|             return Ok(None); | ||||
|         } | ||||
|  | ||||
|         let prev_slot = request_slot.saturating_sub(1_u64); | ||||
| @ -927,10 +952,10 @@ | ||||
|     /// | ||||
|     /// May return a database error. | ||||
|     fn block_root_at_slot_skips_prev(&self, request_slot: Slot) -> Result<Option<Hash256>, Error> { | ||||
|         if request_slot > self.slot()? { | ||||
|             return Ok(None); | ||||
|         } else if request_slot == self.spec.genesis_slot { | ||||
|         if request_slot == self.spec.genesis_slot { | ||||
|             return Ok(Some(self.genesis_block_root)); | ||||
|         } else if request_slot > self.slot()? { | ||||
|             return Ok(None); | ||||
|         } | ||||
|  | ||||
|         // Try an optimized path of reading the root directly from the head state. | ||||
| @ -2510,7 +2535,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|     pub async fn process_chain_segment( | ||||
|         self: &Arc<Self>, | ||||
|         chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>, | ||||
|         count_unrealized: CountUnrealized, | ||||
|         notify_execution_layer: NotifyExecutionLayer, | ||||
|     ) -> ChainSegmentResult<T::EthSpec> { | ||||
|  | ||||
|                     .process_block( | ||||
|                         signature_verified_block.block_root(), | ||||
|                         signature_verified_block, | ||||
|                         count_unrealized, | ||||
|                         notify_execution_layer, | ||||
|                         || Ok(()), | ||||
|                     ) | ||||
|                     .await | ||||
|                 { | ||||
| @ -2668,8 +2692,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|         self: &Arc<Self>, | ||||
|         block_root: Hash256, | ||||
|         unverified_block: B, | ||||
|         count_unrealized: CountUnrealized, | ||||
|         notify_execution_layer: NotifyExecutionLayer, | ||||
|         publish_fn: impl FnOnce() -> Result<(), BlockError<T::EthSpec>> + Send + 'static, | ||||
|     ) -> Result<Hash256, BlockError<T::EthSpec>> { | ||||
|         // Start the Prometheus timer. | ||||
|         let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); | ||||
| @ -2688,8 +2712,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|                 &chain, | ||||
|                 notify_execution_layer, | ||||
|             )?; | ||||
|             publish_fn()?; | ||||
|             chain | ||||
|                 .import_execution_pending_block(execution_pending, count_unrealized) | ||||
|                 .import_execution_pending_block(execution_pending) | ||||
|                 .await | ||||
|         }; | ||||
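The new `publish_fn` hook lets a caller publish the block to gossip only once consensus checks pass, but before the slow import completes. A hedged sketch of a call site, assuming a gossip-verified block and the `NotifyExecutionLayer::Yes` variant:

    // Illustrative call; the closure is the hypothetical publish hook.
    let root = chain
        .process_block(block_root, verified_block, NotifyExecutionLayer::Yes, || {
            // e.g. broadcast the block to the gossip network here.
            Ok(())
        })
        .await?;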
|  | ||||
| @ -2729,7 +2754,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|             } | ||||
|             // The block failed verification. | ||||
|             Err(other) => { | ||||
|                 trace!( | ||||
|                 debug!( | ||||
|                     self.log, | ||||
|                     "Beacon block rejected"; | ||||
|                     "reason" => other.to_string(), | ||||
| @ -2744,10 +2769,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|     /// | ||||
|     /// An error is returned if the block was unable to be imported. It may be partially imported | ||||
|     /// (i.e., this function is not atomic). | ||||
|     async fn import_execution_pending_block( | ||||
|     pub async fn import_execution_pending_block( | ||||
|         self: Arc<Self>, | ||||
|         execution_pending_block: ExecutionPendingBlock<T>, | ||||
|         count_unrealized: CountUnrealized, | ||||
|     ) -> Result<Hash256, BlockError<T::EthSpec>> { | ||||
|         let ExecutionPendingBlock { | ||||
|             block, | ||||
| @ -2808,7 +2832,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|                         state, | ||||
|                         confirmed_state_roots, | ||||
|                         payload_verification_status, | ||||
|                         count_unrealized, | ||||
|                         parent_block, | ||||
|                         parent_eth1_finalization_data, | ||||
|                         consensus_context, | ||||
| @ -2834,7 +2857,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|         mut state: BeaconState<T::EthSpec>, | ||||
|         confirmed_state_roots: Vec<Hash256>, | ||||
|         payload_verification_status: PayloadVerificationStatus, | ||||
|         count_unrealized: CountUnrealized, | ||||
|         parent_block: SignedBlindedBeaconBlock<T::EthSpec>, | ||||
|         parent_eth1_finalization_data: Eth1FinalizationData, | ||||
|         mut consensus_context: ConsensusContext<T::EthSpec>, | ||||
| @ -2902,8 +2924,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|                     block_delay, | ||||
|                     &state, | ||||
|                     payload_verification_status, | ||||
|                     self.config.progressive_balances_mode, | ||||
|                     &self.spec, | ||||
|                     count_unrealized, | ||||
|                     &self.log, | ||||
|                 ) | ||||
|                 .map_err(|e| BlockError::BeaconChainError(e.into()))?; | ||||
|         } | ||||
| @ -4633,6 +4656,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|             self.log, | ||||
|             "Produced block on state"; | ||||
|             "block_size" => block_size, | ||||
|             "slot" => block.slot(), | ||||
|         ); | ||||
|  | ||||
|         metrics::observe(&metrics::BLOCK_SIZE, block_size as f64); | ||||
| @ -5548,14 +5572,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|             let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { | ||||
|                 (state, state_root) | ||||
|             } else { | ||||
|                 let state_root = head_block.state_root; | ||||
|                 let state = self | ||||
|                 let block_state_root = head_block.state_root; | ||||
|                 let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch()); | ||||
|                 let (state_root, state) = self | ||||
|                     .store | ||||
|                     .get_inconsistent_state_for_attestation_verification_only( | ||||
|                         &state_root, | ||||
|                         Some(head_block.slot), | ||||
|                         &head_block_root, | ||||
|                         max_slot, | ||||
|                         block_state_root, | ||||
|                     )? | ||||
|                     .ok_or(Error::MissingBeaconState(head_block.state_root))?; | ||||
|                     .ok_or(Error::MissingBeaconState(block_state_root))?; | ||||
|                 (state, state_root) | ||||
|             }; | ||||
|  | ||||
| @ -5707,13 +5733,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|     /// Since we are likely calling this during the slot we are going to propose in, don't take into | ||||
|     /// account the current slot when accounting for skips. | ||||
|     pub fn is_healthy(&self, parent_root: &Hash256) -> Result<ChainHealth, Error> { | ||||
|         let cached_head = self.canonical_head.cached_head(); | ||||
|         // Check if the merge has been finalized. | ||||
|         if let Some(finalized_hash) = self | ||||
|             .canonical_head | ||||
|             .cached_head() | ||||
|             .forkchoice_update_parameters() | ||||
|             .finalized_hash | ||||
|         { | ||||
|         if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash { | ||||
|             if ExecutionBlockHash::zero() == finalized_hash { | ||||
|                 return Ok(ChainHealth::PreMerge); | ||||
|             } | ||||
| @ -5740,17 +5762,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|  | ||||
|         // Check slots at the head of the chain. | ||||
|         let prev_slot = current_slot.saturating_sub(Slot::new(1)); | ||||
|         let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); | ||||
|         let head_skips = prev_slot.saturating_sub(cached_head.head_slot()); | ||||
|         let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; | ||||
|  | ||||
|         // Check if finalization is advancing. | ||||
|         let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); | ||||
|         let epochs_since_finalization = current_epoch.saturating_sub( | ||||
|             self.canonical_head | ||||
|                 .cached_head() | ||||
|                 .finalized_checkpoint() | ||||
|                 .epoch, | ||||
|         ); | ||||
|         let epochs_since_finalization = | ||||
|             current_epoch.saturating_sub(cached_head.finalized_checkpoint().epoch); | ||||
|         let finalization_check = epochs_since_finalization.as_usize() | ||||
|             <= self.config.builder_fallback_epochs_since_finalization; | ||||
|  | ||||
|  | ||||
| @ -321,9 +321,17 @@ where | ||||
|                 .deconstruct() | ||||
|                 .0; | ||||
|  | ||||
|             let state = self | ||||
|             let max_slot = self | ||||
|                 .justified_checkpoint | ||||
|                 .epoch | ||||
|                 .start_slot(E::slots_per_epoch()); | ||||
|             let (_, state) = self | ||||
|                 .store | ||||
|                 .get_state(&justified_block.state_root(), Some(justified_block.slot())) | ||||
|                 .get_advanced_hot_state( | ||||
|                     self.justified_checkpoint.root, | ||||
|                     max_slot, | ||||
|                     justified_block.state_root(), | ||||
|                 ) | ||||
|                 .map_err(Error::FailedToReadState)? | ||||
|                 .ok_or_else(|| Error::MissingState(justified_block.state_root()))?; | ||||
|  | ||||
|  | ||||
| @ -135,7 +135,7 @@ impl BeaconProposerCache { | ||||
|  | ||||
| /// Compute the proposer duties using the head state without cache. | ||||
| pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>( | ||||
|     current_epoch: Epoch, | ||||
|     request_epoch: Epoch, | ||||
|     chain: &BeaconChain<T>, | ||||
| ) -> Result<(Vec<usize>, Hash256, ExecutionStatus, Fork), BeaconChainError> { | ||||
|     // Atomically collect information about the head whilst holding the canonical head `Arc` as | ||||
| @ -159,7 +159,7 @@ pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>( | ||||
|         .ok_or(BeaconChainError::HeadMissingFromForkChoice(head_block_root))?; | ||||
|  | ||||
|     // Advance the state into the requested epoch. | ||||
|     ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?; | ||||
|     ensure_state_is_in_epoch(&mut state, head_state_root, request_epoch, &chain.spec)?; | ||||
|  | ||||
|     let indices = state | ||||
|         .get_beacon_proposer_indices(&chain.spec) | ||||
|  | ||||
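The `current_epoch` to `request_epoch` rename reflects that the head state is advanced to whatever epoch the duties request names, which may be ahead of the wall clock. The slot arithmetic involved, for illustration:

    // A duties request for epoch 10 advances the state to that epoch's
    // first slot (32-slot epochs assumed).
    let request_epoch: u64 = 10;
    let slots_per_epoch: u64 = 32;
    assert_eq!(request_epoch * slots_per_epoch, 320);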
| @ -1,4 +1,4 @@ | ||||
| use serde_derive::Serialize; | ||||
| use serde::Serialize; | ||||
| use std::sync::Arc; | ||||
| use types::{ | ||||
|     beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, | ||||
|  | ||||
| @ -52,13 +52,14 @@ use crate::execution_payload::{ | ||||
|     is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, | ||||
|     AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, | ||||
| }; | ||||
| use crate::observed_block_producers::SeenBlock; | ||||
| use crate::snapshot_cache::PreProcessingSnapshot; | ||||
| use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; | ||||
| use crate::validator_pubkey_cache::ValidatorPubkeyCache; | ||||
| use crate::{ | ||||
|     beacon_chain::{ | ||||
|         BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, | ||||
|         MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, | ||||
|         VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, | ||||
|     }, | ||||
|     metrics, BeaconChain, BeaconChainError, BeaconChainTypes, | ||||
| }; | ||||
| @ -141,8 +142,6 @@ pub enum BlockError<T: EthSpec> { | ||||
|     /// It's unclear if this block is valid, but it cannot be processed without already knowing | ||||
|     /// its parent. | ||||
|     ParentUnknown(Arc<SignedBeaconBlock<T>>), | ||||
|     /// The block skips too many slots and is a DoS risk. | ||||
|     TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, | ||||
|     /// The block slot is greater than the present slot. | ||||
|     /// | ||||
|     /// ## Peer scoring | ||||
| @ -183,13 +182,6 @@ pub enum BlockError<T: EthSpec> { | ||||
|     /// | ||||
|     /// The block is valid and we have already imported a block with this hash. | ||||
|     BlockIsAlreadyKnown, | ||||
|     /// A block for this proposer and slot has already been observed. | ||||
|     /// | ||||
|     /// ## Peer scoring | ||||
|     /// | ||||
|     /// The `proposer` has already proposed a block at this slot. The existing block may or may not | ||||
|     /// be equal to the given block. | ||||
|     RepeatProposal { proposer: u64, slot: Slot }, | ||||
|     /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. | ||||
|     /// | ||||
|     /// ## Peer scoring | ||||
| @ -285,6 +277,13 @@ pub enum BlockError<T: EthSpec> { | ||||
|     /// problems to worry about than losing peers, and we're doing the network a favour by | ||||
|     /// disconnecting. | ||||
|     ParentExecutionPayloadInvalid { parent_root: Hash256 }, | ||||
|     /// The block is a slashable equivocation from the proposer. | ||||
|     /// | ||||
|     /// ## Peer scoring | ||||
|     /// | ||||
|     /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so | ||||
|     /// we penalise them with a mid-tolerance error. | ||||
|     Slashable, | ||||
| } | ||||
| 
 | ||||
| /// Returned when block validation failed due to some issue verifying | ||||
| @ -633,6 +632,40 @@ pub struct ExecutionPendingBlock<T: BeaconChainTypes> { | ||||
|     pub payload_verification_handle: PayloadVerificationHandle<T::EthSpec>, | ||||
| } | ||||
| 
 | ||||
| pub trait IntoGossipVerifiedBlock<T: BeaconChainTypes>: Sized { | ||||
|     fn into_gossip_verified_block( | ||||
|         self, | ||||
|         chain: &BeaconChain<T>, | ||||
|     ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>>; | ||||
|     fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>>; | ||||
| } | ||||
| 
 | ||||
| impl<T: BeaconChainTypes> IntoGossipVerifiedBlock<T> for GossipVerifiedBlock<T> { | ||||
|     fn into_gossip_verified_block( | ||||
|         self, | ||||
|         _chain: &BeaconChain<T>, | ||||
|     ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> { | ||||
|         Ok(self) | ||||
|     } | ||||
| 
 | ||||
|     fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> { | ||||
|         self.block.clone() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: BeaconChainTypes> IntoGossipVerifiedBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>> { | ||||
|     fn into_gossip_verified_block( | ||||
|         self, | ||||
|         chain: &BeaconChain<T>, | ||||
|     ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> { | ||||
|         GossipVerifiedBlock::new(self, chain) | ||||
|     } | ||||
| 
 | ||||
|     fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> { | ||||
|         self.clone() | ||||
|     } | ||||
| } | ||||
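Together these impls let a single entry point accept either a raw `Arc<SignedBeaconBlock>` (which gets gossip-verified on entry) or an already verified block (passed through untouched). A hedged sketch of such a caller; `broadcast_to_network` is a hypothetical stand-in for the real publish step:

    // Sketch: generic over anything convertible to a gossip-verified block.
    fn verify_and_publish<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
        block: B,
        chain: &BeaconChain<T>,
    ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
        // Keep a handle to the raw block so it can be broadcast after verification.
        let raw_block = block.inner();
        let verified = block.into_gossip_verified_block(chain)?;
        broadcast_to_network(raw_block); // hypothetical publish step
        Ok(verified)
    }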
| 
 | ||||
| /// Implemented on types that can be converted into an `ExecutionPendingBlock`. | ||||
| /// | ||||
| /// Used to allow functions to accept blocks at various stages of verification. | ||||
| @ -697,7 +730,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> { | ||||
|         // Do not gossip or process blocks from future slots. | ||||
|         let present_slot_with_tolerance = chain | ||||
|             .slot_clock | ||||
|             .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) | ||||
|             .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) | ||||
|             .ok_or(BeaconChainError::UnableToReadSlot)?; | ||||
|         if block.slot() > present_slot_with_tolerance { | ||||
|             return Err(BlockError::FutureSlot { | ||||
| @ -721,35 +754,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> { | ||||
|         // reboot if the `observed_block_producers` cache is empty. In that case, without this | ||||
|         // check, we will load the parent and state from disk only to find out later that we | ||||
|         // already know this block. | ||||
|         if chain | ||||
|             .canonical_head | ||||
|             .fork_choice_read_lock() | ||||
|             .contains_block(&block_root) | ||||
|         { | ||||
|         let fork_choice_read_lock = chain.canonical_head.fork_choice_read_lock(); | ||||
|         if fork_choice_read_lock.contains_block(&block_root) { | ||||
|             return Err(BlockError::BlockIsAlreadyKnown); | ||||
|         } | ||||
| 
 | ||||
|         // Check that we have not already received a block with a valid signature for this slot. | ||||
|         if chain | ||||
|             .observed_block_producers | ||||
|             .read() | ||||
|             .proposer_has_been_observed(block.message()) | ||||
|             .map_err(|e| BlockError::BeaconChainError(e.into()))? | ||||
|         { | ||||
|             return Err(BlockError::RepeatProposal { | ||||
|                 proposer: block.message().proposer_index(), | ||||
|                 slot: block.slot(), | ||||
|             }); | ||||
|         } | ||||
| 
 | ||||
|         // Do not process a block that doesn't descend from the finalized root. | ||||
|         // | ||||
|         // We check this *before* we load the parent so that we can return a more detailed error. | ||||
|         check_block_is_finalized_checkpoint_or_descendant( | ||||
|             chain, | ||||
|             &chain.canonical_head.fork_choice_write_lock(), | ||||
|             &block, | ||||
|         )?; | ||||
|         check_block_is_finalized_checkpoint_or_descendant(chain, &fork_choice_read_lock, &block)?; | ||||
|         drop(fork_choice_read_lock); | ||||
| 
 | ||||
|         let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); | ||||
|         let (parent_block, block) = verify_parent_block_is_known(chain, block)?; | ||||
| @ -786,9 +800,6 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> { | ||||
|                 parent_block.root | ||||
|             }; | ||||
| 
 | ||||
|         // Reject any block that exceeds our limit on skipped slots. | ||||
|         check_block_skip_slots(chain, parent_block.slot, block.message())?; | ||||
| 
 | ||||
|         // We assign to a variable instead of using `if let Some` directly to ensure we drop the | ||||
|         // write lock before trying to acquire it again in the `else` clause. | ||||
|         let proposer_opt = chain | ||||
| @ -860,17 +871,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> { | ||||
|         // | ||||
|         // It's important to double-check that the proposer still hasn't been observed so we don't | ||||
|         // have a race-condition when verifying two blocks simultaneously. | ||||
|         if chain | ||||
|         match chain | ||||
|             .observed_block_producers | ||||
|             .write() | ||||
|             .observe_proposer(block.message()) | ||||
|             .observe_proposal(block_root, block.message()) | ||||
|             .map_err(|e| BlockError::BeaconChainError(e.into()))? | ||||
|         { | ||||
|             return Err(BlockError::RepeatProposal { | ||||
|                 proposer: block.message().proposer_index(), | ||||
|                 slot: block.slot(), | ||||
|             }); | ||||
|         } | ||||
|             SeenBlock::Slashable => return Err(BlockError::Slashable), | ||||
|             SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown), | ||||
|             SeenBlock::UniqueNonSlashable => {} | ||||
|         }; | ||||
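`observe_proposal` now tracks block roots per `(proposer, slot)`, which is what lets it tell an exact duplicate apart from a slashable equivocation. A simplified, self-contained model of that classification (not the real `ObservedBlockProducers` internals):

    use std::collections::HashMap;

    #[derive(Debug, PartialEq)]
    enum Seen {
        UniqueNonSlashable,
        Duplicate,
        Slashable,
    }

    /// Toy cache: (proposer, slot) -> roots of blocks already seen.
    fn observe(
        cache: &mut HashMap<(u64, u64), Vec<[u8; 32]>>,
        proposer: u64,
        slot: u64,
        root: [u8; 32],
    ) -> Seen {
        let roots = cache.entry((proposer, slot)).or_default();
        if roots.contains(&root) {
            // Same block seen again: benign duplicate.
            Seen::Duplicate
        } else if roots.is_empty() {
            // First block from this proposer at this slot.
            roots.push(root);
            Seen::UniqueNonSlashable
        } else {
            // A different block for the same (proposer, slot): equivocation.
            roots.push(root);
            Seen::Slashable
        }
    }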
| 
 | ||||
|         if block.message().proposer_index() != expected_proposer as u64 { | ||||
|             return Err(BlockError::IncorrectBlockProposer { | ||||
| @ -942,9 +952,6 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> { | ||||
| 
 | ||||
|         let (mut parent, block) = load_parent(block_root, block, chain)?; | ||||
| 
 | ||||
|         // Reject any block that exceeds our limit on skipped slots. | ||||
|         check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; | ||||
| 
 | ||||
|         let state = cheap_state_advance_to_obtain_committees( | ||||
|             &mut parent.pre_state, | ||||
|             parent.beacon_state_root, | ||||
| @ -1109,6 +1116,12 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> { | ||||
|         chain: &Arc<BeaconChain<T>>, | ||||
|         notify_execution_layer: NotifyExecutionLayer, | ||||
|     ) -> Result<Self, BlockError<T::EthSpec>> { | ||||
|         chain | ||||
|             .observed_block_producers | ||||
|             .write() | ||||
|             .observe_proposal(block_root, block.message()) | ||||
|             .map_err(|e| BlockError::BeaconChainError(e.into()))?; | ||||
| 
 | ||||
|         if let Some(parent) = chain | ||||
|             .canonical_head | ||||
|             .fork_choice_read_lock() | ||||
| @ -1135,9 +1148,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> { | ||||
|             return Err(BlockError::ParentUnknown(block)); | ||||
|         } | ||||
| 
 | ||||
|         // Reject any block that exceeds our limit on skipped slots. | ||||
|         check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; | ||||
| 
 | ||||
|         /* | ||||
|          *  Perform cursory checks to see if the block is even worth processing. | ||||
|          */ | ||||
| @ -1245,7 +1255,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> { | ||||
| 
 | ||||
|         // Perform a sanity check on the pre-state. | ||||
|         let parent_slot = parent.beacon_block.slot(); | ||||
|         if state.slot() < parent_slot || state.slot() > parent_slot + 1 { | ||||
|         if state.slot() < parent_slot || state.slot() > block.slot() { | ||||
|             return Err(BeaconChainError::BadPreState { | ||||
|                 parent_root: parent.beacon_block_root, | ||||
|                 parent_slot, | ||||
| @ -1492,30 +1502,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Check that the count of skip slots between the block and its parent does not exceed our maximum | ||||
| /// value. | ||||
| /// | ||||
| /// Whilst this is not part of the specification, we include this to help protect us from DoS | ||||
| /// attacks. In times of dire network circumstance, the user can configure the | ||||
| /// `import_max_skip_slots` value. | ||||
| fn check_block_skip_slots<T: BeaconChainTypes>( | ||||
|     chain: &BeaconChain<T>, | ||||
|     parent_slot: Slot, | ||||
|     block: BeaconBlockRef<'_, T::EthSpec>, | ||||
| ) -> Result<(), BlockError<T::EthSpec>> { | ||||
|     // Reject any block that exceeds our limit on skipped slots. | ||||
|     if let Some(max_skip_slots) = chain.config.import_max_skip_slots { | ||||
|         if block.slot() > parent_slot + max_skip_slots { | ||||
|             return Err(BlockError::TooManySkippedSlots { | ||||
|                 parent_slot, | ||||
|                 block_slot: block.slot(), | ||||
|             }); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| /// Returns `Ok(())` if the block's slot is greater than the anchor block's slot (if any). | ||||
| fn check_block_against_anchor_slot<T: BeaconChainTypes>( | ||||
|     block: BeaconBlockRef<'_, T::EthSpec>, | ||||
| @ -1768,13 +1754,18 @@ fn load_parent<T: BeaconChainTypes>( | ||||
|                 BlockError::from(BeaconChainError::MissingBeaconBlock(block.parent_root())) | ||||
|             })?; | ||||
| 
 | ||||
|         // Load the parent blocks state from the database, returning an error if it is not found. | ||||
|         // Load the parent block's state from the database, returning an error if it is not found. | ||||
|         // It is an error because if we know the parent block we should also know the parent state. | ||||
|         let parent_state_root = parent_block.state_root(); | ||||
|         let parent_state = chain | ||||
|             .get_state(&parent_state_root, Some(parent_block.slot()))? | ||||
|         // Retrieve any state that is advanced through to at most `block.slot()`: this is | ||||
|         // particularly important if `block` descends from the finalized/split block, but at a slot | ||||
|         // prior to the finalized slot (which is invalid and inaccessible in our DB schema). | ||||
|         let (parent_state_root, parent_state) = chain | ||||
|             .store | ||||
|             .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? | ||||
|             .ok_or_else(|| { | ||||
|                 BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root)) | ||||
|                 BeaconChainError::DBInconsistent( | ||||
|                     format!("Missing state for parent block {root:?}",), | ||||
|                 ) | ||||
|             })?; | ||||
| 
 | ||||
|         metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES); | ||||
|  | ||||
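The call sites in this diff imply the following shape for the new store accessor (reconstructed from usage, not quoted from the store code):

    // Inferred from `let (_, state) = store.get_advanced_hot_state(root, max_slot, state_root)?
    //     .ok_or(...)?;` at the call sites above:
    //
    // fn get_advanced_hot_state(
    //     &self,
    //     block_root: Hash256,
    //     max_slot: Slot,
    //     state_root: Hash256,
    // ) -> Result<Option<(Hash256, BeaconState<E>)>, Error>;
    //
    // It may return a state that has been advanced past `state_root`'s own slot
    // (up to `max_slot`), together with the root of the state actually returned.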
| @ -18,14 +18,15 @@ use crate::{ | ||||
| }; | ||||
| use eth1::Config as Eth1Config; | ||||
| use execution_layer::ExecutionLayer; | ||||
| use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses}; | ||||
| use fork_choice::{ForkChoice, ResetPayloadStatuses}; | ||||
| use futures::channel::mpsc::Sender; | ||||
| use operation_pool::{OperationPool, PersistedOperationPool}; | ||||
| use parking_lot::RwLock; | ||||
| use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; | ||||
| use slasher::Slasher; | ||||
| use slog::{crit, error, info, Logger}; | ||||
| use slog::{crit, debug, error, info, Logger}; | ||||
| use slot_clock::{SlotClock, TestingSlotClock}; | ||||
| use state_processing::per_slot_processing; | ||||
| use std::marker::PhantomData; | ||||
| use std::sync::Arc; | ||||
| use std::time::Duration; | ||||
| @ -287,7 +288,7 @@ where | ||||
|         let genesis_state = store | ||||
|             .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) | ||||
|             .map_err(|e| descriptive_db_error("genesis state", &e))? | ||||
|             .ok_or("Genesis block not found in store")?; | ||||
|             .ok_or("Genesis state not found in store")?; | ||||
| 
 | ||||
|         self.genesis_time = Some(genesis_state.genesis_time()); | ||||
| 
 | ||||
| @ -338,7 +339,7 @@ where | ||||
|         let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; | ||||
| 
 | ||||
|         beacon_state | ||||
|             .build_all_caches(&self.spec) | ||||
|             .build_caches(&self.spec) | ||||
|             .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; | ||||
| 
 | ||||
|         let beacon_state_root = beacon_block.message().state_root(); | ||||
| @ -382,6 +383,16 @@ where | ||||
|         let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; | ||||
|         self = updated_builder; | ||||
| 
 | ||||
|         // Stage the database's metadata fields for atomic storage when `build` is called. | ||||
|         // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent | ||||
|         // historic states from being retained (unless `--reconstruct-historic-states` is set). | ||||
|         let retain_historic_states = self.chain_config.reconstruct_historic_states; | ||||
|         self.pending_io_batch.push( | ||||
|             store | ||||
|                 .init_anchor_info(genesis.beacon_block.message(), retain_historic_states) | ||||
|                 .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?, | ||||
|         ); | ||||
| 
 | ||||
|         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) | ||||
|             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; | ||||
|         let current_slot = None; | ||||
| @ -408,46 +419,48 @@ where | ||||
|         weak_subj_block: SignedBeaconBlock<TEthSpec>, | ||||
|         genesis_state: BeaconState<TEthSpec>, | ||||
|     ) -> Result<Self, String> { | ||||
|         let store = self.store.clone().ok_or("genesis_state requires a store")?; | ||||
|         let store = self | ||||
|             .store | ||||
|             .clone() | ||||
|             .ok_or("weak_subjectivity_state requires a store")?; | ||||
|         let log = self | ||||
|             .log | ||||
|             .as_ref() | ||||
|             .ok_or("weak_subjectivity_state requires a log")?; | ||||
| 
 | ||||
|         let weak_subj_slot = weak_subj_state.slot(); | ||||
|         let weak_subj_block_root = weak_subj_block.canonical_root(); | ||||
|         let weak_subj_state_root = weak_subj_block.state_root(); | ||||
| 
 | ||||
|         // Check that the given block lies on an epoch boundary. Due to the database only storing | ||||
|         // full states on epoch boundaries and at restore points it would be difficult to support | ||||
|         // starting from a mid-epoch state. | ||||
|         if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 { | ||||
|             return Err(format!( | ||||
|                 "Checkpoint block at slot {} is not aligned to epoch start. \ | ||||
|                  Please supply an aligned checkpoint with block.slot % 32 == 0", | ||||
|                 weak_subj_block.slot(), | ||||
|             )); | ||||
|         // Ensure the state is advanced to an epoch boundary. | ||||
|         let slots_per_epoch = TEthSpec::slots_per_epoch(); | ||||
|         if weak_subj_state.slot() % slots_per_epoch != 0 { | ||||
|             debug!( | ||||
|                 log, | ||||
|                 "Advancing checkpoint state to boundary"; | ||||
|                 "state_slot" => weak_subj_state.slot(), | ||||
|                 "block_slot" => weak_subj_block.slot(), | ||||
|             ); | ||||
|             while weak_subj_state.slot() % slots_per_epoch != 0 { | ||||
|                 per_slot_processing(&mut weak_subj_state, None, &self.spec) | ||||
|                     .map_err(|e| format!("Error advancing state: {e:?}"))?; | ||||
|             } | ||||
| 
 | ||||
|         // Check that the block and state have consistent slots and state roots. | ||||
|         if weak_subj_state.slot() != weak_subj_block.slot() { | ||||
|             return Err(format!( | ||||
|                 "Slot of snapshot block ({}) does not match snapshot state ({})", | ||||
|                 weak_subj_block.slot(), | ||||
|                 weak_subj_state.slot(), | ||||
|             )); | ||||
|         } | ||||
| 
 | ||||
|         // Prime all caches before storing the state in the database and computing the tree hash | ||||
|         // root. | ||||
|         weak_subj_state | ||||
|             .build_all_caches(&self.spec) | ||||
|             .build_caches(&self.spec) | ||||
|             .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; | ||||
| 
 | ||||
|         let computed_state_root = weak_subj_state | ||||
|         let weak_subj_state_root = weak_subj_state | ||||
|             .update_tree_hash_cache() | ||||
|             .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?; | ||||
| 
 | ||||
|         if weak_subj_state_root != computed_state_root { | ||||
|         let weak_subj_slot = weak_subj_state.slot(); | ||||
|         let weak_subj_block_root = weak_subj_block.canonical_root(); | ||||
| 
 | ||||
|         // Validate the state's `latest_block_header` against the checkpoint block. | ||||
|         let state_latest_block_root = weak_subj_state.get_latest_block_root(weak_subj_state_root); | ||||
|         if weak_subj_block_root != state_latest_block_root { | ||||
|             return Err(format!( | ||||
|                 "Snapshot state root does not match block, expected: {:?}, got: {:?}", | ||||
|                 weak_subj_state_root, computed_state_root | ||||
|                 "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}", | ||||
|                 weak_subj_block_root, state_latest_block_root | ||||
|             )); | ||||
|         } | ||||
| 
 | ||||
| @ -464,10 +477,25 @@ where | ||||
| 
 | ||||
|         // Set the store's split point *before* storing genesis so that genesis is stored | ||||
|         // immediately in the freezer DB. | ||||
|         store.set_split(weak_subj_slot, weak_subj_state_root); | ||||
|         store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root); | ||||
|         let (_, updated_builder) = self.set_genesis_state(genesis_state)?; | ||||
|         self = updated_builder; | ||||
| 
 | ||||
|         // Fill in the linear block roots between the checkpoint block's slot and the aligned | ||||
|         // state's slot. All slots less than the block's slot will be handled by block backfill, | ||||
|         // while states greater or equal to the checkpoint state will be handled by `migrate_db`. | ||||
|         let block_root_batch = store | ||||
|             .store_frozen_block_root_at_skip_slots( | ||||
|                 weak_subj_block.slot(), | ||||
|                 weak_subj_state.slot(), | ||||
|                 weak_subj_block_root, | ||||
|             ) | ||||
|             .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; | ||||
|         store | ||||
|             .cold_db | ||||
|             .do_atomically(block_root_batch) | ||||
|             .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; | ||||
| 
 | ||||
|         // Write the state and block non-atomically; it doesn't matter if they're forgotten | ||||
|         // about on a crash restart. | ||||
|         store | ||||
| @ -480,10 +508,11 @@ where | ||||
|         // Stage the database's metadata fields for atomic storage when `build` is called. | ||||
|         // This prevents the database from restarting in an inconsistent state if the anchor | ||||
|         // info or split point is written before the `PersistedBeaconChain`. | ||||
|         let retain_historic_states = self.chain_config.reconstruct_historic_states; | ||||
|         self.pending_io_batch.push(store.store_split_in_batch()); | ||||
|         self.pending_io_batch.push( | ||||
|             store | ||||
|                 .init_anchor_info(weak_subj_block.message()) | ||||
|                 .init_anchor_info(weak_subj_block.message(), retain_historic_states) | ||||
|                 .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, | ||||
|         ); | ||||
| 
 | ||||
| @ -503,13 +532,12 @@ where | ||||
|         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot) | ||||
|             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; | ||||
| 
 | ||||
|         let current_slot = Some(snapshot.beacon_block.slot()); | ||||
|         let fork_choice = ForkChoice::from_anchor( | ||||
|             fc_store, | ||||
|             snapshot.beacon_block_root, | ||||
|             &snapshot.beacon_block, | ||||
|             &snapshot.beacon_state, | ||||
|             current_slot, | ||||
|             Some(weak_subj_slot), | ||||
|             &self.spec, | ||||
|         ) | ||||
|         .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; | ||||
| @ -672,9 +700,8 @@ where | ||||
|                 Err(e) => return Err(descriptive_db_error("head block", &e)), | ||||
|             }; | ||||
| 
 | ||||
|         let head_state_root = head_block.state_root(); | ||||
|         let head_state = store | ||||
|             .get_state(&head_state_root, Some(head_block.slot())) | ||||
|         let (_head_state_root, head_state) = store | ||||
|             .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) | ||||
|             .map_err(|e| descriptive_db_error("head state", &e))? | ||||
|             .ok_or("Head state not found in store")?; | ||||
| 
 | ||||
| @ -687,7 +714,8 @@ where | ||||
|                 store.clone(), | ||||
|                 Some(current_slot), | ||||
|                 &self.spec, | ||||
|                 CountUnrealized::True, | ||||
|                 self.chain_config.progressive_balances_mode, | ||||
|                 &log, | ||||
|             )?; | ||||
|         } | ||||
| 
 | ||||
| @ -701,7 +729,7 @@ where | ||||
| 
 | ||||
|         head_snapshot | ||||
|             .beacon_state | ||||
|             .build_all_caches(&self.spec) | ||||
|             .build_caches(&self.spec) | ||||
|             .map_err(|e| format!("Failed to build state caches: {:?}", e))?; | ||||
| 
 | ||||
|         // Perform a check to ensure that the finalization points of the head and fork choice are | ||||
| @ -827,7 +855,6 @@ where | ||||
|             observed_sync_aggregators: <_>::default(), | ||||
|             // TODO: allow for persisting and loading the pool from disk. | ||||
|             observed_block_producers: <_>::default(), | ||||
|             // TODO: allow for persisting and loading the pool from disk. | ||||
|             observed_voluntary_exits: <_>::default(), | ||||
|             observed_proposer_slashings: <_>::default(), | ||||
|             observed_attester_slashings: <_>::default(), | ||||
|  | ||||
| @ -47,7 +47,8 @@ use crate::{ | ||||
| }; | ||||
| use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; | ||||
| use fork_choice::{ | ||||
|     ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses, | ||||
|     ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, | ||||
|     ResetPayloadStatuses, | ||||
| }; | ||||
| use itertools::process_results; | ||||
| use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; | ||||
| @ -298,10 +299,10 @@ impl<T: BeaconChainTypes> CanonicalHead<T> { | ||||
|         let beacon_block = store | ||||
|             .get_full_block(&beacon_block_root)? | ||||
|             .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; | ||||
|         let beacon_state_root = beacon_block.state_root(); | ||||
|         let beacon_state = store | ||||
|             .get_state(&beacon_state_root, Some(beacon_block.slot()))? | ||||
|             .ok_or(Error::MissingBeaconState(beacon_state_root))?; | ||||
|         let current_slot = fork_choice.fc_store().get_current_slot(); | ||||
|         let (_, beacon_state) = store | ||||
|             .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? | ||||
|             .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; | ||||
| 
 | ||||
|         let snapshot = BeaconSnapshot { | ||||
|             beacon_block_root, | ||||
| @ -669,10 +670,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|                         .get_full_block(&new_view.head_block_root)? | ||||
|                         .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; | ||||
| 
 | ||||
|                     let beacon_state_root = beacon_block.state_root(); | ||||
|                     let beacon_state: BeaconState<T::EthSpec> = self | ||||
|                         .get_state(&beacon_state_root, Some(beacon_block.slot()))? | ||||
|                         .ok_or(Error::MissingBeaconState(beacon_state_root))?; | ||||
|                     let (_, beacon_state) = self | ||||
|                         .store | ||||
|                         .get_advanced_hot_state( | ||||
|                             new_view.head_block_root, | ||||
|                             current_slot, | ||||
|                             beacon_block.state_root(), | ||||
|                         )? | ||||
|                         .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; | ||||
| 
 | ||||
|                     Ok(BeaconSnapshot { | ||||
|                         beacon_block: Arc::new(beacon_block), | ||||
|  | ||||
| @ -1,7 +1,7 @@ | ||||
| pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; | ||||
| use serde_derive::{Deserialize, Serialize}; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use std::time::Duration; | ||||
| use types::{Checkpoint, Epoch}; | ||||
| use types::{Checkpoint, Epoch, ProgressiveBalancesMode}; | ||||
| 
 | ||||
| pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); | ||||
| pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); | ||||
| @ -17,8 +17,7 @@ pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; | ||||
| 
 | ||||
| #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] | ||||
| pub struct ChainConfig { | ||||
|     /// Maximum number of slots to skip when importing a consensus message (e.g., block, | ||||
|     /// attestation, etc). | ||||
|     /// Maximum number of slots to skip when importing an attestation. | ||||
|     /// | ||||
|     /// If `None`, there is no limit. | ||||
|     pub import_max_skip_slots: Option<u64>, | ||||
| @ -80,8 +79,10 @@ pub struct ChainConfig { | ||||
|     /// | ||||
|     /// This is useful for block builders and testing. | ||||
|     pub always_prepare_payload: bool, | ||||
|     /// Whether backfill sync processing should be rate-limited. | ||||
|     pub enable_backfill_rate_limiting: bool, | ||||
|     /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation. | ||||
|     pub progressive_balances_mode: ProgressiveBalancesMode, | ||||
|     /// Number of epochs between each migration of data from the hot database to the freezer. | ||||
|     pub epochs_per_migration: u64, | ||||
| } | ||||
| 
 | ||||
| impl Default for ChainConfig { | ||||
| @ -111,7 +112,8 @@ impl Default for ChainConfig { | ||||
|             shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, | ||||
|             genesis_backfill: false, | ||||
|             always_prepare_payload: false, | ||||
|             enable_backfill_rate_limiting: true, | ||||
|             progressive_balances_mode: ProgressiveBalancesMode::Checked, | ||||
|             epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
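`ChainConfig` is a plain struct, so deployments that only need to tweak the new fields can rely on struct-update syntax; the value here is illustrative:

    let chain_config = ChainConfig {
        // Consolidate the hot DB into the freezer every 4 finalized epochs.
        epochs_per_migration: 4,
        ..ChainConfig::default()
    };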
| @ -24,7 +24,7 @@ use state_processing::{ | ||||
|     }, | ||||
|     signature_sets::Error as SignatureSetError, | ||||
|     state_advance::Error as StateAdvanceError, | ||||
|     BlockProcessingError, BlockReplayError, SlotProcessingError, | ||||
|     BlockProcessingError, BlockReplayError, EpochProcessingError, SlotProcessingError, | ||||
| }; | ||||
| use std::time::Duration; | ||||
| use task_executor::ShutdownReason; | ||||
| @ -60,6 +60,7 @@ pub enum BeaconChainError { | ||||
|     MissingBeaconBlock(Hash256), | ||||
|     MissingBeaconState(Hash256), | ||||
|     SlotProcessingError(SlotProcessingError), | ||||
|     EpochProcessingError(EpochProcessingError), | ||||
|     StateAdvanceError(StateAdvanceError), | ||||
|     UnableToAdvanceState(String), | ||||
|     NoStateForAttestation { | ||||
| @ -145,6 +146,8 @@ pub enum BeaconChainError { | ||||
|     BlockVariantLacksExecutionPayload(Hash256), | ||||
|     ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, Box<execution_layer::Error>), | ||||
|     EngineGetCapabilititesFailed(Box<execution_layer::Error>), | ||||
|     ExecutionLayerGetBlockByNumberFailed(Box<execution_layer::Error>), | ||||
|     ExecutionLayerGetBlockByHashFailed(Box<execution_layer::Error>), | ||||
|     BlockHashMissingFromExecutionLayer(ExecutionBlockHash), | ||||
|     InconsistentPayloadReconstructed { | ||||
|         slot: Slot, | ||||
| @ -213,9 +216,11 @@ pub enum BeaconChainError { | ||||
|     BlsToExecutionConflictsWithPool, | ||||
|     InconsistentFork(InconsistentFork), | ||||
|     ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>), | ||||
|     UnableToPublish, | ||||
| } | ||||
| 
 | ||||
| easy_from_to!(SlotProcessingError, BeaconChainError); | ||||
| easy_from_to!(EpochProcessingError, BeaconChainError); | ||||
| easy_from_to!(AttestationValidationError, BeaconChainError); | ||||
| easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError); | ||||
| easy_from_to!(ExitValidationError, BeaconChainError); | ||||
|  | ||||
| @ -21,8 +21,11 @@ pub struct ServerSentEventHandler<T: EthSpec> { | ||||
| } | ||||
| 
 | ||||
| impl<T: EthSpec> ServerSentEventHandler<T> { | ||||
|     pub fn new(log: Logger) -> Self { | ||||
|         Self::new_with_capacity(log, DEFAULT_CHANNEL_CAPACITY) | ||||
|     pub fn new(log: Logger, capacity_multiplier: usize) -> Self { | ||||
|         Self::new_with_capacity( | ||||
|             log, | ||||
|             capacity_multiplier.saturating_mul(DEFAULT_CHANNEL_CAPACITY), | ||||
|         ) | ||||
|     } | ||||
| 
 | ||||
|     pub fn new_with_capacity(log: Logger, capacity: usize) -> Self { | ||||
|  | ||||
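The multiplier scales `DEFAULT_CHANNEL_CAPACITY` instead of replacing it, and `saturating_mul` caps the product at `usize::MAX` rather than overflowing. Illustrative usage (type parameter and logger assumed in scope):

    // A multiplier of 1 preserves the previous behaviour; larger values widen
    // every server-sent-event channel proportionally.
    let default_handler = ServerSentEventHandler::<E>::new(log.clone(), 1);
    let wide_handler = ServerSentEventHandler::<E>::new(log, 4);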
| @ -1,5 +1,5 @@ | ||||
| use crate::{BeaconForkChoiceStore, BeaconSnapshot}; | ||||
| use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus}; | ||||
| use fork_choice::{ForkChoice, PayloadVerificationStatus}; | ||||
| use itertools::process_results; | ||||
| use slog::{info, warn, Logger}; | ||||
| use state_processing::state_advance::complete_state_advance; | ||||
| @ -10,7 +10,10 @@ use state_processing::{ | ||||
| use std::sync::Arc; | ||||
| use std::time::Duration; | ||||
| use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore}; | ||||
| use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; | ||||
| use types::{ | ||||
|     BeaconState, ChainSpec, EthSpec, ForkName, Hash256, ProgressiveBalancesMode, SignedBeaconBlock, | ||||
|     Slot, | ||||
| }; | ||||
| 
 | ||||
| const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \ | ||||
|                                   consider deleting it by running with the --purge-db flag."; | ||||
| @ -100,7 +103,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It | ||||
|     store: Arc<HotColdDB<E, Hot, Cold>>, | ||||
|     current_slot: Option<Slot>, | ||||
|     spec: &ChainSpec, | ||||
|     count_unrealized_config: CountUnrealized, | ||||
|     progressive_balances_mode: ProgressiveBalancesMode, | ||||
|     log: &Logger, | ||||
| ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> { | ||||
|     // Fetch finalized block. | ||||
|     let finalized_checkpoint = head_state.finalized_checkpoint(); | ||||
| @ -166,8 +170,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It | ||||
|         .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; | ||||
| 
 | ||||
|     let mut state = finalized_snapshot.beacon_state; | ||||
|     let blocks_len = blocks.len(); | ||||
|     for (i, block) in blocks.into_iter().enumerate() { | ||||
|     for block in blocks { | ||||
|         complete_state_advance(&mut state, None, block.slot(), spec) | ||||
|             .map_err(|e| format!("State advance failed: {:?}", e))?; | ||||
| 
 | ||||
| @ -190,15 +193,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It | ||||
|         // This scenario is so rare that it seems OK to double-verify some blocks. | ||||
|         let payload_verification_status = PayloadVerificationStatus::Optimistic; | ||||
| 
 | ||||
|         // Because we are replaying a single chain of blocks, we only need to calculate unrealized | ||||
|         // justification for the last block in the chain. | ||||
|         let is_last_block = i + 1 == blocks_len; | ||||
|         let count_unrealized = if is_last_block { | ||||
|             count_unrealized_config | ||||
|         } else { | ||||
|             CountUnrealized::False | ||||
|         }; | ||||
| 
 | ||||
|         fork_choice | ||||
|             .on_block( | ||||
|                 block.slot(), | ||||
| @ -208,8 +202,9 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It | ||||
|                 Duration::from_secs(0), | ||||
|                 &state, | ||||
|                 payload_verification_status, | ||||
|                 progressive_balances_mode, | ||||
|                 spec, | ||||
|                 count_unrealized, | ||||
|                 log, | ||||
|             ) | ||||
|             .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; | ||||
|     } | ||||
|  | ||||
| @ -52,9 +52,9 @@ pub mod validator_pubkey_cache; | ||||
| 
 | ||||
| pub use self::beacon_chain::{ | ||||
|     AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, | ||||
|     CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, | ||||
|     StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, | ||||
|     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, | ||||
|     ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, | ||||
|     WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, | ||||
|     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, | ||||
| }; | ||||
| pub use self::beacon_snapshot::BeaconSnapshot; | ||||
| pub use self::chain_config::ChainConfig; | ||||
| @ -64,6 +64,7 @@ pub use attestation_verification::Error as AttestationError; | ||||
| pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; | ||||
| pub use block_verification::{ | ||||
|     get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock, | ||||
|     IntoExecutionPendingBlock, IntoGossipVerifiedBlock, | ||||
| }; | ||||
| pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; | ||||
| pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; | ||||
| @ -72,6 +73,7 @@ pub use execution_layer::EngineState; | ||||
| pub use execution_payload::NotifyExecutionLayer; | ||||
| pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; | ||||
| pub use metrics::scrape_for_metrics; | ||||
| pub use migrate::MigratorConfig; | ||||
| pub use parking_lot; | ||||
| pub use slot_clock; | ||||
| pub use state_processing::per_block_processing::errors::{ | ||||
|  | ||||
| @ -1,6 +1,4 @@ | ||||
| use crate::{ | ||||
|     beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, | ||||
| }; | ||||
| use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; | ||||
| use derivative::Derivative; | ||||
| use slot_clock::SlotClock; | ||||
| use std::time::Duration; | ||||
| @ -103,7 +101,8 @@ impl<T: BeaconChainTypes> VerifiedLightClientFinalityUpdate<T> { | ||||
|         // verify that enough time has passed for the block to have been propagated | ||||
|         match start_time { | ||||
|             Some(time) => { | ||||
|                 if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration | ||||
|                 if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() | ||||
|                     < time + one_third_slot_duration | ||||
|                 { | ||||
|                     return Err(Error::TooEarly); | ||||
|                 } | ||||
|  | ||||
| @ -1,6 +1,4 @@ | ||||
| use crate::{ | ||||
|     beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, | ||||
| }; | ||||
| use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; | ||||
| use derivative::Derivative; | ||||
| use eth2::types::Hash256; | ||||
| use slot_clock::SlotClock; | ||||
| @ -103,7 +101,8 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> { | ||||
|         // verify that enough time has passed for the block to have been propagated | ||||
|         match start_time { | ||||
|             Some(time) => { | ||||
|                 if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration | ||||
|                 if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() | ||||
|                     < time + one_third_slot_duration | ||||
|                 { | ||||
|                     return Err(Error::TooEarly); | ||||
|                 } | ||||
|  | ||||
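Both light-client verifiers now read the disparity from the spec, but the rule is unchanged: the update is `TooEarly` unless roughly one third of a slot (less the permitted clock disparity) has elapsed since the slot started. A worked example with assumed 12-second slots and a 500ms disparity:

    use std::time::Duration;

    let one_third_slot = Duration::from_secs(12) / 3; // 4s, assuming 12s slots
    let disparity = Duration::from_millis(500);       // assumed spec value
    let slot_start = Duration::from_secs(1_000);      // when the slot began
    let seen_timestamp = Duration::from_secs(1_003);  // when we saw the update

    // Too early: 1003.0s + 0.5s = 1003.5s is still before 1000s + 4s = 1004s.
    assert!(seen_timestamp + disparity < slot_start + one_third_slot);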
| @ -1,8 +1,10 @@ | ||||
| //! Provides tools for checking if a node is ready for the Bellatrix upgrade and following merge | ||||
| //! transition. | ||||
| 
 | ||||
| use crate::{BeaconChain, BeaconChainTypes}; | ||||
| use crate::{BeaconChain, BeaconChainError as Error, BeaconChainTypes}; | ||||
| use execution_layer::BlockByNumberQuery; | ||||
| use serde::{Deserialize, Serialize, Serializer}; | ||||
| use slog::debug; | ||||
| use std::fmt; | ||||
| use std::fmt::Write; | ||||
| use types::*; | ||||
| @ -86,9 +88,6 @@ pub enum MergeReadiness { | ||||
|         #[serde(serialize_with = "serialize_uint256")] | ||||
|         current_difficulty: Option<Uint256>, | ||||
|     }, | ||||
|     /// The transition configuration with the EL failed, there might be a problem with | ||||
|     /// connectivity, authentication or a difference in configuration. | ||||
|     ExchangeTransitionConfigurationFailed { error: String }, | ||||
|     /// The EL can be reached and has the correct configuration, however it's not yet synced. | ||||
|     NotSynced, | ||||
|     /// The user has not configured this node to use an execution endpoint. | ||||
| @ -109,12 +108,6 @@ impl fmt::Display for MergeReadiness { | ||||
|                     params, current_difficulty | ||||
|                 ) | ||||
|             } | ||||
|             MergeReadiness::ExchangeTransitionConfigurationFailed { error } => write!( | ||||
|                 f, | ||||
|                 "Could not confirm the transition configuration with the \ | ||||
|                     execution endpoint: {:?}", | ||||
|                 error | ||||
|             ), | ||||
|             MergeReadiness::NotSynced => write!( | ||||
|                 f, | ||||
|                 "The execution endpoint is connected and configured, \ | ||||
| @ -129,6 +122,25 @@ impl fmt::Display for MergeReadiness { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| pub enum GenesisExecutionPayloadStatus { | ||||
|     Correct(ExecutionBlockHash), | ||||
|     BlockHashMismatch { | ||||
|         got: ExecutionBlockHash, | ||||
|         expected: ExecutionBlockHash, | ||||
|     }, | ||||
|     TransactionsRootMismatch { | ||||
|         got: Hash256, | ||||
|         expected: Hash256, | ||||
|     }, | ||||
|     WithdrawalsRootMismatch { | ||||
|         got: Hash256, | ||||
|         expected: Hash256, | ||||
|     }, | ||||
|     OtherMismatch, | ||||
|     Irrelevant, | ||||
|     AlreadyHappened, | ||||
| } | ||||
| 
 | ||||
| impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|     /// Returns `true` if user has an EL configured, or if the Bellatrix fork has occurred or will | ||||
|     /// occur within `MERGE_READINESS_PREPARATION_SECONDS`. | ||||
| @ -153,17 +165,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|     } | ||||
| 
 | ||||
|     /// Attempts to connect to the EL and confirm that it is ready for the merge. | ||||
|     pub async fn check_merge_readiness(&self) -> MergeReadiness { | ||||
|     pub async fn check_merge_readiness(&self, current_slot: Slot) -> MergeReadiness { | ||||
|         if let Some(el) = self.execution_layer.as_ref() { | ||||
|             if let Err(e) = el.exchange_transition_configuration(&self.spec).await { | ||||
|                 // The EL was either unreachable, responded with an error or has a different | ||||
|                 // configuration. | ||||
|                 return MergeReadiness::ExchangeTransitionConfigurationFailed { | ||||
|                     error: format!("{:?}", e), | ||||
|                 }; | ||||
|             } | ||||
| 
 | ||||
|             if !el.is_synced_for_notifier().await { | ||||
|             if !el.is_synced_for_notifier(current_slot).await { | ||||
|                 // The EL is not synced. | ||||
|                 return MergeReadiness::NotSynced; | ||||
|             } | ||||
| @ -178,6 +182,91 @@ impl<T: BeaconChainTypes> BeaconChain<T> { | ||||
|             MergeReadiness::NoExecutionEndpoint | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Check that the execution payload embedded in the genesis state matches the EL's genesis | ||||
|     /// block. | ||||
|     pub async fn check_genesis_execution_payload_is_correct( | ||||
|         &self, | ||||
|     ) -> Result<GenesisExecutionPayloadStatus, Error> { | ||||
|         let head_snapshot = self.head_snapshot(); | ||||
|         let genesis_state = &head_snapshot.beacon_state; | ||||
| 
 | ||||
|         if genesis_state.slot() != 0 { | ||||
|             return Ok(GenesisExecutionPayloadStatus::AlreadyHappened); | ||||
|         } | ||||
| 
 | ||||
|         let Ok(latest_execution_payload_header) = genesis_state.latest_execution_payload_header() | ||||
|         else { | ||||
|             return Ok(GenesisExecutionPayloadStatus::Irrelevant); | ||||
|         }; | ||||
|         let fork = self.spec.fork_name_at_epoch(Epoch::new(0)); | ||||
| 
 | ||||
|         let execution_layer = self | ||||
|             .execution_layer | ||||
|             .as_ref() | ||||
|             .ok_or(Error::ExecutionLayerMissing)?; | ||||
|         let exec_block_hash = latest_execution_payload_header.block_hash(); | ||||
| 
 | ||||
|         // Use getBlockByNumber(0) to check that the block hash matches. | ||||
|         // At present, Geth does not respond to engine_getPayloadBodiesByRange before genesis. | ||||
|         let execution_block = execution_layer | ||||
|             .get_block_by_number(BlockByNumberQuery::Tag("0x0")) | ||||
|             .await | ||||
|             .map_err(|e| Error::ExecutionLayerGetBlockByNumberFailed(Box::new(e)))? | ||||
|             .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; | ||||
| 
 | ||||
|         if execution_block.block_hash != exec_block_hash { | ||||
|             return Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { | ||||
|                 got: execution_block.block_hash, | ||||
|                 expected: exec_block_hash, | ||||
|             }); | ||||
|         } | ||||
| 
 | ||||
|         // Double-check the block by reconstructing it. | ||||
|         let execution_payload = execution_layer | ||||
|             .get_payload_by_hash_legacy(exec_block_hash, fork) | ||||
|             .await | ||||
|             .map_err(|e| Error::ExecutionLayerGetBlockByHashFailed(Box::new(e)))? | ||||
|             .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; | ||||
| 
 | ||||
|         // Verify payload integrity. | ||||
|         let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref()); | ||||
| 
 | ||||
|         let got_transactions_root = header_from_payload.transactions_root(); | ||||
|         let expected_transactions_root = latest_execution_payload_header.transactions_root(); | ||||
|         let got_withdrawals_root = header_from_payload.withdrawals_root().ok(); | ||||
|         let expected_withdrawals_root = latest_execution_payload_header.withdrawals_root().ok(); | ||||
| 
 | ||||
|         if got_transactions_root != expected_transactions_root { | ||||
|             return Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch { | ||||
|                 got: got_transactions_root, | ||||
|                 expected: expected_transactions_root, | ||||
|             }); | ||||
|         } | ||||
| 
 | ||||
|         if let Some(&expected) = expected_withdrawals_root { | ||||
|             if let Some(&got) = got_withdrawals_root { | ||||
|                 if got != expected { | ||||
|                     return Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { | ||||
|                         got, | ||||
|                         expected, | ||||
|                     }); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         if header_from_payload.to_ref() != latest_execution_payload_header { | ||||
|             debug!( | ||||
|                 self.log, | ||||
|                 "Genesis execution payload reconstruction failure"; | ||||
|                 "consensus_node_header" => ?latest_execution_payload_header, | ||||
|                 "execution_node_header" => ?header_from_payload | ||||
|             ); | ||||
|             return Ok(GenesisExecutionPayloadStatus::OtherMismatch); | ||||
|         } | ||||
| 
 | ||||
|         Ok(GenesisExecutionPayloadStatus::Correct(exec_block_hash)) | ||||
|     } | ||||
| } | ||||
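A caller such as the startup notifier can branch on the returned status; a sketch (the log lines are illustrative, not the actual notifier output):

    match chain.check_genesis_execution_payload_is_correct().await? {
        GenesisExecutionPayloadStatus::Correct(block_hash) => {
            info!(log, "Genesis execution payload verified"; "block_hash" => ?block_hash);
        }
        GenesisExecutionPayloadStatus::AlreadyHappened
        | GenesisExecutionPayloadStatus::Irrelevant => {
            // Past genesis, or genesis predates the merge: nothing to check.
        }
        _mismatch => {
            error!(log, "Genesis payload disagrees with the EL; check the EL's genesis config");
        }
    }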
| 
 | ||||
| /// Utility function to serialize a Uint256 as a decimal string. | ||||
|  | ||||
| @ -998,6 +998,17 @@ lazy_static! { | ||||
|         "light_client_optimistic_update_verification_success_total", | ||||
|         "Number of light client optimistic updates verified for gossip" | ||||
|     ); | ||||
|     /* | ||||
|      * Aggregate subset metrics | ||||
|      */ | ||||
|     pub static ref SYNC_CONTRIBUTION_SUBSETS: Result<IntCounter> = try_create_int_counter( | ||||
|         "beacon_sync_contribution_subsets_total", | ||||
|         "Count of new sync contributions that are subsets of already known aggregates" | ||||
|     ); | ||||
|     pub static ref AGGREGATED_ATTESTATION_SUBSETS: Result<IntCounter> = try_create_int_counter( | ||||
|         "beacon_aggregated_attestation_subsets_total", | ||||
|         "Count of new aggregated attestations that are subsets of already known aggregates" | ||||
|     ); | ||||
| } | ||||
| 
 | ||||
| /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, | ||||
|  | ||||
| @ -25,10 +25,15 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200; | ||||
| /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`. | ||||
| const COMPACTION_FINALITY_DISTANCE: u64 = 1024; | ||||
| 
 | ||||
| /// Default number of epochs to wait between finalization migrations. | ||||
| pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1; | ||||
| 
 | ||||
| /// The background migrator runs a thread to perform pruning and migrate state from the hot | ||||
| /// to the cold database. | ||||
| pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> { | ||||
|     db: Arc<HotColdDB<E, Hot, Cold>>, | ||||
|     /// Record of when the last migration ran, for enforcing `epochs_per_migration`. | ||||
|     prev_migration: Arc<Mutex<PrevMigration>>, | ||||
|     #[allow(clippy::type_complexity)] | ||||
|     tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>, | ||||
|     /// Genesis block root, for persisting the `PersistedBeaconChain`. | ||||
| @ -36,9 +41,22 @@ pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> | ||||
|     log: Logger, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Default, Clone, PartialEq, Eq)] | ||||
| #[derive(Debug, Clone, PartialEq, Eq)] | ||||
| pub struct MigratorConfig { | ||||
|     pub blocking: bool, | ||||
|     /// Run migrations at most once per `epochs_per_migration`. | ||||
|     /// | ||||
|     /// If set to 0 or 1, then run every finalization. | ||||
|     pub epochs_per_migration: u64, | ||||
| } | ||||
| 
 | ||||
| impl Default for MigratorConfig { | ||||
|     fn default() -> Self { | ||||
|         Self { | ||||
|             blocking: false, | ||||
|             epochs_per_migration: DEFAULT_EPOCHS_PER_MIGRATION, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl MigratorConfig { | ||||
| @ -46,6 +64,19 @@ impl MigratorConfig { | ||||
|         self.blocking = true; | ||||
|         self | ||||
|     } | ||||
| 
 | ||||
|     pub fn epochs_per_migration(mut self, epochs_per_migration: u64) -> Self { | ||||
|         self.epochs_per_migration = epochs_per_migration; | ||||
|         self | ||||
|     } | ||||
| } | ||||
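With the new `Default` impl, the builder-style setters compose naturally; for example:

    // Block on each migration (handy in tests) and run at most once
    // every 4 finalized epochs.
    let config = MigratorConfig::default()
        .blocking()
        .epochs_per_migration(4);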
| 
 | ||||
| /// Record of when the last migration ran. | ||||
| pub struct PrevMigration { | ||||
|     /// The epoch at which the last finalization migration ran. | ||||
|     epoch: Epoch, | ||||
|     /// The number of epochs to wait between runs. | ||||
|     epochs_per_migration: u64, | ||||
| } | ||||
| 
 | ||||
| /// Pruning can be successful, or in rare cases deferred to a later point. | ||||
| @ -92,6 +123,7 @@ pub struct FinalizationNotification { | ||||
|     finalized_state_root: BeaconStateHash, | ||||
|     finalized_checkpoint: Checkpoint, | ||||
|     head_tracker: Arc<HeadTracker>, | ||||
|     prev_migration: Arc<Mutex<PrevMigration>>, | ||||
|     genesis_block_root: Hash256, | ||||
| } | ||||
| 
 | ||||
| @ -103,6 +135,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | ||||
|         genesis_block_root: Hash256, | ||||
|         log: Logger, | ||||
|     ) -> Self { | ||||
|         // Estimate last migration run from DB split slot. | ||||
|         let prev_migration = Arc::new(Mutex::new(PrevMigration { | ||||
|             epoch: db.get_split_slot().epoch(E::slots_per_epoch()), | ||||
|             epochs_per_migration: config.epochs_per_migration, | ||||
|         })); | ||||
|         let tx_thread = if config.blocking { | ||||
|             None | ||||
|         } else { | ||||
| @ -111,6 +148,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | ||||
|         Self { | ||||
|             db, | ||||
|             tx_thread, | ||||
|             prev_migration, | ||||
|             genesis_block_root, | ||||
|             log, | ||||
|         } | ||||
| @ -131,6 +169,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | ||||
|             finalized_state_root, | ||||
|             finalized_checkpoint, | ||||
|             head_tracker, | ||||
|             prev_migration: self.prev_migration.clone(), | ||||
|             genesis_block_root: self.genesis_block_root, | ||||
|         }; | ||||
| 
 | ||||
| @ -204,9 +243,30 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | ||||
|         notif: FinalizationNotification, | ||||
|         log: &Logger, | ||||
|     ) { | ||||
|         // Do not run too frequently. | ||||
|         let epoch = notif.finalized_checkpoint.epoch; | ||||
|         let mut prev_migration = notif.prev_migration.lock(); | ||||
|         if epoch < prev_migration.epoch + prev_migration.epochs_per_migration { | ||||
|             debug!( | ||||
|                 log, | ||||
|                 "Database consolidation deferred"; | ||||
|                 "last_finalized_epoch" => prev_migration.epoch, | ||||
|                 "new_finalized_epoch" => epoch, | ||||
|                 "epochs_per_migration" => prev_migration.epochs_per_migration, | ||||
|             ); | ||||
|             return; | ||||
|         } | ||||
| 
 | ||||
|         // Update the previous migration epoch immediately to avoid holding the lock. If the | ||||
|         // migration doesn't succeed then the next migration will be retried at the next scheduled | ||||
|         // run. | ||||
|         prev_migration.epoch = epoch; | ||||
|         drop(prev_migration); | ||||
| 
 | ||||
|         debug!(log, "Database consolidation started"); | ||||
| 
 | ||||
|         let finalized_state_root = notif.finalized_state_root; | ||||
|         let finalized_block_root = notif.finalized_checkpoint.root; | ||||
| 
 | ||||
|         let finalized_state = match db.get_state(&finalized_state_root.into(), None) { | ||||
|             Ok(Some(state)) => state, | ||||
| @ -260,7 +320,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | ||||
|             } | ||||
|         }; | ||||
| 
 | ||||
|         match migrate_database(db.clone(), finalized_state_root.into(), &finalized_state) { | ||||
|         match migrate_database( | ||||
|             db.clone(), | ||||
|             finalized_state_root.into(), | ||||
|             finalized_block_root, | ||||
|             &finalized_state, | ||||
|         ) { | ||||
|             Ok(()) => {} | ||||
|             Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { | ||||
|                 debug!( | ||||
|  | ||||
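The gating above defers consolidation until at least `epochs_per_migration` epochs have passed since the last run, and it records the new epoch before doing any work so the lock is released immediately. A minimal standalone sketch of the same policy, with `Epoch` reduced to a plain `u64` and all Lighthouse types omitted (`PrevMigration` and `should_run` here are illustrative names, not the crate's API):

    use std::sync::{Arc, Mutex};

    struct PrevMigration {
        epoch: u64,
        epochs_per_migration: u64,
    }

    /// Returns `true` if a migration should run at `finalized_epoch`, eagerly
    /// recording the epoch so the lock is not held across the migration itself.
    fn should_run(prev: &Arc<Mutex<PrevMigration>>, finalized_epoch: u64) -> bool {
        let mut prev = prev.lock().unwrap();
        if finalized_epoch < prev.epoch + prev.epochs_per_migration {
            return false; // Deferred: not enough epochs since the last run.
        }
        // A failed run is not retried early; it simply waits for the next window.
        prev.epoch = finalized_epoch;
        true
    }

    fn main() {
        let prev = Arc::new(Mutex::new(PrevMigration { epoch: 10, epochs_per_migration: 4 }));
        assert!(!should_run(&prev, 12)); // 12 < 10 + 4, deferred
        assert!(should_run(&prev, 14));  // 14 >= 14, runs and records epoch 14
        assert!(!should_run(&prev, 17)); // 17 < 14 + 4, deferred again
    }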
| @ -1,7 +1,9 @@ | ||||
| //! Provides an `ObservedAggregates` struct which allows us to reject aggregated attestations or | ||||
| //! sync committee contributions if we've already seen them. | ||||
|  | ||||
| use std::collections::HashSet; | ||||
| use crate::sync_committee_verification::SyncCommitteeData; | ||||
| use ssz_types::{BitList, BitVector}; | ||||
| use std::collections::HashMap; | ||||
| use std::marker::PhantomData; | ||||
| use tree_hash::TreeHash; | ||||
| use types::consts::altair::{ | ||||
| @ -10,8 +12,16 @@ use types::consts::altair::{ | ||||
| use types::slot_data::SlotData; | ||||
| use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution}; | ||||
| 
 | ||||
| pub type ObservedSyncContributions<E> = ObservedAggregates<SyncCommitteeContribution<E>, E>; | ||||
| pub type ObservedAggregateAttestations<E> = ObservedAggregates<Attestation<E>, E>; | ||||
| pub type ObservedSyncContributions<E> = ObservedAggregates< | ||||
|     SyncCommitteeContribution<E>, | ||||
|     E, | ||||
|     BitVector<<E as types::EthSpec>::SyncSubcommitteeSize>, | ||||
| >; | ||||
| pub type ObservedAggregateAttestations<E> = ObservedAggregates< | ||||
|     Attestation<E>, | ||||
|     E, | ||||
|     BitList<<E as types::EthSpec>::MaxValidatorsPerCommittee>, | ||||
| >; | ||||
|  | ||||
| /// A trait used to associate capacity constants with the type being stored in `ObservedAggregates`. | ||||
| pub trait Consts { | ||||
| @ -69,10 +79,81 @@ impl<T: EthSpec> Consts for SyncCommitteeContribution<T> { | ||||
|     } | ||||
| } | ||||
|  | ||||
| /// A trait for types that implement a behaviour where one object of that type | ||||
| /// can be a subset/superset of another. | ||||
| /// This trait allows us to be generic over the aggregate item stored in the cache | ||||
| /// for which we want to prevent duplicates/subsets. | ||||
| pub trait SubsetItem { | ||||
|     /// The item that is stored for later comparison with new incoming aggregate items. | ||||
|     type Item; | ||||
|  | ||||
|     /// Returns `true` if `self` is a non-strict subset of `other` and `false` otherwise. | ||||
|     fn is_subset(&self, other: &Self::Item) -> bool; | ||||
|  | ||||
|     /// Returns `true` if `self` is a non-strict superset of `other` and `false` otherwise. | ||||
|     fn is_superset(&self, other: &Self::Item) -> bool; | ||||
|  | ||||
|     /// Returns the item that gets stored in `ObservedAggregates` for later subset | ||||
|     /// comparison with incoming aggregates. | ||||
|     fn get_item(&self) -> Self::Item; | ||||
|  | ||||
|     /// Returns a unique value that keys the object to the item that is being stored | ||||
|     /// in `ObservedAggregates`. | ||||
|     fn root(&self) -> Hash256; | ||||
| } | ||||
| 
 | ||||
| impl<T: EthSpec> SubsetItem for Attestation<T> { | ||||
|     type Item = BitList<T::MaxValidatorsPerCommittee>; | ||||
|     fn is_subset(&self, other: &Self::Item) -> bool { | ||||
|         self.aggregation_bits.is_subset(other) | ||||
|     } | ||||
| 
 | ||||
|     fn is_superset(&self, other: &Self::Item) -> bool { | ||||
|         other.is_subset(&self.aggregation_bits) | ||||
|     } | ||||
|  | ||||
|     /// Returns the attestation aggregation bits. | ||||
|     fn get_item(&self) -> Self::Item { | ||||
|         self.aggregation_bits.clone() | ||||
|     } | ||||
|  | ||||
|     /// Returns the hash tree root of the attestation data. | ||||
|     fn root(&self) -> Hash256 { | ||||
|         self.data.tree_hash_root() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: EthSpec> SubsetItem for SyncCommitteeContribution<T> { | ||||
|     type Item = BitVector<T::SyncSubcommitteeSize>; | ||||
|     fn is_subset(&self, other: &Self::Item) -> bool { | ||||
|         self.aggregation_bits.is_subset(other) | ||||
|     } | ||||
| 
 | ||||
|     fn is_superset(&self, other: &Self::Item) -> bool { | ||||
|         other.is_subset(&self.aggregation_bits) | ||||
|     } | ||||
|  | ||||
|     /// Returns the sync contribution aggregation bits. | ||||
|     fn get_item(&self) -> Self::Item { | ||||
|         self.aggregation_bits.clone() | ||||
|     } | ||||
|  | ||||
|     /// Returns the hash tree root of the root, slot and subcommittee index | ||||
|     /// of the sync contribution. | ||||
|     fn root(&self) -> Hash256 { | ||||
|         SyncCommitteeData { | ||||
|             root: self.beacon_block_root, | ||||
|             slot: self.slot, | ||||
|             subcommittee_index: self.subcommittee_index, | ||||
|         } | ||||
|         .tree_hash_root() | ||||
|     } | ||||
| } | ||||
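Both `SubsetItem` impls above delegate to bitwise containment on the aggregation bits. A hedged sketch of that relation over plain `Vec<bool>` bitfields, standing in for `BitList`/`BitVector` (whose real `is_subset` comes from the `ssz_types` crate):

    /// `a` is a non-strict subset of `b` if every bit set in `a` is also set in `b`.
    fn is_subset(a: &[bool], b: &[bool]) -> bool {
        a.len() == b.len() && a.iter().zip(b).all(|(&x, &y)| !x || y)
    }

    fn is_superset(a: &[bool], b: &[bool]) -> bool {
        is_subset(b, a)
    }

    fn main() {
        let existing = vec![true, true, false, true];
        let incoming = vec![true, false, false, true];
        assert!(is_subset(&incoming, &existing));   // nothing new: an ObserveOutcome::Subset case
        assert!(is_superset(&existing, &incoming)); // the stored aggregate covers the incoming one
        let other = vec![false, false, true, false];
        assert!(!is_subset(&other, &existing));     // carries new bits: worth storing
    }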
| 
 | ||||
| #[derive(Debug, PartialEq)] | ||||
| pub enum ObserveOutcome { | ||||
|     /// This item was already known. | ||||
|     AlreadyKnown, | ||||
|     /// This item is a non-strict subset of an already known item. | ||||
|     Subset, | ||||
|     /// This was the first time this item was observed. | ||||
|     New, | ||||
| } | ||||
| @ -94,26 +175,28 @@ pub enum Error { | ||||
|     }, | ||||
| } | ||||
|  | ||||
| /// A `HashSet` that contains entries related to some `Slot`. | ||||
| struct SlotHashSet { | ||||
|     set: HashSet<Hash256>, | ||||
| /// A `HashMap` that contains entries related to some `Slot`. | ||||
| struct SlotHashSet<I> { | ||||
|     /// Contains a vector of maximally-sized aggregation bitfields/bitvectors | ||||
|     /// such that no bitfield/bitvector is a subset of any other in the list. | ||||
|     map: HashMap<Hash256, Vec<I>>, | ||||
|     slot: Slot, | ||||
|     max_capacity: usize, | ||||
| } | ||||
| 
 | ||||
| impl SlotHashSet { | ||||
| impl<I> SlotHashSet<I> { | ||||
|     pub fn new(slot: Slot, initial_capacity: usize, max_capacity: usize) -> Self { | ||||
|         Self { | ||||
|             slot, | ||||
|             set: HashSet::with_capacity(initial_capacity), | ||||
|             map: HashMap::with_capacity(initial_capacity), | ||||
|             max_capacity, | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     /// Store the item in self so future observations recognise its existence. | ||||
|     pub fn observe_item<T: SlotData>( | ||||
|     pub fn observe_item<S: SlotData + SubsetItem<Item = I>>( | ||||
|         &mut self, | ||||
|         item: &T, | ||||
|         item: &S, | ||||
|         root: Hash256, | ||||
|     ) -> Result<ObserveOutcome, Error> { | ||||
|         if item.get_slot() != self.slot { | ||||
| @ -123,29 +206,45 @@ impl SlotHashSet { | ||||
|             }); | ||||
|         } | ||||
| 
 | ||||
|         if self.set.contains(&root) { | ||||
|             Ok(ObserveOutcome::AlreadyKnown) | ||||
|         } else { | ||||
|         if let Some(aggregates) = self.map.get_mut(&root) { | ||||
|             for existing in aggregates { | ||||
|                 // Check if `item` is a subset of any of the observed aggregates | ||||
|                 if item.is_subset(existing) { | ||||
|                     return Ok(ObserveOutcome::Subset); | ||||
|                 // Check if `item` is a superset of any of the observed aggregates. | ||||
|                 // If true, we replace the existing subset with the new item. This allows us | ||||
|                 // to hold fewer items in the list. | ||||
|                 } else if item.is_superset(existing) { | ||||
|                     *existing = item.get_item(); | ||||
|                     return Ok(ObserveOutcome::New); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         // Here we check to see if this slot has reached the maximum observation count. | ||||
|         // | ||||
|         // The resulting behaviour is that we are no longer able to successfully observe new | ||||
|             // items, however we will continue to return `is_known` values. We could also | ||||
|             // disable `is_known`, however then we would stop forwarding items across the | ||||
|         // items, however we will continue to return `is_known_subset` values. We could also | ||||
|         // disable `is_known_subset`, however then we would stop forwarding items across the | ||||
|         // gossip network and I think that this is a worse case than sending some invalid ones. | ||||
|         // The underlying libp2p network is responsible for removing duplicate messages, so | ||||
|         // this doesn't risk a broadcast loop. | ||||
|             if self.set.len() >= self.max_capacity { | ||||
|         if self.map.len() >= self.max_capacity { | ||||
|             return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); | ||||
|         } | ||||
| 
 | ||||
|             self.set.insert(root); | ||||
| 
 | ||||
|         let item = item.get_item(); | ||||
|         self.map.entry(root).or_default().push(item); | ||||
|         Ok(ObserveOutcome::New) | ||||
|     } | ||||
|     } | ||||
|  | ||||
|     /// Indicates if `item` has been observed before. | ||||
|     pub fn is_known<T: SlotData>(&self, item: &T, root: Hash256) -> Result<bool, Error> { | ||||
|     /// Check if `item` is a non-strict subset of any of the already observed aggregates for | ||||
|     /// the given root and slot. | ||||
|     pub fn is_known_subset<S: SlotData + SubsetItem<Item = I>>( | ||||
|         &self, | ||||
|         item: &S, | ||||
|         root: Hash256, | ||||
|     ) -> Result<bool, Error> { | ||||
|         if item.get_slot() != self.slot { | ||||
|             return Err(Error::IncorrectSlot { | ||||
|                 expected: self.slot, | ||||
| @ -153,25 +252,28 @@ impl SlotHashSet { | ||||
|             }); | ||||
|         } | ||||
| 
 | ||||
|         Ok(self.set.contains(&root)) | ||||
|         Ok(self | ||||
|             .map | ||||
|             .get(&root) | ||||
|             .map_or(false, |agg| agg.iter().any(|val| item.is_subset(val)))) | ||||
|     } | ||||
|  | ||||
|     /// The number of observed items in `self`. | ||||
|     pub fn len(&self) -> usize { | ||||
|         self.set.len() | ||||
|         self.map.len() | ||||
|     } | ||||
| } | ||||
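Taken together, `SlotHashSet` keeps only maximal aggregates per root: an incoming subset is rejected, and an incoming superset overwrites the entry it dominates rather than growing the list. A simplified sketch of that policy (again using `Vec<bool>` in place of the SSZ bit types, with outcome names mirroring `ObserveOutcome`):

    use std::collections::HashMap;

    #[derive(Debug, PartialEq)]
    enum Outcome {
        Subset,
        New,
    }

    fn is_subset(a: &[bool], b: &[bool]) -> bool {
        a.len() == b.len() && a.iter().zip(b).all(|(&x, &y)| !x || y)
    }

    fn observe(map: &mut HashMap<u64, Vec<Vec<bool>>>, root: u64, item: Vec<bool>) -> Outcome {
        if let Some(aggregates) = map.get_mut(&root) {
            for existing in aggregates.iter_mut() {
                if is_subset(&item, existing) {
                    return Outcome::Subset; // Already covered by a stored aggregate.
                } else if is_subset(existing, &item) {
                    *existing = item.clone(); // Replace the dominated entry in place.
                    return Outcome::New;
                }
            }
        }
        map.entry(root).or_default().push(item);
        Outcome::New
    }

    fn main() {
        let mut map = HashMap::new();
        assert_eq!(observe(&mut map, 1, vec![true, false, false]), Outcome::New);
        assert_eq!(observe(&mut map, 1, vec![true, true, false]), Outcome::New); // superset replaces
        assert_eq!(observe(&mut map, 1, vec![false, true, false]), Outcome::Subset);
        assert_eq!(map[&1].len(), 1); // still a single, maximal entry
    }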
|  | ||||
| /// Stores the roots of objects for some number of `Slots`, so we can determine if | ||||
| /// these have previously been seen on the network. | ||||
| pub struct ObservedAggregates<T: TreeHash + SlotData + Consts, E: EthSpec> { | ||||
| pub struct ObservedAggregates<T: SlotData + Consts, E: EthSpec, I> { | ||||
|     lowest_permissible_slot: Slot, | ||||
|     sets: Vec<SlotHashSet>, | ||||
|     sets: Vec<SlotHashSet<I>>, | ||||
|     _phantom_spec: PhantomData<E>, | ||||
|     _phantom_tree_hash: PhantomData<T>, | ||||
| } | ||||
| 
 | ||||
| impl<T: TreeHash + SlotData + Consts, E: EthSpec> Default for ObservedAggregates<T, E> { | ||||
| impl<T: SlotData + Consts, E: EthSpec, I> Default for ObservedAggregates<T, E, I> { | ||||
|     fn default() -> Self { | ||||
|         Self { | ||||
|             lowest_permissible_slot: Slot::new(0), | ||||
| @ -182,17 +284,17 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> Default for ObservedAggregates | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> { | ||||
|     /// Store the root of `item` in `self`. | ||||
| impl<T: SlotData + Consts + SubsetItem<Item = I>, E: EthSpec, I> ObservedAggregates<T, E, I> { | ||||
|     /// Store `item` in `self` keyed at `root`. | ||||
|     /// | ||||
|     /// `root` must equal `item.tree_hash_root()`. | ||||
|     /// `root` must equal `item.root::<SubsetItem>()`. | ||||
|     pub fn observe_item( | ||||
|         &mut self, | ||||
|         item: &T, | ||||
|         root_opt: Option<Hash256>, | ||||
|     ) -> Result<ObserveOutcome, Error> { | ||||
|         let index = self.get_set_index(item.get_slot())?; | ||||
|         let root = root_opt.unwrap_or_else(|| item.tree_hash_root()); | ||||
|         let root = root_opt.unwrap_or_else(|| item.root()); | ||||
| 
 | ||||
|         self.sets | ||||
|             .get_mut(index) | ||||
| @ -200,17 +302,18 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> { | ||||
|             .and_then(|set| set.observe_item(item, root)) | ||||
|     } | ||||
|  | ||||
|     /// Check to see if the `root` of `item` is in self. | ||||
|     /// Check if `item` is a non-strict subset of any of the already observed aggregates for | ||||
|     /// the given root and slot. | ||||
|     /// | ||||
|     /// `root` must equal `a.tree_hash_root()`. | ||||
|     /// `root` must equal `item.root::<SubsetItem>()`. | ||||
|     #[allow(clippy::wrong_self_convention)] | ||||
|     pub fn is_known(&mut self, item: &T, root: Hash256) -> Result<bool, Error> { | ||||
|     pub fn is_known_subset(&mut self, item: &T, root: Hash256) -> Result<bool, Error> { | ||||
|         let index = self.get_set_index(item.get_slot())?; | ||||
| 
 | ||||
|         self.sets | ||||
|             .get(index) | ||||
|             .ok_or(Error::InvalidSetIndex(index)) | ||||
|             .and_then(|set| set.is_known(item, root)) | ||||
|             .and_then(|set| set.is_known_subset(item, root)) | ||||
|     } | ||||
|  | ||||
|     /// The maximum number of slots that items are stored for. | ||||
| @ -296,7 +399,6 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> { | ||||
| #[cfg(not(debug_assertions))] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use tree_hash::TreeHash; | ||||
|     use types::{test_utils::test_random_instance, Hash256}; | ||||
| 
 | ||||
|     type E = types::MainnetEthSpec; | ||||
| @ -330,7 +432,7 @@ mod tests { | ||||
| 
 | ||||
|                     for a in &items { | ||||
|                         assert_eq!( | ||||
|                             store.is_known(a, a.tree_hash_root()), | ||||
|                             store.is_known_subset(a, a.root()), | ||||
|                             Ok(false), | ||||
|                             "should indicate an unknown attestation is unknown" | ||||
|                         ); | ||||
| @ -343,13 +445,13 @@ mod tests { | ||||
| 
 | ||||
|                     for a in &items { | ||||
|                         assert_eq!( | ||||
|                             store.is_known(a, a.tree_hash_root()), | ||||
|                             store.is_known_subset(a, a.root()), | ||||
|                             Ok(true), | ||||
|                             "should indicate a known attestation is known" | ||||
|                         ); | ||||
|                         assert_eq!( | ||||
|                             store.observe_item(a, Some(a.tree_hash_root())), | ||||
|                             Ok(ObserveOutcome::AlreadyKnown), | ||||
|                             store.observe_item(a, Some(a.root())), | ||||
|                             Ok(ObserveOutcome::Subset), | ||||
|                             "should acknowledge an existing attestation" | ||||
|                         ); | ||||
|                     } | ||||
|  | ||||
| @ -841,7 +841,7 @@ mod tests { | ||||
|                     let mut store = $type::default(); | ||||
|                     let max_cap = store.max_capacity(); | ||||
| 
 | ||||
|                     let to_skip = vec![1_u64, 3, 4, 5]; | ||||
|                     let to_skip = [1_u64, 3, 4, 5]; | ||||
|                     let periods = (0..max_cap * 3) | ||||
|                         .into_iter() | ||||
|                         .filter(|i| !to_skip.contains(i)) | ||||
| @ -1012,7 +1012,7 @@ mod tests { | ||||
|                     let mut store = $type::default(); | ||||
|                     let max_cap = store.max_capacity(); | ||||
| 
 | ||||
|                     let to_skip = vec![1_u64, 3, 4, 5]; | ||||
|                     let to_skip = [1_u64, 3, 4, 5]; | ||||
|                     let periods = (0..max_cap * 3) | ||||
|                         .into_iter() | ||||
|                         .filter(|i| !to_skip.contains(i)) | ||||
| @ -1121,7 +1121,7 @@ mod tests { | ||||
|                     let mut store = $type::default(); | ||||
|                     let max_cap = store.max_capacity(); | ||||
| 
 | ||||
|                     let to_skip = vec![1_u64, 3, 4, 5]; | ||||
|                     let to_skip = [1_u64, 3, 4, 5]; | ||||
|                     let periods = (0..max_cap * 3) | ||||
|                         .into_iter() | ||||
|                         .filter(|i| !to_skip.contains(i)) | ||||
|  | ||||
| @ -1,9 +1,10 @@ | ||||
| //! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from | ||||
| //! validators that have already produced a block. | ||||
|  | ||||
| use std::collections::hash_map::Entry; | ||||
| use std::collections::{HashMap, HashSet}; | ||||
| use std::marker::PhantomData; | ||||
| use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned}; | ||||
| use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned}; | ||||
| 
 | ||||
| #[derive(Debug, PartialEq)] | ||||
| pub enum Error { | ||||
| @ -14,6 +15,12 @@ pub enum Error { | ||||
|     ValidatorIndexTooHigh(u64), | ||||
| } | ||||
| 
 | ||||
| #[derive(Eq, Hash, PartialEq, Debug, Default)] | ||||
| struct ProposalKey { | ||||
|     slot: Slot, | ||||
|     proposer: u64, | ||||
| } | ||||
|  | ||||
| /// Maintains a cache of observed `(block.slot, block.proposer)`. | ||||
| /// | ||||
| /// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you | ||||
| /// known_distinct_shufflings` which is much smaller. | ||||
 | ||||
| pub struct ObservedBlockProducers<E: EthSpec> { | ||||
|     finalized_slot: Slot, | ||||
|     items: HashMap<Slot, HashSet<u64>>, | ||||
|     items: HashMap<ProposalKey, HashSet<Hash256>>, | ||||
|     _phantom: PhantomData<E>, | ||||
| } | ||||
| 
 | ||||
| @ -42,6 +49,24 @@ impl<E: EthSpec> Default for ObservedBlockProducers<E> { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| pub enum SeenBlock { | ||||
|     Duplicate, | ||||
|     Slashable, | ||||
|     UniqueNonSlashable, | ||||
| } | ||||
| 
 | ||||
| impl SeenBlock { | ||||
|     pub fn proposer_previously_observed(self) -> bool { | ||||
|         match self { | ||||
|             Self::Duplicate | Self::Slashable => true, | ||||
|             Self::UniqueNonSlashable => false, | ||||
|         } | ||||
|     } | ||||
|     pub fn is_slashable(&self) -> bool { | ||||
|         matches!(self, Self::Slashable) | ||||
|     } | ||||
| } | ||||
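The `(slot, proposer) -> block_roots` layout makes equivocation observable: a second distinct root under the same key is slashable, while re-observing the same root is a plain duplicate. A condensed standalone sketch of that classification (keys as tuples and roots shrunk to `u64` for brevity; the `Entry` match above is collapsed into `entry().or_default()`):

    use std::collections::{HashMap, HashSet};

    #[derive(Debug, PartialEq)]
    enum SeenBlock {
        Duplicate,
        Slashable,
        UniqueNonSlashable,
    }

    fn observe_proposal(
        items: &mut HashMap<(u64, u64), HashSet<u64>>,
        slot: u64,
        proposer: u64,
        block_root: u64,
    ) -> SeenBlock {
        let roots = items.entry((slot, proposer)).or_default();
        let newly_inserted = roots.insert(block_root);
        if roots.len() > 1 {
            SeenBlock::Slashable // Two distinct roots from one proposer at one slot.
        } else if !newly_inserted {
            SeenBlock::Duplicate // The same root seen again.
        } else {
            SeenBlock::UniqueNonSlashable
        }
    }

    fn main() {
        let mut items = HashMap::new();
        assert_eq!(observe_proposal(&mut items, 5, 0, 0xaa), SeenBlock::UniqueNonSlashable);
        assert_eq!(observe_proposal(&mut items, 5, 0, 0xaa), SeenBlock::Duplicate);
        assert_eq!(observe_proposal(&mut items, 5, 0, 0xbb), SeenBlock::Slashable);
    }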
| 
 | ||||
| impl<E: EthSpec> ObservedBlockProducers<E> { | ||||
|     /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will | ||||
|     /// update `self` so future calls to it indicate that this block is known. | ||||
| @ -52,16 +77,44 @@ impl<E: EthSpec> ObservedBlockProducers<E> { | ||||
|     /// | ||||
|     /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. | ||||
|     /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. | ||||
|     pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> { | ||||
|     pub fn observe_proposal( | ||||
|         &mut self, | ||||
|         block_root: Hash256, | ||||
|         block: BeaconBlockRef<'_, E>, | ||||
|     ) -> Result<SeenBlock, Error> { | ||||
|         self.sanitize_block(block)?; | ||||
| 
 | ||||
|         let did_not_exist = self | ||||
|             .items | ||||
|             .entry(block.slot()) | ||||
|             .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize())) | ||||
|             .insert(block.proposer_index()); | ||||
|         let key = ProposalKey { | ||||
|             slot: block.slot(), | ||||
|             proposer: block.proposer_index(), | ||||
|         }; | ||||
| 
 | ||||
|         Ok(!did_not_exist) | ||||
|         let entry = self.items.entry(key); | ||||
| 
 | ||||
|         let slashable_proposal = match entry { | ||||
|             Entry::Occupied(mut occupied_entry) => { | ||||
|                 let block_roots = occupied_entry.get_mut(); | ||||
|                 let newly_inserted = block_roots.insert(block_root); | ||||
| 
 | ||||
|                 let is_equivocation = block_roots.len() > 1; | ||||
| 
 | ||||
|                 if is_equivocation { | ||||
|                     SeenBlock::Slashable | ||||
|                 } else if !newly_inserted { | ||||
|                     SeenBlock::Duplicate | ||||
|                 } else { | ||||
|                     SeenBlock::UniqueNonSlashable | ||||
|                 } | ||||
|             } | ||||
|             Entry::Vacant(vacant_entry) => { | ||||
|                 let block_roots = HashSet::from([block_root]); | ||||
|                 vacant_entry.insert(block_roots); | ||||
| 
 | ||||
|                 SeenBlock::UniqueNonSlashable | ||||
|             } | ||||
|         }; | ||||
| 
 | ||||
|         Ok(slashable_proposal) | ||||
|     } | ||||
|  | ||||
|     /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not | ||||
| @ -72,15 +125,33 @@ impl<E: EthSpec> ObservedBlockProducers<E> { | ||||
|     /// | ||||
|     /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. | ||||
|     /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. | ||||
|     pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> { | ||||
|     pub fn proposer_has_been_observed( | ||||
|         &self, | ||||
|         block: BeaconBlockRef<'_, E>, | ||||
|         block_root: Hash256, | ||||
|     ) -> Result<SeenBlock, Error> { | ||||
|         self.sanitize_block(block)?; | ||||
| 
 | ||||
|         let exists = self | ||||
|             .items | ||||
|             .get(&block.slot()) | ||||
|             .map_or(false, |set| set.contains(&block.proposer_index())); | ||||
|         let key = ProposalKey { | ||||
|             slot: block.slot(), | ||||
|             proposer: block.proposer_index(), | ||||
|         }; | ||||
| 
 | ||||
|         Ok(exists) | ||||
|         if let Some(block_roots) = self.items.get(&key) { | ||||
|             let block_already_known = block_roots.contains(&block_root); | ||||
|             let no_prev_known_blocks = | ||||
|                 block_roots.difference(&HashSet::from([block_root])).count() == 0; | ||||
| 
 | ||||
|             if !no_prev_known_blocks { | ||||
|                 Ok(SeenBlock::Slashable) | ||||
|             } else if block_already_known { | ||||
|                 Ok(SeenBlock::Duplicate) | ||||
|             } else { | ||||
|                 Ok(SeenBlock::UniqueNonSlashable) | ||||
|             } | ||||
|         } else { | ||||
|             Ok(SeenBlock::UniqueNonSlashable) | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     /// Returns `Ok(())` if the given `block` is sane. | ||||
| @ -112,15 +183,15 @@ impl<E: EthSpec> ObservedBlockProducers<E> { | ||||
|         } | ||||
| 
 | ||||
|         self.finalized_slot = finalized_slot; | ||||
|         self.items.retain(|slot, _set| *slot > finalized_slot); | ||||
|         self.items.retain(|key, _| key.slot > finalized_slot); | ||||
|     } | ||||
|  | ||||
|     /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`. | ||||
|     /// | ||||
|     /// This is useful for doppelganger detection. | ||||
|     pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool { | ||||
|         self.items.iter().any(|(slot, producers)| { | ||||
|             slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index) | ||||
|         self.items.iter().any(|(key, _)| { | ||||
|             key.slot.epoch(E::slots_per_epoch()) == epoch && key.proposer == validator_index | ||||
|         }) | ||||
|     } | ||||
| } | ||||
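Under the new key type, pruning is a single `retain` over `ProposalKey`s and the doppelganger check walks keys instead of per-slot sets. A small sketch of the retain behaviour, with keys reduced to `(slot, proposer)` tuples:

    use std::collections::{HashMap, HashSet};

    fn prune(items: &mut HashMap<(u64, u64), HashSet<u64>>, finalized_slot: u64) {
        // Drop every proposal at or before the finalized slot.
        items.retain(|&(slot, _), _| slot > finalized_slot);
    }

    fn main() {
        let mut items: HashMap<(u64, u64), HashSet<u64>> = HashMap::new();
        items.insert((8, 0), HashSet::from([1]));
        items.insert((9, 3), HashSet::from([2]));
        prune(&mut items, 8);
        assert!(!items.contains_key(&(8, 0)));
        assert!(items.contains_key(&(9, 3)));
    }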
| @ -148,9 +219,12 @@ mod tests { | ||||
|  | ||||
|         // Slot 0, proposer 0 | ||||
|         let block_a = get_block(0, 0); | ||||
|         let block_root = block_a.canonical_root(); | ||||
| 
 | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_a.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root, block_a.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Ok(false), | ||||
|             "can observe proposer, indicates proposer unobserved" | ||||
|         ); | ||||
| @ -164,7 +238,10 @@ mod tests { | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(0)) | ||||
|                 .get(&ProposalKey { | ||||
|                     slot: Slot::new(0), | ||||
|                     proposer: 0 | ||||
|                 }) | ||||
|                 .expect("slot zero should be present") | ||||
|                 .len(), | ||||
|             1, | ||||
| @ -182,7 +259,10 @@ mod tests { | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(0)) | ||||
|                 .get(&ProposalKey { | ||||
|                     slot: Slot::new(0), | ||||
|                     proposer: 0 | ||||
|                 }) | ||||
|                 .expect("slot zero should be present") | ||||
|                 .len(), | ||||
|             1, | ||||
| @ -207,9 +287,12 @@ mod tests { | ||||
|  | ||||
|         // First slot of finalized epoch, proposer 0 | ||||
|         let block_b = get_block(E::slots_per_epoch(), 0); | ||||
|         let block_root_b = block_b.canonical_root(); | ||||
| 
 | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_b.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root_b, block_b.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Err(Error::FinalizedBlock { | ||||
|                 slot: E::slots_per_epoch().into(), | ||||
|                 finalized_slot: E::slots_per_epoch().into(), | ||||
| @ -229,7 +312,9 @@ mod tests { | ||||
|         let block_b = get_block(three_epochs, 0); | ||||
| 
 | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_b.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root_b, block_b.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Ok(false), | ||||
|             "can insert non-finalized block" | ||||
|         ); | ||||
| @ -238,7 +323,10 @@ mod tests { | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(three_epochs)) | ||||
|                 .get(&ProposalKey { | ||||
|                     slot: Slot::new(three_epochs), | ||||
|                     proposer: 0 | ||||
|                 }) | ||||
|                 .expect("the three epochs slot should be present") | ||||
|                 .len(), | ||||
|             1, | ||||
| @ -262,7 +350,10 @@ mod tests { | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(three_epochs)) | ||||
|                 .get(&ProposalKey { | ||||
|                     slot: Slot::new(three_epochs), | ||||
|                     proposer: 0 | ||||
|                 }) | ||||
|                 .expect("the three epochs slot should be present") | ||||
|                 .len(), | ||||
|             1, | ||||
| @ -276,24 +367,33 @@ mod tests { | ||||
|  | ||||
|         // Slot 0, proposer 0 | ||||
|         let block_a = get_block(0, 0); | ||||
|         let block_root_a = block_a.canonical_root(); | ||||
| 
 | ||||
|         assert_eq!( | ||||
|             cache.proposer_has_been_observed(block_a.to_ref()), | ||||
|             cache | ||||
|                 .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) | ||||
|                 .map(|x| x.proposer_previously_observed()), | ||||
|             Ok(false), | ||||
|             "no observation in empty cache" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_a.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root_a, block_a.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Ok(false), | ||||
|             "can observe proposer, indicates proposer unobserved" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.proposer_has_been_observed(block_a.to_ref()), | ||||
|             cache | ||||
|                 .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) | ||||
|                 .map(|x| x.proposer_previously_observed()), | ||||
|             Ok(true), | ||||
|             "observed block is indicated as true" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_a.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root_a, block_a.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Ok(true), | ||||
|             "observing again indicates true" | ||||
|         ); | ||||
| @ -303,7 +403,10 @@ mod tests { | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(0)) | ||||
|                 .get(&ProposalKey { | ||||
|                     slot: Slot::new(0), | ||||
|                     proposer: 0 | ||||
|                 }) | ||||
|                 .expect("slot zero should be present") | ||||
|                 .len(), | ||||
|             1, | ||||
| @ -312,24 +415,33 @@ mod tests { | ||||
|  | ||||
|         // Slot 1, proposer 0 | ||||
|         let block_b = get_block(1, 0); | ||||
|         let block_root_b = block_b.canonical_root(); | ||||
| 
 | ||||
|         assert_eq!( | ||||
|             cache.proposer_has_been_observed(block_b.to_ref()), | ||||
|             cache | ||||
|                 .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) | ||||
|                 .map(|x| x.proposer_previously_observed()), | ||||
|             Ok(false), | ||||
|             "no observation for new slot" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_b.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root_b, block_b.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Ok(false), | ||||
|             "can observe proposer for new slot, indicates proposer unobserved" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.proposer_has_been_observed(block_b.to_ref()), | ||||
|             cache | ||||
|                 .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) | ||||
|                 .map(|x| x.proposer_previously_observed()), | ||||
|             Ok(true), | ||||
|             "observed block in slot 1 is indicated as true" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_b.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root_b, block_b.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Ok(true), | ||||
|             "observing slot 1 again indicates true" | ||||
|         ); | ||||
| @ -339,7 +451,10 @@ mod tests { | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(0)) | ||||
|                 .get(&ProposalKey { | ||||
|                     slot: Slot::new(0), | ||||
|                     proposer: 0 | ||||
|                 }) | ||||
|                 .expect("slot zero should be present") | ||||
|                 .len(), | ||||
|             1, | ||||
| @ -348,7 +463,10 @@ mod tests { | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(1)) | ||||
|                 .get(&ProposalKey { | ||||
|                     slot: Slot::new(1), | ||||
|                     proposer: 0 | ||||
|                 }) | ||||
|                 .expect("slot zero should be present") | ||||
|                 .len(), | ||||
|             1, | ||||
| @ -357,45 +475,54 @@ mod tests { | ||||
|  | ||||
|         // Slot 0, proposer 1 | ||||
|         let block_c = get_block(0, 1); | ||||
|         let block_root_c = block_c.canonical_root(); | ||||
| 
 | ||||
|         assert_eq!( | ||||
|             cache.proposer_has_been_observed(block_c.to_ref()), | ||||
|             cache | ||||
|                 .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) | ||||
|                 .map(|x| x.proposer_previously_observed()), | ||||
|             Ok(false), | ||||
|             "no observation for new proposer" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_c.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root_c, block_c.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Ok(false), | ||||
|             "can observe new proposer, indicates proposer unobserved" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.proposer_has_been_observed(block_c.to_ref()), | ||||
|             cache | ||||
|                 .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) | ||||
|                 .map(|x| x.proposer_previously_observed()), | ||||
|             Ok(true), | ||||
|             "observed new proposer block is indicated as true" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache.observe_proposer(block_c.to_ref()), | ||||
|             cache | ||||
|                 .observe_proposal(block_root_c, block_c.to_ref()) | ||||
|                 .map(SeenBlock::proposer_previously_observed), | ||||
|             Ok(true), | ||||
|             "observing new proposer again indicates true" | ||||
|         ); | ||||
| 
 | ||||
|         assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); | ||||
|         assert_eq!(cache.items.len(), 2, "two slots should be present"); | ||||
|         assert_eq!(cache.items.len(), 3, "three slots should be present"); | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(0)) | ||||
|                 .expect("slot zero should be present") | ||||
|                 .len(), | ||||
|                 .iter() | ||||
|                 .filter(|(k, _)| k.slot == cache.finalized_slot) | ||||
|                 .count(), | ||||
|             2, | ||||
|             "two proposers should be present in slot 0" | ||||
|         ); | ||||
|         assert_eq!( | ||||
|             cache | ||||
|                 .items | ||||
|                 .get(&Slot::new(1)) | ||||
|                 .expect("slot zero should be present") | ||||
|                 .len(), | ||||
|                 .iter() | ||||
|                 .filter(|(k, _)| k.slot == Slot::new(1)) | ||||
|                 .count(), | ||||
|             1, | ||||
|             "only one proposer should be present in slot 1" | ||||
|         ); | ||||
|  | ||||
| @ -28,15 +28,14 @@ | ||||
| 
 | ||||
| use crate::observed_attesters::SlotSubcommitteeIndex; | ||||
| use crate::{ | ||||
|     beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, | ||||
|     metrics, | ||||
|     observed_aggregates::ObserveOutcome, | ||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, | ||||
|     beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, | ||||
|     observed_aggregates::ObserveOutcome, BeaconChain, BeaconChainError, BeaconChainTypes, | ||||
| }; | ||||
| use bls::{verify_signature_sets, PublicKeyBytes}; | ||||
| use derivative::Derivative; | ||||
| use safe_arith::ArithError; | ||||
| use slot_clock::SlotClock; | ||||
| use ssz_derive::{Decode, Encode}; | ||||
| use state_processing::per_block_processing::errors::SyncCommitteeMessageValidationError; | ||||
| use state_processing::signature_sets::{ | ||||
|     signed_sync_aggregate_selection_proof_signature_set, signed_sync_aggregate_signature_set, | ||||
| @ -47,9 +46,11 @@ use std::borrow::Cow; | ||||
| use std::collections::HashMap; | ||||
| use strum::AsRefStr; | ||||
| use tree_hash::TreeHash; | ||||
| use tree_hash_derive::TreeHash; | ||||
| use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; | ||||
| use types::slot_data::SlotData; | ||||
| use types::sync_committee::Error as SyncCommitteeError; | ||||
| use types::ChainSpec; | ||||
| use types::{ | ||||
|     sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError, | ||||
|     EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, | ||||
| @ -110,14 +111,14 @@ pub enum Error { | ||||
|     /// | ||||
|     /// The peer has sent an invalid message. | ||||
|     AggregatorPubkeyUnknown(u64), | ||||
|     /// The sync contribution has been seen before; either in a block, on the gossip network or from a | ||||
|     /// local validator. | ||||
|     /// The sync contribution or a superset of this sync contribution's aggregation bits for the same data | ||||
|     /// has been seen before; either in a block, on the gossip network, or from a local validator. | ||||
|     /// | ||||
|     /// ## Peer scoring | ||||
|     /// | ||||
|     /// It's unclear if this sync contribution is valid, however we have already observed it and do not | ||||
|     /// need to observe it again. | ||||
|     SyncContributionAlreadyKnown(Hash256), | ||||
|     SyncContributionSupersetKnown(Hash256), | ||||
|     /// There has already been an aggregation observed for this validator, we refuse to process a | ||||
|     /// second. | ||||
|     /// | ||||
| @ -268,6 +269,14 @@ pub struct VerifiedSyncContribution<T: BeaconChainTypes> { | ||||
|     participant_pubkeys: Vec<PublicKeyBytes>, | ||||
| } | ||||
|  | ||||
| /// The sync contribution data. | ||||
| #[derive(Encode, Decode, TreeHash)] | ||||
| pub struct SyncCommitteeData { | ||||
|     pub slot: Slot, | ||||
|     pub root: Hash256, | ||||
|     pub subcommittee_index: u64, | ||||
| } | ||||
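Keying the observation cache by the hash of `(slot, root, subcommittee_index)` rather than by the full contribution's `tree_hash_root` is what lets contributions that differ only in their aggregation bits land in the same bucket for subset comparison. A sketch of the idea with an ordinary tuple key in place of the SSZ tree hash (purely illustrative; the real key is `SyncCommitteeData`'s `tree_hash_root()`):

    use std::collections::HashMap;

    // Contributions that differ only in aggregation bits share a key, so the
    // subset logic can compare them; distinct data gets a distinct bucket.
    type DataKey = (u64 /* slot */, [u8; 32] /* beacon_block_root */, u64 /* subcommittee */);

    fn main() {
        let mut observed: HashMap<DataKey, Vec<Vec<bool>>> = HashMap::new();
        let key: DataKey = (32, [0u8; 32], 3);
        observed.entry(key).or_default().push(vec![true, false]);
        observed.entry(key).or_default().push(vec![false, true]);
        assert_eq!(observed[&key].len(), 2); // same data, different bits, one bucket
    }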
|  | ||||
| /// Wraps a `SyncCommitteeMessage` that has been verified for propagation on the gossip network. | ||||
| #[derive(Clone)] | ||||
| pub struct VerifiedSyncCommitteeMessage { | ||||
| @ -287,7 +296,7 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> { | ||||
|         let subcommittee_index = contribution.subcommittee_index as usize; | ||||
|  | ||||
|         // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. | ||||
|         verify_propagation_slot_range(&chain.slot_clock, contribution)?; | ||||
|         verify_propagation_slot_range(&chain.slot_clock, contribution, &chain.spec)?; | ||||
|  | ||||
|         // Validate subcommittee index. | ||||
|         if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { | ||||
| @ -314,15 +323,22 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> { | ||||
|             return Err(Error::AggregatorNotInCommittee { aggregator_index }); | ||||
|         }; | ||||
|  | ||||
|         // Ensure the valid sync contribution has not already been seen locally. | ||||
|         let contribution_root = contribution.tree_hash_root(); | ||||
|         // Ensure the valid sync contribution or its superset has not already been seen locally. | ||||
|         let contribution_data_root = SyncCommitteeData { | ||||
|             slot: contribution.slot, | ||||
|             root: contribution.beacon_block_root, | ||||
|             subcommittee_index: contribution.subcommittee_index, | ||||
|         } | ||||
|         .tree_hash_root(); | ||||
| 
 | ||||
|         if chain | ||||
|             .observed_sync_contributions | ||||
|             .write() | ||||
|             .is_known(contribution, contribution_root) | ||||
|             .is_known_subset(contribution, contribution_data_root) | ||||
|             .map_err(|e| Error::BeaconChainError(e.into()))? | ||||
|         { | ||||
|             return Err(Error::SyncContributionAlreadyKnown(contribution_root)); | ||||
|             metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); | ||||
|             return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); | ||||
|         } | ||||
|  | ||||
|         // Ensure there has been no other observed aggregate for the given `aggregator_index`. | ||||
| @ -376,13 +392,14 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> { | ||||
|         // | ||||
|         // It's important to double check that the contribution is not already known, otherwise two | ||||
|         // contributions processed at the same time could be published. | ||||
|         if let ObserveOutcome::AlreadyKnown = chain | ||||
|         if let ObserveOutcome::Subset = chain | ||||
|             .observed_sync_contributions | ||||
|             .write() | ||||
|             .observe_item(contribution, Some(contribution_root)) | ||||
|             .observe_item(contribution, Some(contribution_data_root)) | ||||
|             .map_err(|e| Error::BeaconChainError(e.into()))? | ||||
|         { | ||||
|             return Err(Error::SyncContributionAlreadyKnown(contribution_root)); | ||||
|             metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); | ||||
|             return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); | ||||
|         } | ||||
|  | ||||
|         // Observe the aggregator so we don't process another aggregate from them. | ||||
| @ -442,7 +459,7 @@ impl VerifiedSyncCommitteeMessage { | ||||
|         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). | ||||
|         // | ||||
|         // We do not queue future sync committee messages for later processing. | ||||
|         verify_propagation_slot_range(&chain.slot_clock, &sync_message)?; | ||||
|         verify_propagation_slot_range(&chain.slot_clock, &sync_message, &chain.spec)?; | ||||
|  | ||||
|         // Ensure the `subnet_id` is valid for the given validator. | ||||
|         let pubkey = chain | ||||
| @ -558,11 +575,11 @@ impl VerifiedSyncCommitteeMessage { | ||||
| pub fn verify_propagation_slot_range<S: SlotClock, U: SlotData>( | ||||
|     slot_clock: &S, | ||||
|     sync_contribution: &U, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<(), Error> { | ||||
|     let message_slot = sync_contribution.get_slot(); | ||||
| 
 | ||||
|     let latest_permissible_slot = slot_clock | ||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) | ||||
|         .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) | ||||
|         .ok_or(BeaconChainError::UnableToReadSlot)?; | ||||
|     if message_slot > latest_permissible_slot { | ||||
|         return Err(Error::FutureSlot { | ||||
| @ -572,7 +589,7 @@ pub fn verify_propagation_slot_range<S: SlotClock, U: SlotData>( | ||||
|     } | ||||
| 
 | ||||
|     let earliest_permissible_slot = slot_clock | ||||
|         .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) | ||||
|         .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) | ||||
|         .ok_or(BeaconChainError::UnableToReadSlot)?; | ||||
| 
 | ||||
|     if message_slot < earliest_permissible_slot { | ||||
|  | ||||
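The change above threads `ChainSpec` through so the tolerance comes from `spec.maximum_gossip_clock_disparity()` instead of the hard-coded `MAXIMUM_GOSSIP_CLOCK_DISPARITY` constant; the acceptance window itself is unchanged. A back-of-the-envelope sketch of that window with plain integer slots (a hypothetical helper, not the crate's API):

    fn verify_slot_range(message_slot: u64, earliest: u64, latest: u64) -> Result<(), String> {
        if message_slot > latest {
            return Err(format!("future slot: {message_slot} > {latest}"));
        }
        if message_slot < earliest {
            return Err(format!("past slot: {message_slot} < {earliest}"));
        }
        Ok(())
    }

    fn main() {
        // Current slot 100, with a disparity allowance of one slot either side.
        assert!(verify_slot_range(100, 99, 101).is_ok());
        assert!(verify_slot_range(101, 99, 101).is_ok()); // within the future tolerance
        assert!(verify_slot_range(98, 99, 101).is_err()); // too old to propagate
    }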
| @ -17,12 +17,11 @@ use bls::get_withdrawal_credentials; | ||||
| use execution_layer::{ | ||||
|     auth::JwtKey, | ||||
|     test_utils::{ | ||||
|         ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET, | ||||
|         DEFAULT_TERMINAL_BLOCK, | ||||
|         ExecutionBlockGenerator, MockBuilder, MockBuilderServer, MockExecutionLayer, | ||||
|         DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, | ||||
|     }, | ||||
|     ExecutionLayer, | ||||
| }; | ||||
| use fork_choice::CountUnrealized; | ||||
| use futures::channel::mpsc::Receiver; | ||||
| pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; | ||||
| use int_to_bytes::int_to_bytes32; | ||||
| @ -168,7 +167,6 @@ pub struct Builder<T: BeaconChainTypes> { | ||||
|     store_mutator: Option<BoxedMutator<T::EthSpec, T::HotStore, T::ColdStore>>, | ||||
|     execution_layer: Option<ExecutionLayer<T::EthSpec>>, | ||||
|     mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, | ||||
|     mock_builder: Option<TestingBuilder<T::EthSpec>>, | ||||
|     testing_slot_clock: Option<TestingSlotClock>, | ||||
|     runtime: TestRuntime, | ||||
|     log: Logger, | ||||
| @ -302,7 +300,6 @@ where | ||||
|             store_mutator: None, | ||||
|             execution_layer: None, | ||||
|             mock_execution_layer: None, | ||||
|             mock_builder: None, | ||||
|             testing_slot_clock: None, | ||||
|             runtime, | ||||
|             log, | ||||
| @ -434,7 +431,11 @@ where | ||||
|         self | ||||
|     } | ||||
| 
 | ||||
|     pub fn mock_execution_layer(mut self) -> Self { | ||||
|     pub fn mock_execution_layer(self) -> Self { | ||||
|         self.mock_execution_layer_with_config(None) | ||||
|     } | ||||
| 
 | ||||
|     pub fn mock_execution_layer_with_config(mut self, builder_threshold: Option<u128>) -> Self { | ||||
|         let spec = self.spec.clone().expect("cannot build without spec"); | ||||
|         let shanghai_time = spec.capella_fork_epoch.map(|epoch| { | ||||
|             HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() | ||||
| @ -443,55 +444,15 @@ where | ||||
|             self.runtime.task_executor.clone(), | ||||
|             DEFAULT_TERMINAL_BLOCK, | ||||
|             shanghai_time, | ||||
|             None, | ||||
|             builder_threshold, | ||||
|             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), | ||||
|             spec, | ||||
|             None, | ||||
|         ); | ||||
|         self.execution_layer = Some(mock.el.clone()); | ||||
|         self.mock_execution_layer = Some(mock); | ||||
|         self | ||||
|     } | ||||
| 
 | ||||
|     pub fn mock_execution_layer_with_builder( | ||||
|         mut self, | ||||
|         beacon_url: SensitiveUrl, | ||||
|         builder_threshold: Option<u128>, | ||||
|     ) -> Self { | ||||
|         // Get a random unused port | ||||
|         let port = unused_port::unused_tcp4_port().unwrap(); | ||||
|         let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); | ||||
| 
 | ||||
|         let spec = self.spec.clone().expect("cannot build without spec"); | ||||
|         let shanghai_time = spec.capella_fork_epoch.map(|epoch| { | ||||
|             HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() | ||||
|         }); | ||||
|         let mock_el = MockExecutionLayer::new( | ||||
|             self.runtime.task_executor.clone(), | ||||
|             DEFAULT_TERMINAL_BLOCK, | ||||
|             shanghai_time, | ||||
|             builder_threshold, | ||||
|             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), | ||||
|             spec.clone(), | ||||
|             Some(builder_url.clone()), | ||||
|         ) | ||||
|         .move_to_terminal_block(); | ||||
| 
 | ||||
|         let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); | ||||
| 
 | ||||
|         self.mock_builder = Some(TestingBuilder::new( | ||||
|             mock_el_url, | ||||
|             builder_url, | ||||
|             beacon_url, | ||||
|             spec, | ||||
|             self.runtime.task_executor.clone(), | ||||
|         )); | ||||
|         self.execution_layer = Some(mock_el.el.clone()); | ||||
|         self.mock_execution_layer = Some(mock_el); | ||||
| 
 | ||||
|         self | ||||
|     } | ||||
|  | ||||
|     /// Instruct the mock execution engine to always return a "valid" response to any payload it is | ||||
|     /// asked to execute. | ||||
|     pub fn mock_execution_layer_all_payloads_valid(self) -> Self { | ||||
| @ -517,18 +478,23 @@ where | ||||
|         let validator_keypairs = self | ||||
|             .validator_keypairs | ||||
|             .expect("cannot build without validator keypairs"); | ||||
|         let chain_config = self.chain_config.unwrap_or_default(); | ||||
| 
 | ||||
|         let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) | ||||
|             .logger(log.clone()) | ||||
|             .custom_spec(spec) | ||||
|             .store(self.store.expect("cannot build without store")) | ||||
|             .store_migrator_config(MigratorConfig::default().blocking()) | ||||
|             .store_migrator_config( | ||||
|                 MigratorConfig::default() | ||||
|                     .blocking() | ||||
|                     .epochs_per_migration(chain_config.epochs_per_migration), | ||||
|             ) | ||||
|             .task_executor(self.runtime.task_executor.clone()) | ||||
|             .execution_layer(self.execution_layer) | ||||
|             .dummy_eth1_backend() | ||||
|             .expect("should build dummy backend") | ||||
|             .shutdown_sender(shutdown_tx) | ||||
|             .chain_config(self.chain_config.unwrap_or_default()) | ||||
|             .chain_config(chain_config) | ||||
|             .event_handler(Some(ServerSentEventHandler::new_with_capacity( | ||||
|                 log.clone(), | ||||
|                 5, | ||||
| @ -568,7 +534,7 @@ where | ||||
|             shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), | ||||
|             runtime: self.runtime, | ||||
|             mock_execution_layer: self.mock_execution_layer, | ||||
|             mock_builder: self.mock_builder.map(Arc::new), | ||||
|             mock_builder: None, | ||||
|             rng: make_rng(), | ||||
|         } | ||||
|     } | ||||
| @ -593,7 +559,7 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> { | ||||
|     pub runtime: TestRuntime, | ||||
| 
 | ||||
|     pub mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, | ||||
|     pub mock_builder: Option<Arc<TestingBuilder<T::EthSpec>>>, | ||||
|     pub mock_builder: Option<Arc<MockBuilder<T::EthSpec>>>, | ||||
| 
 | ||||
|     pub rng: Mutex<StdRng>, | ||||
| } | ||||
| @ -629,6 +595,49 @@ where | ||||
|             .execution_block_generator() | ||||
|     } | ||||
| 
 | ||||
|     pub fn set_mock_builder(&mut self, beacon_url: SensitiveUrl) -> MockBuilderServer { | ||||
|         let mock_el = self | ||||
|             .mock_execution_layer | ||||
|             .as_ref() | ||||
|             .expect("harness was not built with mock execution layer"); | ||||
| 
 | ||||
|         let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); | ||||
|  | ||||
|         // Create the builder, listening on a free port. | ||||
|         let (mock_builder, mock_builder_server) = MockBuilder::new_for_testing( | ||||
|             mock_el_url, | ||||
|             beacon_url, | ||||
|             self.spec.clone(), | ||||
|             self.runtime.task_executor.clone(), | ||||
|         ); | ||||
|  | ||||
|         // Set the builder URL in the execution layer now that its port is known. | ||||
|         let builder_listen_addr = mock_builder_server.local_addr(); | ||||
|         let port = builder_listen_addr.port(); | ||||
|         mock_el | ||||
|             .el | ||||
|             .set_builder_url( | ||||
|                 SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(), | ||||
|                 None, | ||||
|             ) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         self.mock_builder = Some(Arc::new(mock_builder)); | ||||
|  | ||||
|         // Sanity check. | ||||
|         let el_builder = self | ||||
|             .chain | ||||
|             .execution_layer | ||||
|             .as_ref() | ||||
|             .unwrap() | ||||
|             .builder() | ||||
|             .unwrap(); | ||||
|         let mock_el_builder = mock_el.el.builder().unwrap(); | ||||
|         assert!(Arc::ptr_eq(&el_builder, &mock_el_builder)); | ||||
| 
 | ||||
|         mock_builder_server | ||||
|     } | ||||
| 
 | ||||
|     pub fn get_all_validators(&self) -> Vec<usize> { | ||||
|         (0..self.validator_keypairs.len()).collect() | ||||
|     } | ||||
| @ -734,6 +743,15 @@ where | ||||
|         state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() | ||||
|     } | ||||
| 
 | ||||
|     pub async fn make_blinded_block( | ||||
|         &self, | ||||
|         state: BeaconState<E>, | ||||
|         slot: Slot, | ||||
|     ) -> (SignedBlindedBeaconBlock<E>, BeaconState<E>) { | ||||
|         let (unblinded, new_state) = self.make_block(state, slot).await; | ||||
|         (unblinded.into(), new_state) | ||||
|     } | ||||
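`make_blinded_block` relies on the existing conversion from a full signed block into its blinded counterpart (`unblinded.into()`). A hedged sketch of that shape with stand-in types, since the real `From` impl (full payload replaced by a payload header) lives in the `types` crate:

    // Illustrative stand-ins for SignedBeaconBlock / SignedBlindedBeaconBlock.
    struct FullBlock {
        payload: Vec<u8>,
    }

    struct BlindedBlock {
        payload_summary: u64,
    }

    impl From<FullBlock> for BlindedBlock {
        fn from(full: FullBlock) -> Self {
            // Stand-in for reducing the payload to a header/root.
            let payload_summary = full.payload.iter().map(|b| *b as u64).sum();
            BlindedBlock { payload_summary }
        }
    }

    fn main() {
        let full = FullBlock { payload: vec![1, 2, 3] };
        let blinded: BlindedBlock = full.into();
        assert_eq!(blinded.payload_summary, 6);
    }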
|  | ||||
|     /// Returns a newly created block, signed by the proposer for the given slot. | ||||
|     pub async fn make_block( | ||||
|         &self, | ||||
| @ -746,9 +764,7 @@ where | ||||
|         complete_state_advance(&mut state, None, slot, &self.spec) | ||||
|             .expect("should be able to advance state to slot"); | ||||
| 
 | ||||
|         state | ||||
|             .build_all_caches(&self.spec) | ||||
|             .expect("should build caches"); | ||||
|         state.build_caches(&self.spec).expect("should build caches"); | ||||
| 
 | ||||
|         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); | ||||
| 
 | ||||
| @ -795,9 +811,7 @@ where | ||||
|         complete_state_advance(&mut state, None, slot, &self.spec) | ||||
|             .expect("should be able to advance state to slot"); | ||||
| 
 | ||||
|         state | ||||
|             .build_all_caches(&self.spec) | ||||
|             .expect("should build caches"); | ||||
|         state.build_caches(&self.spec).expect("should build caches"); | ||||
| 
 | ||||
|         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); | ||||
| 
 | ||||
| @ -1515,6 +1529,36 @@ where | ||||
|         .sign(sk, &fork, genesis_validators_root, &self.chain.spec) | ||||
|     } | ||||
| 
 | ||||
|     pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> { | ||||
|         let proposer_slashing = self.make_proposer_slashing(validator_index); | ||||
|         if let ObservationOutcome::New(verified_proposer_slashing) = self | ||||
|             .chain | ||||
|             .verify_proposer_slashing_for_gossip(proposer_slashing) | ||||
|             .expect("should verify proposer slashing for gossip") | ||||
|         { | ||||
|             self.chain | ||||
|                 .import_proposer_slashing(verified_proposer_slashing); | ||||
|             Ok(()) | ||||
|         } else { | ||||
|             Err("should observe new proposer slashing".to_string()) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub fn add_attester_slashing(&self, validator_indices: Vec<u64>) -> Result<(), String> { | ||||
|         let attester_slashing = self.make_attester_slashing(validator_indices); | ||||
|         if let ObservationOutcome::New(verified_attester_slashing) = self | ||||
|             .chain | ||||
|             .verify_attester_slashing_for_gossip(attester_slashing) | ||||
|             .expect("should verify attester slashing for gossip") | ||||
|         { | ||||
|             self.chain | ||||
|                 .import_attester_slashing(verified_attester_slashing); | ||||
|             Ok(()) | ||||
|         } else { | ||||
|             Err("should observe new attester slashing".to_string()) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub fn add_bls_to_execution_change( | ||||
|         &self, | ||||
|         validator_index: u64, | ||||
| @ -1696,8 +1740,8 @@ where | ||||
|             .process_block( | ||||
|                 block_root, | ||||
|                 Arc::new(block), | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()), | ||||
|             ) | ||||
|             .await? | ||||
|             .into(); | ||||
| @ -1714,8 +1758,8 @@ where | ||||
|             .process_block( | ||||
|                 block.canonical_root(), | ||||
|                 Arc::new(block), | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()), | ||||
|             ) | ||||
|             .await? | ||||
|             .into(); | ||||
|  | ||||
| @ -5,7 +5,7 @@ use std::time::Duration; | ||||
| /// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a
 | ||||
| /// time-out (i.e., no indefinitely-blocking operations).
 | ||||
| ///
 | ||||
| /// Timeouts can be optionally be disabled at runtime for all instances of this type by calling
 | ||||
| /// Timeouts can be optionally disabled at runtime for all instances of this type by calling
 | ||||
| /// `TimeoutRwLock::disable_timeouts()`.
 | ||||
| pub struct TimeoutRwLock<T>(RwLock<T>); | ||||
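| 
 | ||||
| // Usage sketch, assuming the wrapper exposes `new` plus timed accessors | ||||
| // `try_read_for`/`try_write_for` that return `None` on timeout: | ||||
| // | ||||
| //     let lock = TimeoutRwLock::new(0u64); | ||||
| //     if let Some(mut guard) = lock.try_write_for(Duration::from_millis(100)) { | ||||
| //         *guard += 1; | ||||
| //     } | ||||
| //     assert_eq!(lock.try_read_for(Duration::from_millis(100)).map(|g| *g), Some(1)); | ||||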
| 
 | ||||
|  | ||||
| @ -9,7 +9,7 @@ use beacon_chain::{ | ||||
|     test_utils::{ | ||||
|         test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, | ||||
|     }, | ||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, | ||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped, | ||||
| }; | ||||
| use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; | ||||
| use int_to_bytes::int_to_bytes32; | ||||
| @ -47,6 +47,10 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp | ||||
| 
 | ||||
|     let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||
|         .spec(spec) | ||||
|         .chain_config(ChainConfig { | ||||
|             reconstruct_historic_states: true, | ||||
|             ..ChainConfig::default() | ||||
|         }) | ||||
|         .keypairs(KEYPAIRS[0..validator_count].to_vec()) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
| @ -79,6 +83,10 @@ fn get_harness_capella_spec( | ||||
| 
 | ||||
|     let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||
|         .spec(spec.clone()) | ||||
|         .chain_config(ChainConfig { | ||||
|             reconstruct_historic_states: true, | ||||
|             ..ChainConfig::default() | ||||
|         }) | ||||
|         .keypairs(validator_keypairs) | ||||
|         .withdrawal_keypairs( | ||||
|             KEYPAIRS[0..validator_count] | ||||
| @ -699,8 +707,8 @@ async fn aggregated_gossip_verification() { | ||||
|             |tester, err| { | ||||
|                 assert!(matches!( | ||||
|                     err, | ||||
|                     AttnError::AttestationAlreadyKnown(hash) | ||||
|                     if hash == tester.valid_aggregate.message.aggregate.tree_hash_root() | ||||
|                     AttnError::AttestationSupersetKnown(hash) | ||||
|                     if hash == tester.valid_aggregate.message.aggregate.data.tree_hash_root() | ||||
|                 )) | ||||
|             }, | ||||
|         ) | ||||
|  | ||||
| @ -3,8 +3,10 @@ | ||||
| use beacon_chain::test_utils::{ | ||||
|     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, | ||||
| }; | ||||
| use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer}; | ||||
| use fork_choice::CountUnrealized; | ||||
| use beacon_chain::{ | ||||
|     BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, | ||||
|     NotifyExecutionLayer, | ||||
| }; | ||||
| use lazy_static::lazy_static; | ||||
| use logging::test_logger; | ||||
| use slasher::{Config as SlasherConfig, Slasher}; | ||||
| @ -68,6 +70,10 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> { | ||||
| fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> { | ||||
|     let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||
|         .default_spec() | ||||
|         .chain_config(ChainConfig { | ||||
|             reconstruct_historic_states: true, | ||||
|             ..ChainConfig::default() | ||||
|         }) | ||||
|         .keypairs(KEYPAIRS[0..validator_count].to_vec()) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
| @ -148,18 +154,14 @@ async fn chain_segment_full_segment() { | ||||
|     // Sneak in a little check to ensure we can process empty chain segments.
 | ||||
|     harness | ||||
|         .chain | ||||
|         .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes) | ||||
|         .process_chain_segment(vec![], NotifyExecutionLayer::Yes) | ||||
|         .await | ||||
|         .into_block_error() | ||||
|         .expect("should import empty chain segment"); | ||||
| 
 | ||||
|     harness | ||||
|         .chain | ||||
|         .process_chain_segment( | ||||
|             blocks.clone(), | ||||
|             CountUnrealized::True, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|         ) | ||||
|         .process_chain_segment(blocks.clone(), NotifyExecutionLayer::Yes) | ||||
|         .await | ||||
|         .into_block_error() | ||||
|         .expect("should import chain segment"); | ||||
| @ -188,11 +190,7 @@ async fn chain_segment_varying_chunk_size() { | ||||
|         for chunk in blocks.chunks(*chunk_size) { | ||||
|             harness | ||||
|                 .chain | ||||
|                 .process_chain_segment( | ||||
|                     chunk.to_vec(), | ||||
|                     CountUnrealized::True, | ||||
|                     NotifyExecutionLayer::Yes, | ||||
|                 ) | ||||
|                 .process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes) | ||||
|                 .await | ||||
|                 .into_block_error() | ||||
|                 .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); | ||||
| @ -228,7 +226,7 @@ async fn chain_segment_non_linear_parent_roots() { | ||||
|         matches!( | ||||
|             harness | ||||
|                 .chain | ||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) | ||||
|                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||
|                 .await | ||||
|                 .into_block_error(), | ||||
|             Err(BlockError::NonLinearParentRoots) | ||||
| @ -248,7 +246,7 @@ async fn chain_segment_non_linear_parent_roots() { | ||||
|         matches!( | ||||
|             harness | ||||
|                 .chain | ||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) | ||||
|                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||
|                 .await | ||||
|                 .into_block_error(), | ||||
|             Err(BlockError::NonLinearParentRoots) | ||||
| @ -279,7 +277,7 @@ async fn chain_segment_non_linear_slots() { | ||||
|         matches!( | ||||
|             harness | ||||
|                 .chain | ||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) | ||||
|                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||
|                 .await | ||||
|                 .into_block_error(), | ||||
|             Err(BlockError::NonLinearSlots) | ||||
| @ -300,7 +298,7 @@ async fn chain_segment_non_linear_slots() { | ||||
|         matches!( | ||||
|             harness | ||||
|                 .chain | ||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) | ||||
|                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||
|                 .await | ||||
|                 .into_block_error(), | ||||
|             Err(BlockError::NonLinearSlots) | ||||
| @ -326,7 +324,7 @@ async fn assert_invalid_signature( | ||||
|         matches!( | ||||
|             harness | ||||
|                 .chain | ||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) | ||||
|                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||
|                 .await | ||||
|                 .into_block_error(), | ||||
|             Err(BlockError::InvalidSignature) | ||||
| @ -348,11 +346,7 @@ async fn assert_invalid_signature( | ||||
|     // imported prior to this test.
 | ||||
|     let _ = harness | ||||
|         .chain | ||||
|         .process_chain_segment( | ||||
|             ancestor_blocks, | ||||
|             CountUnrealized::True, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|         ) | ||||
|         .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes) | ||||
|         .await; | ||||
|     harness.chain.recompute_head_at_current_slot().await; | ||||
| 
 | ||||
| @ -361,8 +355,8 @@ async fn assert_invalid_signature( | ||||
|         .process_block( | ||||
|             snapshots[block_index].beacon_block.canonical_root(), | ||||
|             snapshots[block_index].beacon_block.clone(), | ||||
|             CountUnrealized::True, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             || Ok(()), | ||||
|         ) | ||||
|         .await; | ||||
|     assert!( | ||||
| @ -414,11 +408,7 @@ async fn invalid_signature_gossip_block() { | ||||
|             .collect(); | ||||
|         harness | ||||
|             .chain | ||||
|             .process_chain_segment( | ||||
|                 ancestor_blocks, | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|             ) | ||||
|             .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes) | ||||
|             .await | ||||
|             .into_block_error() | ||||
|             .expect("should import all blocks prior to the one being tested"); | ||||
| @ -430,8 +420,8 @@ async fn invalid_signature_gossip_block() { | ||||
|                     .process_block( | ||||
|                         signed_block.canonical_root(), | ||||
|                         Arc::new(signed_block), | ||||
|                         CountUnrealized::True, | ||||
|                         NotifyExecutionLayer::Yes, | ||||
|                         || Ok(()), | ||||
|                     ) | ||||
|                     .await, | ||||
|                 Err(BlockError::InvalidSignature) | ||||
| @ -465,7 +455,7 @@ async fn invalid_signature_block_proposal() { | ||||
|             matches!( | ||||
|                 harness | ||||
|                     .chain | ||||
|                     .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) | ||||
|                     .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||
|                     .await | ||||
|                     .into_block_error(), | ||||
|                 Err(BlockError::InvalidSignature) | ||||
| @ -663,7 +653,7 @@ async fn invalid_signature_deposit() { | ||||
|             !matches!( | ||||
|                 harness | ||||
|                     .chain | ||||
|                     .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) | ||||
|                     .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||
|                     .await | ||||
|                     .into_block_error(), | ||||
|                 Err(BlockError::InvalidSignature) | ||||
| @ -743,8 +733,8 @@ async fn block_gossip_verification() { | ||||
|             .process_block( | ||||
|                 gossip_verified.block_root, | ||||
|                 gossip_verified, | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()), | ||||
|             ) | ||||
|             .await | ||||
|             .expect("should import valid gossip verified block"); | ||||
| @ -941,11 +931,7 @@ async fn block_gossip_verification() { | ||||
|     assert!( | ||||
|         matches!( | ||||
|             unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), | ||||
|             BlockError::RepeatProposal { | ||||
|                 proposer, | ||||
|                 slot, | ||||
|             } | ||||
|             if proposer == other_proposer && slot == block.message().slot() | ||||
|             BlockError::BlockIsAlreadyKnown, | ||||
|         ), | ||||
|         "should register any valid signature against the proposer, even if the block failed later verification" | ||||
|     ); | ||||
| @ -974,11 +960,7 @@ async fn block_gossip_verification() { | ||||
|                 .await | ||||
|                 .err() | ||||
|                 .expect("should error when processing known block"), | ||||
|             BlockError::RepeatProposal { | ||||
|                 proposer, | ||||
|                 slot, | ||||
|             } | ||||
|             if proposer == block.message().proposer_index() && slot == block.message().slot() | ||||
|             BlockError::BlockIsAlreadyKnown | ||||
|         ), | ||||
|         "the second proposal by this validator should be rejected" | ||||
|     ); | ||||
| @ -1015,8 +997,8 @@ async fn verify_block_for_gossip_slashing_detection() { | ||||
|         .process_block( | ||||
|             verified_block.block_root, | ||||
|             verified_block, | ||||
|             CountUnrealized::True, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             || Ok(()), | ||||
|         ) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| @ -1055,8 +1037,8 @@ async fn verify_block_for_gossip_doppelganger_detection() { | ||||
|         .process_block( | ||||
|             verified_block.block_root, | ||||
|             verified_block, | ||||
|             CountUnrealized::True, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             || Ok(()), | ||||
|         ) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| @ -1203,8 +1185,8 @@ async fn add_base_block_to_altair_chain() { | ||||
|             .process_block( | ||||
|                 base_block.canonical_root(), | ||||
|                 Arc::new(base_block.clone()), | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()), | ||||
|             ) | ||||
|             .await | ||||
|             .err() | ||||
| @ -1219,11 +1201,7 @@ async fn add_base_block_to_altair_chain() { | ||||
|     assert!(matches!( | ||||
|         harness | ||||
|             .chain | ||||
|             .process_chain_segment( | ||||
|                 vec![Arc::new(base_block)], | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|             ) | ||||
|             .process_chain_segment(vec![Arc::new(base_block)], NotifyExecutionLayer::Yes) | ||||
|             .await, | ||||
|         ChainSegmentResult::Failed { | ||||
|             imported_blocks: 0, | ||||
| @ -1342,8 +1320,8 @@ async fn add_altair_block_to_base_chain() { | ||||
|             .process_block( | ||||
|                 altair_block.canonical_root(), | ||||
|                 Arc::new(altair_block.clone()), | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()), | ||||
|             ) | ||||
|             .await | ||||
|             .err() | ||||
| @ -1358,11 +1336,7 @@ async fn add_altair_block_to_base_chain() { | ||||
|     assert!(matches!( | ||||
|         harness | ||||
|             .chain | ||||
|             .process_chain_segment( | ||||
|                 vec![Arc::new(altair_block)], | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes | ||||
|             ) | ||||
|             .process_chain_segment(vec![Arc::new(altair_block)], NotifyExecutionLayer::Yes) | ||||
|             .await, | ||||
|         ChainSegmentResult::Failed { | ||||
|             imported_blocks: 0, | ||||
| @ -1373,3 +1347,100 @@ async fn add_altair_block_to_base_chain() { | ||||
|         } | ||||
|     )); | ||||
| } | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn import_duplicate_block_unrealized_justification() { | ||||
|     let spec = MainnetEthSpec::default_spec(); | ||||
| 
 | ||||
|     let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||
|         .spec(spec) | ||||
|         .keypairs(KEYPAIRS[..].to_vec()) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|     let chain = &harness.chain; | ||||
| 
 | ||||
|     // Move out of the genesis slot.
 | ||||
|     harness.advance_slot(); | ||||
| 
 | ||||
|     // Build the chain out to the first justification opportunity 2/3rds of the way through epoch 2.
 | ||||
|     let num_slots = E::slots_per_epoch() as usize * 8 / 3; | ||||
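|     // E.g. with 32 slots per epoch: 32 * 8 / 3 = 85 slots, i.e. two full epochs plus | ||||
|     // 21 slots (~2/3) of epoch 2. | ||||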
|     harness | ||||
|         .extend_chain( | ||||
|             num_slots, | ||||
|             BlockStrategy::OnCanonicalHead, | ||||
|             AttestationStrategy::AllValidators, | ||||
|         ) | ||||
|         .await; | ||||
| 
 | ||||
|     // Move into the next empty slot.
 | ||||
|     harness.advance_slot(); | ||||
| 
 | ||||
|     // The store's justified checkpoint must still be at epoch 0, while unrealized justification
 | ||||
|     // must be at epoch 1.
 | ||||
|     let fc = chain.canonical_head.fork_choice_read_lock(); | ||||
|     assert_eq!(fc.justified_checkpoint().epoch, 0); | ||||
|     assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); | ||||
|     drop(fc); | ||||
| 
 | ||||
|     // Produce a block to justify epoch 2.
 | ||||
|     let state = harness.get_current_state(); | ||||
|     let slot = harness.get_current_slot(); | ||||
|     let (block, _) = harness.make_block(state.clone(), slot).await; | ||||
|     let block = Arc::new(block); | ||||
|     let block_root = block.canonical_root(); | ||||
| 
 | ||||
|     // Create two verified variants of the block, representing the same block being processed in
 | ||||
|     // parallel.
 | ||||
|     let notify_execution_layer = NotifyExecutionLayer::Yes; | ||||
|     let verified_block1 = block | ||||
|         .clone() | ||||
|         .into_execution_pending_block(block_root, &chain, notify_execution_layer) | ||||
|         .unwrap(); | ||||
|     let verified_block2 = block | ||||
|         .into_execution_pending_block(block_root, &chain, notify_execution_layer) | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     // Import the first block, simulating a block processed via a finalized chain segment.
 | ||||
|     chain | ||||
|         .clone() | ||||
|         .import_execution_pending_block(verified_block1) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     // The realized justified checkpoint should NOT have updated, while unrealized
 | ||||
|     // justification should now be at epoch 2.
 | ||||
|     let fc = chain.canonical_head.fork_choice_read_lock(); | ||||
|     assert_eq!(fc.justified_checkpoint().epoch, 0); | ||||
|     let unrealized_justification = fc.unrealized_justified_checkpoint(); | ||||
|     assert_eq!(unrealized_justification.epoch, 2); | ||||
| 
 | ||||
|     // The fork choice node for the block should have unrealized justification.
 | ||||
|     let fc_block = fc.get_block(&block_root).unwrap(); | ||||
|     assert_eq!( | ||||
|         fc_block.unrealized_justified_checkpoint, | ||||
|         Some(unrealized_justification) | ||||
|     ); | ||||
|     drop(fc); | ||||
| 
 | ||||
|     // Import the second verified block, simulating a block processed via RPC.
 | ||||
|     chain | ||||
|         .clone() | ||||
|         .import_execution_pending_block(verified_block2) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     // Unrealized justification should remain at epoch 2 after the duplicate import.
 | ||||
|     let fc = chain.canonical_head.fork_choice_read_lock(); | ||||
|     assert_eq!(fc.justified_checkpoint().epoch, 0); | ||||
|     assert_eq!( | ||||
|         fc.unrealized_justified_checkpoint(), | ||||
|         unrealized_justification | ||||
|     ); | ||||
| 
 | ||||
|     // The fork choice node for the block should still have the unrealized justified checkpoint.
 | ||||
|     let fc_block = fc.get_block(&block_root).unwrap(); | ||||
|     assert_eq!( | ||||
|         fc_block.unrealized_justified_checkpoint, | ||||
|         Some(unrealized_justification) | ||||
|     ); | ||||
| } | ||||
|  | ||||
| @ -133,13 +133,8 @@ async fn base_altair_merge_capella() { | ||||
|     for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { | ||||
|         harness.extend_slots(1).await; | ||||
|         let block = &harness.chain.head_snapshot().beacon_block; | ||||
|         let full_payload: FullPayload<E> = block | ||||
|             .message() | ||||
|             .body() | ||||
|             .execution_payload() | ||||
|             .unwrap() | ||||
|             .clone() | ||||
|             .into(); | ||||
|         let full_payload: FullPayload<E> = | ||||
|             block.message().body().execution_payload().unwrap().into(); | ||||
|         // pre-capella shouldn't have withdrawals
 | ||||
|         assert!(full_payload.withdrawals_root().is_err()); | ||||
|         execution_payloads.push(full_payload); | ||||
| @ -151,13 +146,8 @@ async fn base_altair_merge_capella() { | ||||
|     for _ in 0..16 { | ||||
|         harness.extend_slots(1).await; | ||||
|         let block = &harness.chain.head_snapshot().beacon_block; | ||||
|         let full_payload: FullPayload<E> = block | ||||
|             .message() | ||||
|             .body() | ||||
|             .execution_payload() | ||||
|             .unwrap() | ||||
|             .clone() | ||||
|             .into(); | ||||
|         let full_payload: FullPayload<E> = | ||||
|             block.message().body().execution_payload().unwrap().into(); | ||||
|         // post-capella should have withdrawals
 | ||||
|         assert!(full_payload.withdrawals_root().is_ok()); | ||||
|         execution_payloads.push(full_payload); | ||||
|  | ||||
| @ -7,7 +7,7 @@ use beacon_chain::otb_verification_service::{ | ||||
| use beacon_chain::{ | ||||
|     canonical_head::{CachedHead, CanonicalHead}, | ||||
|     test_utils::{BeaconChainHarness, EphemeralHarnessType}, | ||||
|     BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, | ||||
|     BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, | ||||
|     OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, | ||||
|     INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, | ||||
|     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, | ||||
| @ -17,9 +17,7 @@ use execution_layer::{ | ||||
|     test_utils::ExecutionBlockGenerator, | ||||
|     ExecutionLayer, ForkchoiceState, PayloadAttributes, | ||||
| }; | ||||
| use fork_choice::{ | ||||
|     CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, | ||||
| }; | ||||
| use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; | ||||
| use logging::test_logger; | ||||
| use proto_array::{Error as ProtoArrayError, ExecutionStatus}; | ||||
| use slot_clock::SlotClock; | ||||
| @ -61,6 +59,10 @@ impl InvalidPayloadRig { | ||||
| 
 | ||||
|         let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||
|             .spec(spec) | ||||
|             .chain_config(ChainConfig { | ||||
|                 reconstruct_historic_states: true, | ||||
|                 ..ChainConfig::default() | ||||
|             }) | ||||
|             .logger(test_logger()) | ||||
|             .deterministic_keypairs(VALIDATOR_COUNT) | ||||
|             .mock_execution_layer() | ||||
| @ -698,8 +700,8 @@ async fn invalidates_all_descendants() { | ||||
|         .process_block( | ||||
|             fork_block.canonical_root(), | ||||
|             Arc::new(fork_block), | ||||
|             CountUnrealized::True, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             || Ok(()), | ||||
|         ) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| @ -795,8 +797,8 @@ async fn switches_heads() { | ||||
|         .process_block( | ||||
|             fork_block.canonical_root(), | ||||
|             Arc::new(fork_block), | ||||
|             CountUnrealized::True, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             || Ok(()), | ||||
|         ) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| @ -1050,7 +1052,9 @@ async fn invalid_parent() { | ||||
| 
 | ||||
|     // Ensure the block built atop an invalid payload is invalid for import.
 | ||||
|     assert!(matches!( | ||||
|         rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await, | ||||
|         rig.harness | ||||
|             .chain | ||||
|             .process_block( | ||||
|                 block.canonical_root(), | ||||
|                 block.clone(), | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()), | ||||
|             ) | ||||
|             .await, | ||||
|         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) | ||||
|         if invalid_root == parent_root | ||||
|     )); | ||||
| @ -1064,8 +1068,9 @@ async fn invalid_parent() { | ||||
|             Duration::from_secs(0), | ||||
|             &state, | ||||
|             PayloadVerificationStatus::Optimistic, | ||||
|             rig.harness.chain.config.progressive_balances_mode, | ||||
|             &rig.harness.chain.spec, | ||||
|             CountUnrealized::True, | ||||
|             rig.harness.logger() | ||||
|         ), | ||||
|         Err(ForkChoiceError::ProtoArrayStringError(message)) | ||||
|         if message.contains(&format!( | ||||
| @ -1339,8 +1344,8 @@ async fn build_optimistic_chain( | ||||
|             .process_block( | ||||
|                 block.canonical_root(), | ||||
|                 block, | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()), | ||||
|             ) | ||||
|             .await | ||||
|             .unwrap(); | ||||
| @ -1900,8 +1905,8 @@ async fn recover_from_invalid_head_by_importing_blocks() { | ||||
|         .process_block( | ||||
|             fork_block.canonical_root(), | ||||
|             fork_block.clone(), | ||||
|             CountUnrealized::True, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             || Ok(()), | ||||
|         ) | ||||
|         .await | ||||
|         .unwrap(); | ||||
|  | ||||
| @ -9,19 +9,22 @@ use beacon_chain::{ | ||||
|     test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, | ||||
|     types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, | ||||
| }; | ||||
| use eth2::lighthouse::attestation_rewards::TotalAttestationRewards; | ||||
| use eth2::lighthouse::StandardAttestationRewards; | ||||
| use eth2::types::ValidatorId; | ||||
| use lazy_static::lazy_static; | ||||
| use types::beacon_state::Error as BeaconStateError; | ||||
| use types::{BeaconState, ChainSpec}; | ||||
| 
 | ||||
| pub const VALIDATOR_COUNT: usize = 64; | ||||
| 
 | ||||
| type E = MinimalEthSpec; | ||||
| 
 | ||||
| lazy_static! { | ||||
|     static ref KEYPAIRS: Vec<Keypair> = generate_deterministic_keypairs(VALIDATOR_COUNT); | ||||
| } | ||||
| 
 | ||||
| fn get_harness<E: EthSpec>() -> BeaconChainHarness<EphemeralHarnessType<E>> { | ||||
|     let mut spec = E::default_spec(); | ||||
| 
 | ||||
|     spec.altair_fork_epoch = Some(Epoch::new(0)); // We use altair for all tests
 | ||||
| 
 | ||||
| fn get_harness(spec: ChainSpec) -> BeaconChainHarness<EphemeralHarnessType<E>> { | ||||
|     let harness = BeaconChainHarness::builder(E::default()) | ||||
|         .spec(spec) | ||||
|         .keypairs(KEYPAIRS.to_vec()) | ||||
| @ -35,8 +38,11 @@ fn get_harness<E: EthSpec>() -> BeaconChainHarness<EphemeralHarnessType<E>> { | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn test_sync_committee_rewards() { | ||||
|     let num_block_produced = MinimalEthSpec::slots_per_epoch(); | ||||
|     let harness = get_harness::<MinimalEthSpec>(); | ||||
|     let mut spec = E::default_spec(); | ||||
|     spec.altair_fork_epoch = Some(Epoch::new(0)); | ||||
| 
 | ||||
|     let harness = get_harness(spec); | ||||
|     let num_block_produced = E::slots_per_epoch(); | ||||
| 
 | ||||
|     let latest_block_root = harness | ||||
|         .extend_chain( | ||||
| @ -119,3 +125,175 @@ async fn test_sync_committee_rewards() { | ||||
|         mismatches.join(",") | ||||
|     ); | ||||
| } | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn test_verify_attestation_rewards_base() { | ||||
|     let harness = get_harness(E::default_spec()); | ||||
| 
 | ||||
|     // epoch 0 (N), only two thirds of validators vote.
 | ||||
|     let two_thirds = (VALIDATOR_COUNT / 3) * 2; | ||||
|     let two_thirds_validators: Vec<usize> = (0..two_thirds).collect(); | ||||
|     harness | ||||
|         .extend_chain( | ||||
|             E::slots_per_epoch() as usize, | ||||
|             BlockStrategy::OnCanonicalHead, | ||||
|             AttestationStrategy::SomeValidators(two_thirds_validators), | ||||
|         ) | ||||
|         .await; | ||||
| 
 | ||||
|     let initial_balances: Vec<u64> = harness.get_current_state().balances().clone().into(); | ||||
| 
 | ||||
|     // extend slots to beginning of epoch N + 2
 | ||||
|     harness.extend_slots(E::slots_per_epoch() as usize).await; | ||||
| 
 | ||||
|     // compute reward deltas for all validators in epoch N
 | ||||
|     let StandardAttestationRewards { | ||||
|         ideal_rewards, | ||||
|         total_rewards, | ||||
|     } = harness | ||||
|         .chain | ||||
|         .compute_attestation_rewards(Epoch::new(0), vec![]) | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     // assert no inactivity penalty for both ideal rewards and individual validators
 | ||||
|     assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); | ||||
|     assert!(total_rewards.iter().all(|reward| reward.inactivity == 0)); | ||||
| 
 | ||||
|     // apply attestation rewards to initial balances
 | ||||
|     let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); | ||||
| 
 | ||||
|     // verify expected balances against actual balances
 | ||||
|     let balances: Vec<u64> = harness.get_current_state().balances().clone().into(); | ||||
|     assert_eq!(expected_balances, balances); | ||||
| } | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn test_verify_attestation_rewards_base_inactivity_leak() { | ||||
|     let spec = E::default_spec(); | ||||
|     let harness = get_harness(spec.clone()); | ||||
| 
 | ||||
|     let half = VALIDATOR_COUNT / 2; | ||||
|     let half_validators: Vec<usize> = (0..half).collect(); | ||||
|     // The target epoch is the epoch in which the chain enters the inactivity leak.
 | ||||
|     let target_epoch = spec.min_epochs_to_inactivity_penalty + 1; | ||||
| 
 | ||||
|     // advance until beginning of epoch N + 1 and get balances
 | ||||
|     harness | ||||
|         .extend_chain( | ||||
|             (E::slots_per_epoch() * (target_epoch + 1)) as usize, | ||||
|             BlockStrategy::OnCanonicalHead, | ||||
|             AttestationStrategy::SomeValidators(half_validators.clone()), | ||||
|         ) | ||||
|         .await; | ||||
|     let initial_balances: Vec<u64> = harness.get_current_state().balances().clone().into(); | ||||
| 
 | ||||
|     // extend slots to beginning of epoch N + 2
 | ||||
|     harness.advance_slot(); | ||||
|     harness | ||||
|         .extend_chain( | ||||
|             E::slots_per_epoch() as usize, | ||||
|             BlockStrategy::OnCanonicalHead, | ||||
|             AttestationStrategy::SomeValidators(half_validators), | ||||
|         ) | ||||
|         .await; | ||||
|     let _slot = harness.get_current_slot(); | ||||
| 
 | ||||
|     // compute reward deltas for all validators in epoch N
 | ||||
|     let StandardAttestationRewards { | ||||
|         ideal_rewards, | ||||
|         total_rewards, | ||||
|     } = harness | ||||
|         .chain | ||||
|         .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     // assert inactivity penalty for both ideal rewards and individual validators
 | ||||
|     assert!(ideal_rewards.iter().all(|reward| reward.inactivity < 0)); | ||||
|     assert!(total_rewards.iter().all(|reward| reward.inactivity < 0)); | ||||
| 
 | ||||
|     // apply attestation rewards to initial balances
 | ||||
|     let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); | ||||
| 
 | ||||
|     // verify expected balances against actual balances
 | ||||
|     let balances: Vec<u64> = harness.get_current_state().balances().clone().into(); | ||||
|     assert_eq!(expected_balances, balances); | ||||
| } | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn test_verify_attestation_rewards_base_subset_only() { | ||||
|     let harness = get_harness(E::default_spec()); | ||||
| 
 | ||||
|     // epoch 0 (N), only two thirds of validators vote.
 | ||||
|     let two_thirds = (VALIDATOR_COUNT / 3) * 2; | ||||
|     let two_thirds_validators: Vec<usize> = (0..two_thirds).collect(); | ||||
|     harness | ||||
|         .extend_chain( | ||||
|             E::slots_per_epoch() as usize, | ||||
|             BlockStrategy::OnCanonicalHead, | ||||
|             AttestationStrategy::SomeValidators(two_thirds_validators), | ||||
|         ) | ||||
|         .await; | ||||
| 
 | ||||
|     // a small subset of validators to compute attestation rewards for
 | ||||
|     let validators_subset = [0, VALIDATOR_COUNT / 2, VALIDATOR_COUNT - 1]; | ||||
| 
 | ||||
|     // capture balances before transitioning to N + 2
 | ||||
|     let initial_balances = get_validator_balances(harness.get_current_state(), &validators_subset); | ||||
| 
 | ||||
|     // extend slots to beginning of epoch N + 2
 | ||||
|     harness.extend_slots(E::slots_per_epoch() as usize).await; | ||||
| 
 | ||||
|     let validators_subset_ids: Vec<ValidatorId> = validators_subset | ||||
|         .into_iter() | ||||
|         .map(|idx| ValidatorId::Index(idx as u64)) | ||||
|         .collect(); | ||||
| 
 | ||||
|     // compute reward deltas for the subset of validators in epoch N
 | ||||
|     let StandardAttestationRewards { | ||||
|         ideal_rewards: _, | ||||
|         total_rewards, | ||||
|     } = harness | ||||
|         .chain | ||||
|         .compute_attestation_rewards(Epoch::new(0), validators_subset_ids) | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     // apply attestation rewards to initial balances
 | ||||
|     let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); | ||||
| 
 | ||||
|     // verify expected balances against actual balances
 | ||||
|     let balances = get_validator_balances(harness.get_current_state(), &validators_subset); | ||||
|     assert_eq!(expected_balances, balances); | ||||
| } | ||||
| 
 | ||||
| /// Apply a vec of `TotalAttestationRewards` to initial balances, and return the
 | ||||
| /// resulting expected balances.
 | ||||
| fn apply_attestation_rewards( | ||||
|     initial_balances: &[u64], | ||||
|     attestation_rewards: Vec<TotalAttestationRewards>, | ||||
| ) -> Vec<u64> { | ||||
|     initial_balances | ||||
|         .iter() | ||||
|         .zip(attestation_rewards) | ||||
|         .map(|(&initial_balance, rewards)| { | ||||
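|             // Reward components can be negative (e.g. inactivity penalties), so sum | ||||
|             // in i64 before converting back to a u64 balance. | ||||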
|             let expected_balance = initial_balance as i64 | ||||
|                 + rewards.head | ||||
|                 + rewards.source | ||||
|                 + rewards.target | ||||
|                 + rewards.inclusion_delay.map(|q| q.value).unwrap_or(0) as i64 | ||||
|                 + rewards.inactivity; | ||||
|             expected_balance as u64 | ||||
|         }) | ||||
|         .collect::<Vec<u64>>() | ||||
| } | ||||
| 
 | ||||
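| /// Fetch the balances of the given validators from `state`, in the order given. | ||||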
| fn get_validator_balances(state: BeaconState<E>, validators: &[usize]) -> Vec<u64> { | ||||
|     validators | ||||
|         .iter() | ||||
|         .flat_map(|&id| { | ||||
|             state | ||||
|                 .balances() | ||||
|                 .get(id) | ||||
|                 .cloned() | ||||
|                 .ok_or(BeaconStateError::BalancesOutOfBounds(id)) | ||||
|         }) | ||||
|         .collect() | ||||
| } | ||||
|  | ||||
| @ -9,15 +9,15 @@ use beacon_chain::test_utils::{ | ||||
| use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; | ||||
| use beacon_chain::{ | ||||
|     historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, | ||||
|     BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, | ||||
|     ServerSentEventHandler, WhenSlotSkipped, | ||||
|     BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, | ||||
|     NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, | ||||
| }; | ||||
| use fork_choice::CountUnrealized; | ||||
| use lazy_static::lazy_static; | ||||
| use logging::test_logger; | ||||
| use maplit::hashset; | ||||
| use rand::Rng; | ||||
| use state_processing::BlockReplayer; | ||||
| use slot_clock::{SlotClock, TestingSlotClock}; | ||||
| use state_processing::{state_advance::complete_state_advance, BlockReplayer}; | ||||
| use std::collections::HashMap; | ||||
| use std::collections::HashSet; | ||||
| use std::convert::TryInto; | ||||
| @ -66,6 +66,19 @@ fn get_store_with_spec( | ||||
| fn get_harness( | ||||
|     store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>, | ||||
|     validator_count: usize, | ||||
| ) -> TestHarness { | ||||
|     // Most tests expect to retain historic states, so we use this as the default.
 | ||||
|     let chain_config = ChainConfig { | ||||
|         reconstruct_historic_states: true, | ||||
|         ..ChainConfig::default() | ||||
|     }; | ||||
|     get_harness_generic(store, validator_count, chain_config) | ||||
| } | ||||
| 
 | ||||
| fn get_harness_generic( | ||||
|     store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>, | ||||
|     validator_count: usize, | ||||
|     chain_config: ChainConfig, | ||||
| ) -> TestHarness { | ||||
|     let harness = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
| @ -73,6 +86,7 @@ fn get_harness( | ||||
|         .logger(store.logger().clone()) | ||||
|         .fresh_disk_store(store) | ||||
|         .mock_execution_layer() | ||||
|         .chain_config(chain_config) | ||||
|         .build(); | ||||
|     harness.advance_slot(); | ||||
|     harness | ||||
| @ -407,7 +421,7 @@ async fn forwards_iter_block_and_state_roots_until() { | ||||
| 
 | ||||
|     // The last restore point slot is the point at which the hybrid forwards iterator behaviour
 | ||||
|     // changes.
 | ||||
|     let last_restore_point_slot = store.get_latest_restore_point_slot(); | ||||
|     let last_restore_point_slot = store.get_latest_restore_point_slot().unwrap(); | ||||
|     assert!(last_restore_point_slot > 0); | ||||
| 
 | ||||
|     let chain = &harness.chain; | ||||
| @ -461,13 +475,15 @@ async fn block_replay_with_inaccurate_state_roots() { | ||||
|         .await; | ||||
| 
 | ||||
|     // Slot must not be 0 mod 32 or else no blocks will be replayed.
 | ||||
|     let (mut head_state, head_root) = harness.get_current_state_and_root(); | ||||
|     let (mut head_state, head_state_root) = harness.get_current_state_and_root(); | ||||
|     let head_block_root = harness.head_block_root(); | ||||
|     assert_ne!(head_state.slot() % 32, 0); | ||||
| 
 | ||||
|     let mut fast_head_state = store | ||||
|     let (_, mut fast_head_state) = store | ||||
|         .get_inconsistent_state_for_attestation_verification_only( | ||||
|             &head_root, | ||||
|             Some(head_state.slot()), | ||||
|             &head_block_root, | ||||
|             head_state.slot(), | ||||
|             head_state_root, | ||||
|         ) | ||||
|         .unwrap() | ||||
|         .unwrap(); | ||||
| @ -566,14 +582,7 @@ async fn block_replayer_hooks() { | ||||
| async fn delete_blocks_and_states() { | ||||
|     let db_path = tempdir().unwrap(); | ||||
|     let store = get_store(&db_path); | ||||
|     let validators_keypairs = | ||||
|         types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); | ||||
|     let harness = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .keypairs(validators_keypairs) | ||||
|         .fresh_disk_store(store.clone()) | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|     let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); | ||||
| 
 | ||||
|     let unforked_blocks: u64 = 4 * E::slots_per_epoch(); | ||||
| 
 | ||||
| @ -1016,18 +1025,14 @@ fn check_shuffling_compatible( | ||||
| // Ensure blocks from abandoned forks are pruned from the Hot DB
 | ||||
| #[tokio::test] | ||||
| async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); | ||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .keypairs(validators_keypairs) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|     let db_path = tempdir().unwrap(); | ||||
|     let store = get_store(&db_path); | ||||
|     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||
|     let slots_per_epoch = rig.slots_per_epoch(); | ||||
|     let (mut state, state_root) = rig.get_current_state_and_root(); | ||||
| 
 | ||||
| @ -1126,18 +1131,14 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); | ||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .keypairs(validators_keypairs) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|     let db_path = tempdir().unwrap(); | ||||
|     let store = get_store(&db_path); | ||||
|     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||
|     let slots_per_epoch = rig.slots_per_epoch(); | ||||
|     let (state, state_root) = rig.get_current_state_and_root(); | ||||
| 
 | ||||
| @ -1261,15 +1262,11 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); | ||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .keypairs(validators_keypairs) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|     let db_path = tempdir().unwrap(); | ||||
|     let store = get_store(&db_path); | ||||
|     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||
|     let slots_per_epoch = rig.slots_per_epoch(); | ||||
|     let (mut state, state_root) = rig.get_current_state_and_root(); | ||||
| 
 | ||||
| @ -1353,18 +1350,14 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); | ||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .keypairs(validators_keypairs) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|     let db_path = tempdir().unwrap(); | ||||
|     let store = get_store(&db_path); | ||||
|     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||
|     let (state, state_root) = rig.get_current_state_and_root(); | ||||
| 
 | ||||
|     // Fill up 0th epoch with canonical chain blocks
 | ||||
| @ -1498,18 +1491,14 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { | ||||
| // This is to check that states outside of normal block processing are pruned correctly.
 | ||||
| #[tokio::test] | ||||
| async fn prunes_skipped_slots_states() { | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); | ||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .keypairs(validators_keypairs) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|     let db_path = tempdir().unwrap(); | ||||
|     let store = get_store(&db_path); | ||||
|     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||
|     let (state, state_root) = rig.get_current_state_and_root(); | ||||
| 
 | ||||
|     let canonical_slots_zeroth_epoch: Vec<Slot> = | ||||
| @ -1627,18 +1616,14 @@ async fn prunes_skipped_slots_states() { | ||||
| // This is to check that states outside of normal block processing are pruned correctly.
 | ||||
| #[tokio::test] | ||||
| async fn finalizes_non_epoch_start_slot() { | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; | ||||
|     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); | ||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .keypairs(validators_keypairs) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|     let db_path = tempdir().unwrap(); | ||||
|     let store = get_store(&db_path); | ||||
|     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||
|     let (state, state_root) = rig.get_current_state_and_root(); | ||||
| 
 | ||||
|     let canonical_slots_zeroth_epoch: Vec<Slot> = | ||||
| @ -2054,39 +2039,82 @@ async fn garbage_collect_temp_states_from_failed_block() { | ||||
| } | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn weak_subjectivity_sync() { | ||||
| async fn weak_subjectivity_sync_easy() { | ||||
|     let num_initial_slots = E::slots_per_epoch() * 11; | ||||
|     let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); | ||||
|     let slots = (1..num_initial_slots).map(Slot::new).collect(); | ||||
|     weak_subjectivity_sync_test(slots, checkpoint_slot).await | ||||
| } | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() { | ||||
|     let num_initial_slots = E::slots_per_epoch() * 11; | ||||
|     let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); | ||||
|     let slots = (1..num_initial_slots) | ||||
|         .map(Slot::new) | ||||
|         .filter(|&slot| { | ||||
|             // Skip the 3 slots up to and including the checkpoint slot.
 | ||||
|             slot <= checkpoint_slot - 3 || slot > checkpoint_slot | ||||
|         }) | ||||
|         .collect(); | ||||
|     weak_subjectivity_sync_test(slots, checkpoint_slot).await | ||||
| } | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { | ||||
|     let num_initial_slots = E::slots_per_epoch() * 11; | ||||
|     let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3); | ||||
|     let slots = (1..num_initial_slots) | ||||
|         .map(Slot::new) | ||||
|         .filter(|&slot| { | ||||
|             // Skip 3 slots after the checkpoint slot.
 | ||||
|             slot <= checkpoint_slot || slot > checkpoint_slot + 3 | ||||
|         }) | ||||
|         .collect(); | ||||
|     weak_subjectivity_sync_test(slots, checkpoint_slot).await | ||||
| } | ||||
| 
 | ||||
| async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) { | ||||
|     // Build an initial chain on one harness, representing a synced node with full history.
 | ||||
|     let num_initial_blocks = E::slots_per_epoch() * 11; | ||||
|     let num_final_blocks = E::slots_per_epoch() * 2; | ||||
| 
 | ||||
|     let temp1 = tempdir().unwrap(); | ||||
|     let full_store = get_store(&temp1); | ||||
|     let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); | ||||
| 
 | ||||
|     let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>(); | ||||
| 
 | ||||
|     let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); | ||||
|     harness | ||||
|         .extend_chain( | ||||
|             num_initial_blocks as usize, | ||||
|             BlockStrategy::OnCanonicalHead, | ||||
|             AttestationStrategy::AllValidators, | ||||
|         .add_attested_blocks_at_slots( | ||||
|             genesis_state.clone(), | ||||
|             genesis_state_root, | ||||
|             &slots, | ||||
|             &all_validators, | ||||
|         ) | ||||
|         .await; | ||||
| 
 | ||||
|     let genesis_state = full_store | ||||
|         .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) | ||||
|     let wss_block_root = harness | ||||
|         .chain | ||||
|         .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) | ||||
|         .unwrap() | ||||
|         .unwrap(); | ||||
|     let wss_checkpoint = harness.finalized_checkpoint(); | ||||
|     let wss_state_root = harness | ||||
|         .chain | ||||
|         .state_root_at_slot(checkpoint_slot) | ||||
|         .unwrap() | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     let wss_block = harness | ||||
|         .chain | ||||
|         .store | ||||
|         .get_full_block(&wss_checkpoint.root) | ||||
|         .get_full_block(&wss_block_root) | ||||
|         .unwrap() | ||||
|         .unwrap(); | ||||
|     let wss_state = full_store | ||||
|         .get_state(&wss_block.state_root(), None) | ||||
|         .get_state(&wss_state_root, Some(checkpoint_slot)) | ||||
|         .unwrap() | ||||
|         .unwrap(); | ||||
|     let wss_slot = wss_block.slot(); | ||||
| 
 | ||||
|     // Add more blocks that advance finalization further.
 | ||||
|     harness.advance_slot(); | ||||
| @ -2105,20 +2133,26 @@ async fn weak_subjectivity_sync() { | ||||
|     let spec = test_spec::<E>(); | ||||
|     let seconds_per_slot = spec.seconds_per_slot; | ||||
| 
 | ||||
|     // Initialise a new beacon chain from the finalized checkpoint
 | ||||
|     // Initialise a new beacon chain from the finalized checkpoint.
 | ||||
|     // The slot clock must be set to a time ahead of the checkpoint state.
 | ||||
|     let slot_clock = TestingSlotClock::new( | ||||
|         Slot::new(0), | ||||
|         Duration::from_secs(harness.chain.genesis_time), | ||||
|         Duration::from_secs(seconds_per_slot), | ||||
|     ); | ||||
|     slot_clock.set_slot(harness.get_current_slot().as_u64()); | ||||
|     let beacon_chain = Arc::new( | ||||
|         BeaconChainBuilder::new(MinimalEthSpec) | ||||
|             .store(store.clone()) | ||||
|             .custom_spec(test_spec::<E>()) | ||||
|             .task_executor(harness.chain.task_executor.clone()) | ||||
|             .logger(log.clone()) | ||||
|             .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) | ||||
|             .unwrap() | ||||
|             .logger(log.clone()) | ||||
|             .store_migrator_config(MigratorConfig::default().blocking()) | ||||
|             .dummy_eth1_backend() | ||||
|             .expect("should build dummy backend") | ||||
|             .testing_slot_clock(Duration::from_secs(seconds_per_slot)) | ||||
|             .expect("should configure testing slot clock") | ||||
|             .slot_clock(slot_clock) | ||||
|             .shutdown_sender(shutdown_tx) | ||||
|             .chain_config(ChainConfig::default()) | ||||
|             .event_handler(Some(ServerSentEventHandler::new_with_capacity( | ||||
| @ -2132,9 +2166,9 @@ async fn weak_subjectivity_sync() { | ||||
| 
 | ||||
|     // Apply blocks forward to reach head.
 | ||||
|     let chain_dump = harness.chain.chain_dump().unwrap(); | ||||
|     let new_blocks = &chain_dump[wss_slot.as_usize() + 1..]; | ||||
| 
 | ||||
|     assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1); | ||||
|     let new_blocks = chain_dump | ||||
|         .iter() | ||||
|         .filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot); | ||||
| 
 | ||||
|     for snapshot in new_blocks { | ||||
|         let full_block = harness | ||||
| @ -2151,8 +2185,8 @@ async fn weak_subjectivity_sync() { | ||||
|             .process_block( | ||||
|                 full_block.canonical_root(), | ||||
|                 Arc::new(full_block), | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()), | ||||
|             ) | ||||
|             .await | ||||
|             .unwrap(); | ||||
| @ -2220,14 +2254,18 @@ async fn weak_subjectivity_sync() { | ||||
|     assert_eq!(forwards, expected); | ||||
| 
 | ||||
|     // All blocks can be loaded.
 | ||||
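|     // The forwards iterator yields one (block_root, slot) pair per slot; at skip slots it
 | ||||
|     // repeats the previous block root, whose own slot no longer matches, hence the
 | ||||
|     // `prev_block_root` guard below.
 | ||||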
|     let mut prev_block_root = Hash256::zero(); | ||||
|     for (block_root, slot) in beacon_chain | ||||
|         .forwards_iter_block_roots(Slot::new(0)) | ||||
|         .unwrap() | ||||
|         .map(Result::unwrap) | ||||
|     { | ||||
|         let block = store.get_blinded_block(&block_root).unwrap().unwrap(); | ||||
|         if block_root != prev_block_root { | ||||
|             assert_eq!(block.slot(), slot); | ||||
|         } | ||||
|         prev_block_root = block_root; | ||||
|     } | ||||
| 
 | ||||
|     // All states from the oldest state slot can be loaded.
 | ||||
|     let (_, oldest_state_slot) = store.get_historic_state_limits(); | ||||
| @ -2241,14 +2279,141 @@ async fn weak_subjectivity_sync() { | ||||
|         assert_eq!(state.canonical_root(), state_root); | ||||
|     } | ||||
| 
 | ||||
|     // Anchor slot is still set to the starting slot.
 | ||||
|     assert_eq!(store.get_anchor_slot(), Some(wss_slot)); | ||||
|     // Anchor slot is still set to the slot of the checkpoint block.
 | ||||
|     assert_eq!(store.get_anchor_slot(), Some(wss_block.slot())); | ||||
| 
 | ||||
|     // Reconstruct states.
 | ||||
|     store.clone().reconstruct_historic_states().unwrap(); | ||||
|     assert_eq!(store.get_anchor_slot(), None); | ||||
| } | ||||
| 
 | ||||
| /// Test that blocks and attestations that refer to states around an unaligned split state are
 | ||||
| /// processed correctly.
 | ||||
| #[tokio::test] | ||||
| async fn process_blocks_and_attestations_for_unaligned_checkpoint() { | ||||
|     let temp = tempdir().unwrap(); | ||||
|     let store = get_store(&temp); | ||||
|     let chain_config = ChainConfig { | ||||
|         reconstruct_historic_states: false, | ||||
|         ..ChainConfig::default() | ||||
|     }; | ||||
|     let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config); | ||||
| 
 | ||||
|     let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>(); | ||||
| 
 | ||||
|     let split_slot = Slot::new(E::slots_per_epoch() * 4); | ||||
|     let pre_skips = 1; | ||||
|     let post_skips = 1; | ||||
| 
 | ||||
|     // Build the chain up to the intended split slot, with `pre_skips` skipped slots before the split.
 | ||||
|     let slots = (1..=split_slot.as_u64() - pre_skips) | ||||
|         .map(Slot::new) | ||||
|         .collect::<Vec<_>>(); | ||||
| 
 | ||||
|     let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); | ||||
|     harness | ||||
|         .add_attested_blocks_at_slots( | ||||
|             genesis_state.clone(), | ||||
|             genesis_state_root, | ||||
|             &slots, | ||||
|             &all_validators, | ||||
|         ) | ||||
|         .await; | ||||
| 
 | ||||
|     // Before the split slot becomes finalized, create two forking blocks that build on the split
 | ||||
|     // block:
 | ||||
|     //
 | ||||
|     // - one that is invalid because it conflicts with finalization (slot <= finalized_slot)
 | ||||
|     // - one that is valid because its slot is not finalized (slot > finalized_slot)
 | ||||
|     let (unadvanced_split_state, unadvanced_split_state_root) = | ||||
|         harness.get_current_state_and_root(); | ||||
| 
 | ||||
|     let (invalid_fork_block, _) = harness | ||||
|         .make_block(unadvanced_split_state.clone(), split_slot) | ||||
|         .await; | ||||
|     let (valid_fork_block, _) = harness | ||||
|         .make_block(unadvanced_split_state.clone(), split_slot + 1) | ||||
|         .await; | ||||
| 
 | ||||
|     // Advance the chain so that the intended split slot is finalized.
 | ||||
|     // Do not attest in the epoch boundary slot, to make attestation production later easier (no
 | ||||
|     // equivocations).
 | ||||
|     let finalizing_slot = split_slot + 2 * E::slots_per_epoch(); | ||||
|     for _ in 0..pre_skips + post_skips { | ||||
|         harness.advance_slot(); | ||||
|     } | ||||
|     harness.extend_to_slot(finalizing_slot - 1).await; | ||||
|     harness | ||||
|         .add_block_at_slot(finalizing_slot, harness.get_current_state()) | ||||
|         .await | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     // Check that the split slot is as intended.
 | ||||
|     let split = store.get_split_info(); | ||||
|     assert_eq!(split.slot, split_slot); | ||||
|     assert_eq!(split.block_root, valid_fork_block.parent_root()); | ||||
|     assert_ne!(split.state_root, unadvanced_split_state_root); | ||||
| 
 | ||||
|     // Applying the invalid block should fail.
 | ||||
|     let err = harness | ||||
|         .chain | ||||
|         .process_block( | ||||
|             invalid_fork_block.canonical_root(), | ||||
|             Arc::new(invalid_fork_block.clone()), | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             || Ok(()), | ||||
|         ) | ||||
|         .await | ||||
|         .unwrap_err(); | ||||
|     assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); | ||||
| 
 | ||||
|     // Applying the valid block should succeed, but it should not become head.
 | ||||
|     harness | ||||
|         .chain | ||||
|         .process_block( | ||||
|             valid_fork_block.canonical_root(), | ||||
|             Arc::new(valid_fork_block.clone()), | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             || Ok(()), | ||||
|         ) | ||||
|         .await | ||||
|         .unwrap(); | ||||
|     harness.chain.recompute_head_at_current_slot().await; | ||||
|     assert_ne!(harness.head_block_root(), valid_fork_block.canonical_root()); | ||||
| 
 | ||||
|     // Attestations to the split block in the next 2 epochs should be processed successfully.
 | ||||
|     let attestation_start_slot = harness.get_current_slot(); | ||||
|     let attestation_end_slot = attestation_start_slot + 2 * E::slots_per_epoch(); | ||||
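|     // The stored split state is unadvanced, so advance it (via skip-slot processing) to the
 | ||||
|     // attestation slot and rebuild its caches before producing attestations to the split block.
 | ||||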
|     let (split_state_root, mut advanced_split_state) = harness | ||||
|         .chain | ||||
|         .store | ||||
|         .get_advanced_hot_state(split.block_root, split.slot, split.state_root) | ||||
|         .unwrap() | ||||
|         .unwrap(); | ||||
|     complete_state_advance( | ||||
|         &mut advanced_split_state, | ||||
|         Some(split_state_root), | ||||
|         attestation_start_slot, | ||||
|         &harness.chain.spec, | ||||
|     ) | ||||
|     .unwrap(); | ||||
|     advanced_split_state | ||||
|         .build_caches(&harness.chain.spec) | ||||
|         .unwrap(); | ||||
|     let advanced_split_state_root = advanced_split_state.update_tree_hash_cache().unwrap(); | ||||
|     for slot in (attestation_start_slot.as_u64()..attestation_end_slot.as_u64()).map(Slot::new) { | ||||
|         let attestations = harness.make_attestations( | ||||
|             &all_validators, | ||||
|             &advanced_split_state, | ||||
|             advanced_split_state_root, | ||||
|             split.block_root.into(), | ||||
|             slot, | ||||
|         ); | ||||
|         harness.advance_slot(); | ||||
|         harness.process_attestations(attestations); | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[tokio::test] | ||||
| async fn finalizes_after_resuming_from_db() { | ||||
|     let validator_count = 16; | ||||
| @ -2307,6 +2472,7 @@ async fn finalizes_after_resuming_from_db() { | ||||
|         .default_spec() | ||||
|         .keypairs(KEYPAIRS[0..validator_count].to_vec()) | ||||
|         .resumed_disk_store(store) | ||||
|         .testing_slot_clock(original_chain.slot_clock.clone()) | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
| 
 | ||||
| @ -2560,6 +2726,9 @@ async fn schema_downgrade_to_min_version() { | ||||
|         SchemaVersion(11) | ||||
|     }; | ||||
| 
 | ||||
|     // Save the slot clock so that the new harness doesn't revert in time.
 | ||||
|     let slot_clock = harness.chain.slot_clock.clone(); | ||||
| 
 | ||||
|     // Close the database to ensure everything is written to disk.
 | ||||
|     drop(store); | ||||
|     drop(harness); | ||||
| @ -2590,11 +2759,21 @@ async fn schema_downgrade_to_min_version() { | ||||
|     ) | ||||
|     .expect("schema upgrade from minimum version should work"); | ||||
| 
 | ||||
|     // Rescreate the harness.
 | ||||
|     // Recreate the harness.
 | ||||
|     /* | ||||
|     let slot_clock = TestingSlotClock::new( | ||||
|         Slot::new(0), | ||||
|         Duration::from_secs(harness.chain.genesis_time), | ||||
|         Duration::from_secs(spec.seconds_per_slot), | ||||
|     ); | ||||
|     slot_clock.set_slot(harness.get_current_slot().as_u64()); | ||||
|     */ | ||||
| 
 | ||||
|     let harness = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) | ||||
|         .logger(store.logger().clone()) | ||||
|         .testing_slot_clock(slot_clock) | ||||
|         .resumed_disk_store(store.clone()) | ||||
|         .mock_execution_layer() | ||||
|         .build(); | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| #![cfg(not(debug_assertions))] | ||||
| 
 | ||||
| use beacon_chain::sync_committee_verification::Error as SyncCommitteeError; | ||||
| use beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData}; | ||||
| use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; | ||||
| use int_to_bytes::int_to_bytes32; | ||||
| use lazy_static::lazy_static; | ||||
| @ -444,11 +444,17 @@ async fn aggregated_gossip_verification() { | ||||
|      * subcommittee index contribution.subcommittee_index. | ||||
|      */ | ||||
| 
 | ||||
|     let contribution = &valid_aggregate.message.contribution; | ||||
|     let sync_committee_data = SyncCommitteeData { | ||||
|         slot: contribution.slot, | ||||
|         root: contribution.beacon_block_root, | ||||
|         subcommittee_index: contribution.subcommittee_index, | ||||
|     }; | ||||
|     assert_invalid!( | ||||
|         "aggregate that has already been seen", | ||||
|         valid_aggregate.clone(), | ||||
|         SyncCommitteeError::SyncContributionAlreadyKnown(hash) | ||||
|         if hash == valid_aggregate.message.contribution.tree_hash_root() | ||||
|         SyncCommitteeError::SyncContributionSupersetKnown(hash) | ||||
|         if hash == sync_committee_data.tree_hash_root() | ||||
|     ); | ||||
| 
 | ||||
|     /* | ||||
|  | ||||
| @ -6,9 +6,8 @@ use beacon_chain::{ | ||||
|         AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, | ||||
|         OP_POOL_DB_KEY, | ||||
|     }, | ||||
|     BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, | ||||
|     BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, | ||||
| }; | ||||
| use fork_choice::CountUnrealized; | ||||
| use lazy_static::lazy_static; | ||||
| use operation_pool::PersistedOperationPool; | ||||
| use state_processing::{ | ||||
| @ -29,6 +28,10 @@ lazy_static! { | ||||
| fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> { | ||||
|     let harness = BeaconChainHarness::builder(MinimalEthSpec) | ||||
|         .default_spec() | ||||
|         .chain_config(ChainConfig { | ||||
|             reconstruct_historic_states: true, | ||||
|             ..ChainConfig::default() | ||||
|         }) | ||||
|         .keypairs(KEYPAIRS[0..validator_count].to_vec()) | ||||
|         .fresh_ephemeral_store() | ||||
|         .mock_execution_layer() | ||||
| @ -687,8 +690,8 @@ async fn run_skip_slot_test(skip_slots: u64) { | ||||
|             .process_block( | ||||
|                 harness_a.chain.head_snapshot().beacon_block_root, | ||||
|                 harness_a.chain.head_snapshot().beacon_block.clone(), | ||||
|                 CountUnrealized::True, | ||||
|                 NotifyExecutionLayer::Yes, | ||||
|                 || Ok(()) | ||||
|             ) | ||||
|             .await | ||||
|             .unwrap(), | ||||
|  | ||||
26 beacon_node/beacon_processor/Cargo.toml (Normal file)
| @ -0,0 +1,26 @@ | ||||
| [package] | ||||
| name = "beacon_processor" | ||||
| version = "0.1.0" | ||||
| edition = { workspace = true } | ||||
| 
 | ||||
| [dependencies] | ||||
| slog = { workspace = true } | ||||
| itertools = { workspace = true } | ||||
| logging = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| tokio-util = { workspace = true } | ||||
| futures = { workspace = true } | ||||
| fnv = { workspace = true } | ||||
| strum = { workspace = true } | ||||
| task_executor = { workspace = true } | ||||
| slot_clock = { workspace = true } | ||||
| lighthouse_network = { workspace = true } | ||||
| hex = { workspace = true } | ||||
| derivative = { workspace = true } | ||||
| types = { workspace = true } | ||||
| ethereum_ssz = { workspace = true } | ||||
| lazy_static = { workspace = true } | ||||
| lighthouse_metrics = { workspace = true } | ||||
| parking_lot = { workspace = true } | ||||
| num_cpus = { workspace = true } | ||||
| serde = { workspace = true } | ||||
|  | ||||
(File diff suppressed because it is too large)
150 beacon_node/beacon_processor/src/metrics.rs (Normal file)
| @ -0,0 +1,150 @@ | ||||
| pub use lighthouse_metrics::*; | ||||
| 
 | ||||
| lazy_static::lazy_static! { | ||||
| 
 | ||||
|     /* | ||||
|      * Gossip processor | ||||
|      */ | ||||
|     pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result<IntCounterVec> = try_create_int_counter_vec( | ||||
|         "beacon_processor_work_events_rx_count", | ||||
|         "Count of work events received (but not necessarily processed)", | ||||
|         &["type"] | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result<IntCounterVec> = try_create_int_counter_vec( | ||||
|         "beacon_processor_work_events_ignored_count", | ||||
|         "Count of work events purposefully ignored", | ||||
|         &["type"] | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result<IntCounterVec> = try_create_int_counter_vec( | ||||
|         "beacon_processor_work_events_started_count", | ||||
|         "Count of work events which have been started by a worker", | ||||
|         &["type"] | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_WORKER_TIME: Result<HistogramVec> = try_create_histogram_vec( | ||||
|         "beacon_processor_worker_time", | ||||
|         "Time taken for a worker to fully process some parcel of work.", | ||||
|         &["type"] | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result<IntCounter> = try_create_int_counter( | ||||
|         "beacon_processor_workers_spawned_total", | ||||
|         "The number of workers ever spawned by the gossip processing pool." | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_workers_active_total", | ||||
|         "Count of active workers in the gossip processing pool." | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result<IntCounter> = try_create_int_counter( | ||||
|         "beacon_processor_idle_events_total", | ||||
|         "Count of idle events processed by the gossip processor manager." | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result<Histogram> = try_create_histogram( | ||||
|         "beacon_processor_event_handling_seconds", | ||||
|         "Time spent handling a new message and allocating it to a queue or worker." | ||||
|     ); | ||||
|     // Gossip blocks.
 | ||||
|     pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_gossip_block_queue_total", | ||||
|         "Count of blocks from gossip waiting to be verified." | ||||
|     ); | ||||
|     // Gossip Exits.
 | ||||
|     pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_exit_queue_total", | ||||
|         "Count of exits from gossip waiting to be verified." | ||||
|     ); | ||||
|     // Gossip proposer slashings.
 | ||||
|     pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_proposer_slashing_queue_total", | ||||
|         "Count of proposer slashings from gossip waiting to be verified." | ||||
|     ); | ||||
|     // Gossip attester slashings.
 | ||||
|     pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_attester_slashing_queue_total", | ||||
|         "Count of attester slashings from gossip waiting to be verified." | ||||
|     ); | ||||
|     // Gossip BLS to execution changes.
 | ||||
|     pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_bls_to_execution_change_queue_total", | ||||
|         "Count of address changes from gossip waiting to be verified." | ||||
|     ); | ||||
|     // Rpc blocks.
 | ||||
|     pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_rpc_block_queue_total", | ||||
|         "Count of blocks from the rpc waiting to be verified." | ||||
|     ); | ||||
|     // Chain segments.
 | ||||
|     pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_chain_segment_queue_total", | ||||
|         "Count of chain segments from the rpc waiting to be verified." | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_backfill_chain_segment_queue_total", | ||||
|         "Count of backfill chain segments from the rpc waiting to be verified." | ||||
|     ); | ||||
|     // Unaggregated attestations.
 | ||||
|     pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_unaggregated_attestation_queue_total", | ||||
|         "Count of unagg. attestations waiting to be processed." | ||||
|     ); | ||||
|     // Aggregated attestations.
 | ||||
|     pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_aggregated_attestation_queue_total", | ||||
|         "Count of agg. attestations waiting to be processed." | ||||
|     ); | ||||
|     // Sync committee messages.
 | ||||
|     pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_sync_message_queue_total", | ||||
|         "Count of sync committee messages waiting to be processed." | ||||
|     ); | ||||
|     // Sync contribution.
 | ||||
|     pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_sync_contribution_queue_total", | ||||
|         "Count of sync committee contributions waiting to be processed." | ||||
|     ); | ||||
|     // HTTP API requests.
 | ||||
|     pub static ref BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_api_request_p0_queue_total", | ||||
|         "Count of P0 HTTP requesets waiting to be processed." | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||
|         "beacon_processor_api_request_p1_queue_total", | ||||
|         "Count of P1 HTTP requesets waiting to be processed." | ||||
|     ); | ||||
| 
 | ||||
|     /* | ||||
|      * Attestation reprocessing queue metrics. | ||||
|      */ | ||||
|     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result<IntGaugeVec> = | ||||
|         try_create_int_gauge_vec( | ||||
|         "beacon_processor_reprocessing_queue_total", | ||||
|         "Count of items in a reprocessing queue.", | ||||
|         &["type"] | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter( | ||||
|         "beacon_processor_reprocessing_queue_expired_attestations", | ||||
|         "Number of queued attestations which have expired before a matching block has been found." | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter( | ||||
|         "beacon_processor_reprocessing_queue_matched_attestations", | ||||
|         "Number of queued attestations where as matching block has been imported." | ||||
|     ); | ||||
| 
 | ||||
|     /* | ||||
|      * Light client update reprocessing queue metrics. | ||||
|      */ | ||||
|     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter( | ||||
|         "beacon_processor_reprocessing_queue_expired_optimistic_updates", | ||||
|         "Number of queued light client optimistic updates which have expired before a matching block has been found." | ||||
|     ); | ||||
|     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter( | ||||
|         "beacon_processor_reprocessing_queue_matched_optimistic_updates", | ||||
|         "Number of queued light client optimistic updates where as matching block has been imported." | ||||
|     ); | ||||
| 
 | ||||
|     /// Errors and Debugging Stats
 | ||||
|     pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result<IntCounterVec> = | ||||
|         try_create_int_counter_vec( | ||||
|             "beacon_processor_send_error_per_work_type", | ||||
|             "Total number of beacon processor send error per work type", | ||||
|             &["type"] | ||||
|         ); | ||||
| } | ||||
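| 
 | ||||
| // A minimal usage sketch (not part of this diff; `work_type` and `active_workers` are
 | ||||
| // hypothetical locals), using the `lighthouse_metrics` helpers re-exported above:
 | ||||
| //
 | ||||
| //     inc_counter_vec(&BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT, &[work_type]);
 | ||||
| //     set_gauge(&BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL, active_workers as i64);
 | ||||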
| @ -10,23 +10,18 @@ | ||||
| //!
 | ||||
| //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown
 | ||||
| //! block will be re-queued until their block is imported, or until they expire.
 | ||||
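| //!
 | ||||
| //! A rough sketch of the flow (illustrative only; the names are those defined below): a caller
 | ||||
| //! sends `ReprocessQueueMessage::UnknownBlockAggregate(..)` into the reprocessing channel, and
 | ||||
| //! once `ReprocessQueueMessage::BlockImported { block_root, .. }` arrives for the matching root,
 | ||||
| //! the queue emits `ReadyWork::Aggregate(..)` on `ready_work_tx` for re-verification.
 | ||||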
| use super::MAX_SCHEDULED_WORK_QUEUE_LEN; | ||||
| use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; | ||||
| use crate::metrics; | ||||
| use crate::sync::manager::BlockProcessType; | ||||
| use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; | ||||
| use crate::{AsyncFn, BlockingFn, Work, WorkEvent}; | ||||
| use fnv::FnvHashMap; | ||||
| use futures::task::Poll; | ||||
| use futures::{Stream, StreamExt}; | ||||
| use itertools::Itertools; | ||||
| use lighthouse_network::{MessageId, PeerId}; | ||||
| use logging::TimeLatch; | ||||
| use slog::{crit, debug, error, trace, warn, Logger}; | ||||
| use slot_clock::SlotClock; | ||||
| use std::collections::{HashMap, HashSet}; | ||||
| use std::future::Future; | ||||
| use std::pin::Pin; | ||||
| use std::sync::Arc; | ||||
| use std::task::Context; | ||||
| use std::time::Duration; | ||||
| use strum::AsRefStr; | ||||
| @ -34,10 +29,7 @@ use task_executor::TaskExecutor; | ||||
| use tokio::sync::mpsc::{self, Receiver, Sender}; | ||||
| use tokio::time::error::Error as TimeError; | ||||
| use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; | ||||
| use types::{ | ||||
|     Attestation, EthSpec, Hash256, LightClientOptimisticUpdate, SignedAggregateAndProof, | ||||
|     SignedBeaconBlock, SubnetId, | ||||
| }; | ||||
| use types::{EthSpec, Hash256, Slot}; | ||||
| 
 | ||||
| const TASK_NAME: &str = "beacon_processor_reprocess_queue"; | ||||
| const GOSSIP_BLOCKS: &str = "gossip_blocks"; | ||||
| @ -47,7 +39,7 @@ const LIGHT_CLIENT_UPDATES: &str = "lc_updates"; | ||||
| 
 | ||||
| /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts.
 | ||||
| /// This is to account for any slight drift in the system clock.
 | ||||
| const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); | ||||
| pub const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); | ||||
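| // Illustrative timing (assuming the usual `SlotClock::duration_to_slot` scheduling): an early
 | ||||
| // block for slot `s` is held for `duration_to_slot(s) + ADDITIONAL_QUEUED_BLOCK_DELAY`, so a
 | ||||
| // block arriving 200ms before its slot is released roughly 205ms later.
 | ||||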
| 
 | ||||
| /// For how long to queue aggregated and unaggregated attestations for re-processing.
 | ||||
| pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); | ||||
| @ -84,12 +76,12 @@ pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [ | ||||
| 
 | ||||
| /// Messages that the scheduler can receive.
 | ||||
| #[derive(AsRefStr)] | ||||
| pub enum ReprocessQueueMessage<T: BeaconChainTypes> { | ||||
| pub enum ReprocessQueueMessage { | ||||
|     /// A block that has been received early and should be queued for later processing.
 | ||||
|     EarlyBlock(QueuedGossipBlock<T>), | ||||
|     EarlyBlock(QueuedGossipBlock), | ||||
|     /// A gossip block for hash `X` is being imported; we should queue the rpc block for the same
 | ||||
|     /// hash until the gossip block is imported.
 | ||||
|     RpcBlock(QueuedRpcBlock<T::EthSpec>), | ||||
|     RpcBlock(QueuedRpcBlock), | ||||
|     /// A block that was successfully processed. We use this to handle attestations and light client updates
 | ||||
|     /// for unknown blocks.
 | ||||
|     BlockImported { | ||||
| @ -97,139 +89,127 @@ pub enum ReprocessQueueMessage<T: BeaconChainTypes> { | ||||
|         parent_root: Hash256, | ||||
|     }, | ||||
|     /// An unaggregated attestation that references an unknown block.
 | ||||
|     UnknownBlockUnaggregate(QueuedUnaggregate<T::EthSpec>), | ||||
|     UnknownBlockUnaggregate(QueuedUnaggregate), | ||||
|     /// An aggregated attestation that references an unknown block.
 | ||||
|     UnknownBlockAggregate(QueuedAggregate<T::EthSpec>), | ||||
|     UnknownBlockAggregate(QueuedAggregate), | ||||
|     /// A light client optimistic update that references a parent root that has not been seen as a parent.
 | ||||
|     UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate<T::EthSpec>), | ||||
|     UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), | ||||
|     /// A new backfill batch that needs to be scheduled for processing.
 | ||||
|     BackfillSync(QueuedBackfillBatch<T::EthSpec>), | ||||
|     BackfillSync(QueuedBackfillBatch), | ||||
| } | ||||
| 
 | ||||
| /// Events sent by the scheduler once they are ready for re-processing.
 | ||||
| pub enum ReadyWork<T: BeaconChainTypes> { | ||||
|     Block(QueuedGossipBlock<T>), | ||||
|     RpcBlock(QueuedRpcBlock<T::EthSpec>), | ||||
|     Unaggregate(QueuedUnaggregate<T::EthSpec>), | ||||
|     Aggregate(QueuedAggregate<T::EthSpec>), | ||||
|     LightClientUpdate(QueuedLightClientUpdate<T::EthSpec>), | ||||
|     BackfillSync(QueuedBackfillBatch<T::EthSpec>), | ||||
| pub enum ReadyWork { | ||||
|     Block(QueuedGossipBlock), | ||||
|     RpcBlock(QueuedRpcBlock), | ||||
|     IgnoredRpcBlock(IgnoredRpcBlock), | ||||
|     Unaggregate(QueuedUnaggregate), | ||||
|     Aggregate(QueuedAggregate), | ||||
|     LightClientUpdate(QueuedLightClientUpdate), | ||||
|     BackfillSync(QueuedBackfillBatch), | ||||
| } | ||||
| 
 | ||||
| /// An Attestation for which the corresponding block was not seen while processing, queued for
 | ||||
| /// later.
 | ||||
| pub struct QueuedUnaggregate<T: EthSpec> { | ||||
|     pub peer_id: PeerId, | ||||
|     pub message_id: MessageId, | ||||
|     pub attestation: Box<Attestation<T>>, | ||||
|     pub subnet_id: SubnetId, | ||||
|     pub should_import: bool, | ||||
|     pub seen_timestamp: Duration, | ||||
| pub struct QueuedUnaggregate { | ||||
|     pub beacon_block_root: Hash256, | ||||
|     pub process_fn: BlockingFn, | ||||
| } | ||||
| 
 | ||||
| /// An aggregated attestation for which the corresponding block was not seen while processing, queued for
 | ||||
| /// later.
 | ||||
| pub struct QueuedAggregate<T: EthSpec> { | ||||
|     pub peer_id: PeerId, | ||||
|     pub message_id: MessageId, | ||||
|     pub attestation: Box<SignedAggregateAndProof<T>>, | ||||
|     pub seen_timestamp: Duration, | ||||
| pub struct QueuedAggregate { | ||||
|     pub beacon_block_root: Hash256, | ||||
|     pub process_fn: BlockingFn, | ||||
| } | ||||
| 
 | ||||
| /// A light client update for which the corresponding parent block was not seen while processing,
 | ||||
| /// queued for later.
 | ||||
| pub struct QueuedLightClientUpdate<T: EthSpec> { | ||||
|     pub peer_id: PeerId, | ||||
|     pub message_id: MessageId, | ||||
|     pub light_client_optimistic_update: Box<LightClientOptimisticUpdate<T>>, | ||||
| pub struct QueuedLightClientUpdate { | ||||
|     pub parent_root: Hash256, | ||||
|     pub seen_timestamp: Duration, | ||||
|     pub process_fn: BlockingFn, | ||||
| } | ||||
| 
 | ||||
| /// A block that arrived early and has been queued for later import.
 | ||||
| pub struct QueuedGossipBlock<T: BeaconChainTypes> { | ||||
|     pub peer_id: PeerId, | ||||
|     pub block: Box<GossipVerifiedBlock<T>>, | ||||
|     pub seen_timestamp: Duration, | ||||
| pub struct QueuedGossipBlock { | ||||
|     pub beacon_block_slot: Slot, | ||||
|     pub beacon_block_root: Hash256, | ||||
|     pub process_fn: AsyncFn, | ||||
| } | ||||
| 
 | ||||
| /// A block that arrived for processing when the same block was being imported over gossip.
 | ||||
| /// It is queued for later import.
 | ||||
| pub struct QueuedRpcBlock<T: EthSpec> { | ||||
|     pub block_root: Hash256, | ||||
|     pub block: Arc<SignedBeaconBlock<T>>, | ||||
|     pub process_type: BlockProcessType, | ||||
|     pub seen_timestamp: Duration, | ||||
|     /// Indicates if the beacon chain should process this block or not.
 | ||||
|     /// We use this to ignore block processing when rpc block queues are full.
 | ||||
|     pub should_process: bool, | ||||
| pub struct QueuedRpcBlock { | ||||
|     pub beacon_block_root: Hash256, | ||||
|     /// Processes/imports the block.
 | ||||
|     pub process_fn: AsyncFn, | ||||
|     /// Ignores the block.
 | ||||
|     pub ignore_fn: BlockingFn, | ||||
| } | ||||
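| // `AsyncFn` and `BlockingFn` are boxed, type-erased closures: capturing the work this way is
 | ||||
| // what allows these queue types to drop their old `BeaconChainTypes`/`EthSpec` parameters.
 | ||||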
| 
 | ||||
| /// An rpc block whose processing should be skipped rather than performed, e.g. because the
 | ||||
| /// rpc block queue is full at the time it becomes ready.
 | ||||
| pub struct IgnoredRpcBlock { | ||||
|     pub process_fn: BlockingFn, | ||||
| } | ||||
| 
 | ||||
| /// A backfill batch work that has been queued for processing later.
 | ||||
| #[derive(Clone)] | ||||
| pub struct QueuedBackfillBatch<E: EthSpec> { | ||||
|     pub process_id: ChainSegmentProcessId, | ||||
|     pub blocks: Vec<Arc<SignedBeaconBlock<E>>>, | ||||
| } | ||||
| pub struct QueuedBackfillBatch(pub AsyncFn); | ||||
| 
 | ||||
| impl<T: BeaconChainTypes> TryFrom<WorkEvent<T>> for QueuedBackfillBatch<T::EthSpec> { | ||||
| impl<T: EthSpec> TryFrom<WorkEvent<T>> for QueuedBackfillBatch { | ||||
|     type Error = WorkEvent<T>; | ||||
| 
 | ||||
|     fn try_from(event: WorkEvent<T>) -> Result<Self, WorkEvent<T>> { | ||||
|         match event { | ||||
|             WorkEvent { | ||||
|                 work: | ||||
|                     Work::ChainSegment { | ||||
|                         process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_), | ||||
|                         blocks, | ||||
|                     }, | ||||
|                 work: Work::ChainSegmentBackfill(process_fn), | ||||
|                 .. | ||||
|             } => Ok(QueuedBackfillBatch { process_id, blocks }), | ||||
|             } => Ok(QueuedBackfillBatch(process_fn)), | ||||
|             _ => Err(event), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: BeaconChainTypes> From<QueuedBackfillBatch<T::EthSpec>> for WorkEvent<T> { | ||||
|     fn from(queued_backfill_batch: QueuedBackfillBatch<T::EthSpec>) -> WorkEvent<T> { | ||||
|         WorkEvent::chain_segment( | ||||
|             queued_backfill_batch.process_id, | ||||
|             queued_backfill_batch.blocks, | ||||
|         ) | ||||
| impl<T: EthSpec> From<QueuedBackfillBatch> for WorkEvent<T> { | ||||
|     fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent<T> { | ||||
|         WorkEvent { | ||||
|             drop_during_sync: false, | ||||
|             work: Work::ChainSegmentBackfill(queued_backfill_batch.0), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Unifies the different messages processed by the block delay queue.
 | ||||
| enum InboundEvent<T: BeaconChainTypes> { | ||||
| enum InboundEvent { | ||||
|     /// A gossip block that was queued for later processing and is ready for import.
 | ||||
|     ReadyGossipBlock(QueuedGossipBlock<T>), | ||||
|     ReadyGossipBlock(QueuedGossipBlock), | ||||
|     /// A rpc block that was queued because the same gossip block was being imported
 | ||||
|     /// will now be retried for import.
 | ||||
|     ReadyRpcBlock(QueuedRpcBlock<T::EthSpec>), | ||||
|     ReadyRpcBlock(QueuedRpcBlock), | ||||
|     /// An aggregated or unaggregated attestation is ready for re-processing.
 | ||||
|     ReadyAttestation(QueuedAttestationId), | ||||
|     /// A light client update that is ready for re-processing.
 | ||||
|     ReadyLightClientUpdate(QueuedLightClientUpdateId), | ||||
|     /// A backfill batch that was queued is ready for processing.
 | ||||
|     ReadyBackfillSync(QueuedBackfillBatch<T::EthSpec>), | ||||
|     ReadyBackfillSync(QueuedBackfillBatch), | ||||
|     /// A `DelayQueue` returned an error.
 | ||||
|     DelayQueueError(TimeError, &'static str), | ||||
|     /// A message sent to the `ReprocessQueue`
 | ||||
|     Msg(ReprocessQueueMessage<T>), | ||||
|     Msg(ReprocessQueueMessage), | ||||
| } | ||||
| 
 | ||||
| /// Manages the scheduling of work that needs to be re-processed later.
 | ||||
| struct ReprocessQueue<T: BeaconChainTypes> { | ||||
| struct ReprocessQueue<S> { | ||||
|     /// Receiver of messages requesting that work be scheduled for reprocessing.
 | ||||
|     work_reprocessing_rx: Receiver<ReprocessQueueMessage<T>>, | ||||
|     work_reprocessing_rx: Receiver<ReprocessQueueMessage>, | ||||
|     /// Sender for work once it becomes ready.
 | ||||
|     ready_work_tx: Sender<ReadyWork<T>>, | ||||
|     ready_work_tx: Sender<ReadyWork>, | ||||
| 
 | ||||
|     /* Queues */ | ||||
|     /// Queue to manage scheduled early blocks.
 | ||||
|     gossip_block_delay_queue: DelayQueue<QueuedGossipBlock<T>>, | ||||
|     gossip_block_delay_queue: DelayQueue<QueuedGossipBlock>, | ||||
|     /// Queue to manage scheduled early blocks.
 | ||||
|     rpc_block_delay_queue: DelayQueue<QueuedRpcBlock<T::EthSpec>>, | ||||
|     rpc_block_delay_queue: DelayQueue<QueuedRpcBlock>, | ||||
|     /// Queue to manage scheduled attestations.
 | ||||
|     attestations_delay_queue: DelayQueue<QueuedAttestationId>, | ||||
|     /// Queue to manage scheduled light client updates.
 | ||||
| @ -239,17 +219,17 @@ struct ReprocessQueue<T: BeaconChainTypes> { | ||||
|     /// Queued blocks.
 | ||||
|     queued_gossip_block_roots: HashSet<Hash256>, | ||||
|     /// Queued aggregated attestations.
 | ||||
|     queued_aggregates: FnvHashMap<usize, (QueuedAggregate<T::EthSpec>, DelayKey)>, | ||||
|     queued_aggregates: FnvHashMap<usize, (QueuedAggregate, DelayKey)>, | ||||
|     /// Queued attestations.
 | ||||
|     queued_unaggregates: FnvHashMap<usize, (QueuedUnaggregate<T::EthSpec>, DelayKey)>, | ||||
|     queued_unaggregates: FnvHashMap<usize, (QueuedUnaggregate, DelayKey)>, | ||||
|     /// Attestations (aggregated and unaggregated) per root.
 | ||||
|     awaiting_attestations_per_root: HashMap<Hash256, Vec<QueuedAttestationId>>, | ||||
|     /// Queued Light Client Updates.
 | ||||
|     queued_lc_updates: FnvHashMap<usize, (QueuedLightClientUpdate<T::EthSpec>, DelayKey)>, | ||||
|     queued_lc_updates: FnvHashMap<usize, (QueuedLightClientUpdate, DelayKey)>, | ||||
|     /// Light Client Updates per parent_root.
 | ||||
|     awaiting_lc_updates_per_parent_root: HashMap<Hash256, Vec<QueuedLightClientUpdateId>>, | ||||
|     /// Queued backfill batches
 | ||||
|     queued_backfill_batches: Vec<QueuedBackfillBatch<T::EthSpec>>, | ||||
|     queued_backfill_batches: Vec<QueuedBackfillBatch>, | ||||
| 
 | ||||
|     /* Aux */ | ||||
|     /// Next attestation id, used for both aggregated and unaggregated attestations
 | ||||
| @ -260,7 +240,7 @@ struct ReprocessQueue<T: BeaconChainTypes> { | ||||
|     attestation_delay_debounce: TimeLatch, | ||||
|     lc_update_delay_debounce: TimeLatch, | ||||
|     next_backfill_batch_event: Option<Pin<Box<tokio::time::Sleep>>>, | ||||
|     slot_clock: Pin<Box<T::SlotClock>>, | ||||
|     slot_clock: Pin<Box<S>>, | ||||
| } | ||||
| 
 | ||||
| pub type QueuedLightClientUpdateId = usize; | ||||
| @ -271,20 +251,20 @@ enum QueuedAttestationId { | ||||
|     Unaggregate(usize), | ||||
| } | ||||
| 
 | ||||
| impl<T: EthSpec> QueuedAggregate<T> { | ||||
| impl QueuedAggregate { | ||||
|     pub fn beacon_block_root(&self) -> &Hash256 { | ||||
|         &self.attestation.message.aggregate.data.beacon_block_root | ||||
|         &self.beacon_block_root | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: EthSpec> QueuedUnaggregate<T> { | ||||
| impl QueuedUnaggregate { | ||||
|     pub fn beacon_block_root(&self) -> &Hash256 { | ||||
|         &self.attestation.data.beacon_block_root | ||||
|         &self.beacon_block_root | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: BeaconChainTypes> Stream for ReprocessQueue<T> { | ||||
|     type Item = InboundEvent<T>; | ||||
| impl<S: SlotClock> Stream for ReprocessQueue<S> { | ||||
|     type Item = InboundEvent; | ||||
| 
 | ||||
|     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { | ||||
|         // NOTE: implementing `Stream` is not necessary but allows us to maintain the future selection
 | ||||
| @ -375,16 +355,18 @@ impl<T: BeaconChainTypes> Stream for ReprocessQueue<T> { | ||||
| /// Starts the job that manages scheduling works that need re-processing. Work requests are
 | ||||
| /// received via `work_reprocessing_rx`. Once a work is ready, it is sent back via
 | ||||
| /// `ready_work_tx`.
 | ||||
| pub fn spawn_reprocess_scheduler<T: BeaconChainTypes>( | ||||
|     ready_work_tx: Sender<ReadyWork<T>>, | ||||
| pub fn spawn_reprocess_scheduler<S: SlotClock + 'static>( | ||||
|     ready_work_tx: Sender<ReadyWork>, | ||||
|     work_reprocessing_rx: Receiver<ReprocessQueueMessage>, | ||||
|     executor: &TaskExecutor, | ||||
|     slot_clock: T::SlotClock, | ||||
|     slot_clock: S, | ||||
|     log: Logger, | ||||
| ) -> Sender<ReprocessQueueMessage<T>> { | ||||
|     let (work_reprocessing_tx, work_reprocessing_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); | ||||
|     // Basic sanity check.
 | ||||
|     assert!(ADDITIONAL_QUEUED_BLOCK_DELAY < MAXIMUM_GOSSIP_CLOCK_DISPARITY); | ||||
| 
 | ||||
|     maximum_gossip_clock_disparity: Duration, | ||||
| ) -> Result<(), String> { | ||||
|     // Sanity check
 | ||||
|     if ADDITIONAL_QUEUED_BLOCK_DELAY >= maximum_gossip_clock_disparity { | ||||
|         return Err("The block delay and gossip disparity don't match.".to_string()); | ||||
|     } | ||||
|     let mut queue = ReprocessQueue { | ||||
|         work_reprocessing_rx, | ||||
|         ready_work_tx, | ||||
| @ -423,19 +405,18 @@ pub fn spawn_reprocess_scheduler<T: BeaconChainTypes>( | ||||
|         }, | ||||
|         TASK_NAME, | ||||
|     ); | ||||
| 
 | ||||
|     work_reprocessing_tx | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
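| // A wiring sketch (illustrative; the channels, `capacity`, clock, and disparity value are
 | ||||
| // assumptions made by the caller, e.g. the beacon processor):
 | ||||
| //
 | ||||
| //     let (ready_work_tx, ready_work_rx) = mpsc::channel(capacity);
 | ||||
| //     let (work_reprocessing_tx, work_reprocessing_rx) = mpsc::channel(capacity);
 | ||||
| //     spawn_reprocess_scheduler(
 | ||||
| //         ready_work_tx,
 | ||||
| //         work_reprocessing_rx,
 | ||||
| //         &executor,
 | ||||
| //         slot_clock,
 | ||||
| //         log,
 | ||||
| //         maximum_gossip_clock_disparity,
 | ||||
| //     )?;
 | ||||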
| impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
|     fn handle_message(&mut self, msg: InboundEvent<T>, slot_clock: &T::SlotClock, log: &Logger) { | ||||
| impl<S: SlotClock> ReprocessQueue<S> { | ||||
|     fn handle_message(&mut self, msg: InboundEvent, slot_clock: &S, log: &Logger) { | ||||
|         use ReprocessQueueMessage::*; | ||||
|         match msg { | ||||
|             // Some block has been indicated as "early" and should be processed when the
 | ||||
|             // appropriate slot arrives.
 | ||||
|             InboundEvent::Msg(EarlyBlock(early_block)) => { | ||||
|                 let block_slot = early_block.block.block.slot(); | ||||
|                 let block_root = early_block.block.block_root; | ||||
|                 let block_slot = early_block.beacon_block_slot; | ||||
|                 let block_root = early_block.beacon_block_root; | ||||
| 
 | ||||
|                 // Don't add the same block to the queue twice. This prevents DoS attacks.
 | ||||
|                 if self.queued_gossip_block_roots.contains(&block_root) { | ||||
| @ -494,7 +475,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
|             // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY`
 | ||||
|             // and then send the rpc block back for processing assuming the gossip import
 | ||||
|             // has completed by then.
 | ||||
|             InboundEvent::Msg(RpcBlock(mut rpc_block)) => { | ||||
|             InboundEvent::Msg(RpcBlock(rpc_block)) => { | ||||
|                 // Check to ensure this won't over-fill the queue.
 | ||||
|                 if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { | ||||
|                     if self.rpc_block_debounce.elapsed() { | ||||
| @ -507,10 +488,11 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
|                     } | ||||
|                     // Return the block to the beacon processor signalling to
 | ||||
|                     // ignore processing for this block
 | ||||
|                     rpc_block.should_process = false; | ||||
|                     if self | ||||
|                         .ready_work_tx | ||||
|                         .try_send(ReadyWork::RpcBlock(rpc_block)) | ||||
|                         .try_send(ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock { | ||||
|                             process_fn: rpc_block.ignore_fn, | ||||
|                         })) | ||||
|                         .is_err() | ||||
|                     { | ||||
|                         error!( | ||||
| @ -529,7 +511,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
|                 debug!( | ||||
|                     log, | ||||
|                     "Sending rpc block for reprocessing"; | ||||
|                     "block_root" => %queued_rpc_block.block.canonical_root() | ||||
|                     "block_root" => %queued_rpc_block.beacon_block_root | ||||
|                 ); | ||||
|                 if self | ||||
|                     .ready_work_tx | ||||
| @ -767,7 +749,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
|             } | ||||
|             // A block that was queued for later processing is now ready to be processed.
 | ||||
|             InboundEvent::ReadyGossipBlock(ready_block) => { | ||||
|                 let block_root = ready_block.block.block_root; | ||||
|                 let block_root = ready_block.beacon_block_root; | ||||
| 
 | ||||
|                 if !self.queued_gossip_block_roots.remove(&block_root) { | ||||
|                     // Log an error to alert that we've made a bad assumption about how this
 | ||||
| @ -885,18 +867,28 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
|                     "millis_from_slot_start" => millis_from_slot_start | ||||
|                 ); | ||||
| 
 | ||||
|                 if self | ||||
|                 match self | ||||
|                     .ready_work_tx | ||||
|                     .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) | ||||
|                     .is_err() | ||||
|                     .try_send(ReadyWork::BackfillSync(queued_backfill_batch)) | ||||
|                 { | ||||
|                     // The message was sent successfully.
 | ||||
|                     Ok(()) => (), | ||||
|                     // The message was not sent, recover it from the returned `Err`.
 | ||||
|                     Err(mpsc::error::TrySendError::Full(ReadyWork::BackfillSync(batch))) | ||||
|                     | Err(mpsc::error::TrySendError::Closed(ReadyWork::BackfillSync(batch))) => { | ||||
|                         error!( | ||||
|                             log, | ||||
|                             "Failed to send scheduled backfill work"; | ||||
|                             "info" => "sending work back to queue" | ||||
|                         ); | ||||
|                     self.queued_backfill_batches | ||||
|                         .insert(0, queued_backfill_batch); | ||||
|                         self.queued_backfill_batches.insert(0, batch) | ||||
|                     } | ||||
|                     // The message was not sent and we didn't get the correct
 | ||||
|                     // return result. This is a logic error.
 | ||||
|                     _ => crit!( | ||||
|                         log, | ||||
|                         "Unexpected return from try_send error"; | ||||
|                     ), | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| @ -927,7 +919,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
|         // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue
 | ||||
|         if !self.queued_backfill_batches.is_empty() { | ||||
|             self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( | ||||
|                 ReprocessQueue::<T>::duration_until_next_backfill_batch_event(&self.slot_clock), | ||||
|                 ReprocessQueue::<S>::duration_until_next_backfill_batch_event(&self.slot_clock), | ||||
|             ))); | ||||
|         } else { | ||||
|             self.next_backfill_batch_event = None | ||||
| @ -936,7 +928,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
| 
 | ||||
|     /// Returns the duration until the next scheduled processing time. The schedule ensures that
 | ||||
|     /// backfill processing is done in windows of time that aren't critical.
 | ||||
|     fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { | ||||
|     fn duration_until_next_backfill_batch_event(slot_clock: &S) -> Duration { | ||||
|         let slot_duration = slot_clock.slot_duration(); | ||||
|         slot_clock | ||||
|             .millis_from_current_slot_start() | ||||
| @ -966,16 +958,9 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use beacon_chain::builder::Witness; | ||||
|     use beacon_chain::eth1_chain::CachingEth1Backend; | ||||
|     use slot_clock::TestingSlotClock; | ||||
|     use store::MemoryStore; | ||||
|     use types::MainnetEthSpec as E; | ||||
|     use types::Slot; | ||||
| 
 | ||||
|     type TestBeaconChainType = | ||||
|         Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>; | ||||
| 
 | ||||
|     #[test] | ||||
|     fn backfill_processing_schedule_calculation() { | ||||
|         let slot_duration = Duration::from_secs(12); | ||||
| @ -988,7 +973,7 @@ mod tests { | ||||
| 
 | ||||
|         for &event_duration_from_slot_start in event_times.iter() { | ||||
|             let duration_to_next_event = | ||||
|                 ReprocessQueue::<TestBeaconChainType>::duration_until_next_backfill_batch_event( | ||||
|                 ReprocessQueue::<TestingSlotClock>::duration_until_next_backfill_batch_event( | ||||
|                     &slot_clock, | ||||
|                 ); | ||||
| 
 | ||||
| @ -1005,7 +990,7 @@ mod tests { | ||||
|         // check for next event beyond the current slot
 | ||||
|         let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); | ||||
|         let duration_to_next_event = | ||||
|             ReprocessQueue::<TestBeaconChainType>::duration_until_next_backfill_batch_event( | ||||
|             ReprocessQueue::<TestingSlotClock>::duration_until_next_backfill_batch_event( | ||||
|                 &slot_clock, | ||||
|             ); | ||||
|         assert_eq!( | ||||
| @ -1,13 +1,13 @@ | ||||
| [package] | ||||
| name = "builder_client" | ||||
| version = "0.1.0" | ||||
| edition = "2021" | ||||
| edition = { workspace = true } | ||||
| authors = ["Sean Anderson <sean@sigmaprime.io>"] | ||||
| 
 | ||||
| [dependencies] | ||||
| reqwest = { version = "0.11.0", features = ["json","stream"] } | ||||
| sensitive_url = { path = "../../common/sensitive_url" } | ||||
| eth2 = { path = "../../common/eth2" } | ||||
| serde = { version = "1.0.116", features = ["derive"] } | ||||
| serde_json = "1.0.58" | ||||
| lighthouse_version = { path = "../../common/lighthouse_version" } | ||||
| reqwest = { workspace = true } | ||||
| sensitive_url = { workspace = true } | ||||
| eth2 = { workspace = true } | ||||
| serde = { workspace = true } | ||||
| serde_json = { workspace = true } | ||||
| lighthouse_version = { workspace = true } | ||||
|  | ||||
| @ -72,7 +72,7 @@ impl BuilderHttpClient { | ||||
|             .await? | ||||
|             .json() | ||||
|             .await | ||||
|             .map_err(Error::Reqwest) | ||||
|             .map_err(Into::into) | ||||
|     } | ||||
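|     // NB: `Into::into` (and `Error::from` below) relies on a `From<reqwest::Error>` impl for
 | ||||
|     //     `Error`, replacing the old explicit `Error::Reqwest(..)` mapping.
 | ||||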
| 
 | ||||
|     /// Perform a HTTP GET request, returning the `Response` for further processing.
 | ||||
| @ -85,7 +85,7 @@ impl BuilderHttpClient { | ||||
|         if let Some(timeout) = timeout { | ||||
|             builder = builder.timeout(timeout); | ||||
|         } | ||||
|         let response = builder.send().await.map_err(Error::Reqwest)?; | ||||
|         let response = builder.send().await.map_err(Error::from)?; | ||||
|         ok_or_error(response).await | ||||
|     } | ||||
| 
 | ||||
| @ -114,7 +114,7 @@ impl BuilderHttpClient { | ||||
|         if let Some(timeout) = timeout { | ||||
|             builder = builder.timeout(timeout); | ||||
|         } | ||||
|         let response = builder.json(body).send().await.map_err(Error::Reqwest)?; | ||||
|         let response = builder.json(body).send().await.map_err(Error::from)?; | ||||
|         ok_or_error(response).await | ||||
|     } | ||||
| 
 | ||||
|  | ||||
| @ -2,44 +2,46 @@ | ||||
| name = "client" | ||||
| version = "0.2.0" | ||||
| authors = ["Sigma Prime <contact@sigmaprime.io>"] | ||||
| edition = "2021" | ||||
| edition = { workspace = true } | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| serde_yaml = "0.8.13" | ||||
| state_processing = { path = "../../consensus/state_processing" } | ||||
| operation_pool = { path = "../operation_pool" } | ||||
| tokio = "1.14.0" | ||||
| serde_yaml = { workspace = true } | ||||
| state_processing = { workspace = true } | ||||
| operation_pool = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| 
 | ||||
| [dependencies] | ||||
| beacon_chain = { path = "../beacon_chain" } | ||||
| store = { path = "../store" } | ||||
| network = { path = "../network" } | ||||
| beacon_chain = { workspace = true } | ||||
| store = { workspace = true } | ||||
| network = { workspace = true } | ||||
| timer = { path = "../timer" } | ||||
| lighthouse_network = { path = "../lighthouse_network" } | ||||
| logging = { path = "../../common/logging" } | ||||
| parking_lot = "0.12.0" | ||||
| types = { path = "../../consensus/types" } | ||||
| eth2_config = { path = "../../common/eth2_config" } | ||||
| slot_clock = { path = "../../common/slot_clock" } | ||||
| serde = "1.0.116" | ||||
| lighthouse_network = { workspace = true } | ||||
| logging = { workspace = true } | ||||
| parking_lot = { workspace = true } | ||||
| types = { workspace = true } | ||||
| eth2_config = { workspace = true } | ||||
| slot_clock = { workspace = true } | ||||
| serde = { workspace = true } | ||||
| serde_derive = "1.0.116" | ||||
| error-chain = "0.12.4" | ||||
| slog = { version = "2.5.2", features = ["max_level_trace"] } | ||||
| tokio = "1.14.0" | ||||
| dirs = "3.0.1" | ||||
| eth1 = { path = "../eth1" } | ||||
| eth2 = { path = "../../common/eth2" } | ||||
| sensitive_url = { path = "../../common/sensitive_url" } | ||||
| genesis = { path = "../genesis" } | ||||
| task_executor = { path = "../../common/task_executor" } | ||||
| environment = { path = "../../lighthouse/environment" } | ||||
| lazy_static = "1.4.0" | ||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics" } | ||||
| error-chain = { workspace = true } | ||||
| slog = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| dirs = { workspace = true } | ||||
| eth1 = { workspace = true } | ||||
| eth2 = { workspace = true } | ||||
| sensitive_url = { workspace = true } | ||||
| genesis = { workspace = true } | ||||
| task_executor = { workspace = true } | ||||
| environment = { workspace = true } | ||||
| lazy_static = { workspace = true } | ||||
| lighthouse_metrics = { workspace = true } | ||||
| time = "0.3.5" | ||||
| directory = {path = "../../common/directory"} | ||||
| http_api = { path = "../http_api" } | ||||
| directory = { workspace = true } | ||||
| http_api = { workspace = true } | ||||
| http_metrics = { path = "../http_metrics" } | ||||
| slasher = { path = "../../slasher", default-features = false } | ||||
| slasher = { workspace = true } | ||||
| slasher_service = { path = "../../slasher/service" } | ||||
| monitoring_api = {path = "../../common/monitoring_api"} | ||||
| execution_layer = { path = "../execution_layer" } | ||||
| monitoring_api = { workspace = true } | ||||
| execution_layer = { workspace = true } | ||||
| beacon_processor = { workspace = true } | ||||
| num_cpus = { workspace = true } | ||||
|  | ||||
| @ -11,8 +11,10 @@ use beacon_chain::{ | ||||
|     slot_clock::{SlotClock, SystemTimeSlotClock}, | ||||
|     state_advance_timer::spawn_state_advance_timer, | ||||
|     store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, | ||||
|     BeaconChain, BeaconChainTypes, Eth1ChainBackend, ServerSentEventHandler, | ||||
|     BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, | ||||
| }; | ||||
| use beacon_processor::BeaconProcessorConfig; | ||||
| use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; | ||||
| use environment::RuntimeContext; | ||||
| use eth1::{Config as Eth1Config, Service as Eth1Service}; | ||||
| use eth2::{ | ||||
| @ -71,6 +73,8 @@ pub struct ClientBuilder<T: BeaconChainTypes> { | ||||
|     http_api_config: http_api::Config, | ||||
|     http_metrics_config: http_metrics::Config, | ||||
|     slasher: Option<Arc<Slasher<T::EthSpec>>>, | ||||
|     beacon_processor_config: Option<BeaconProcessorConfig>, | ||||
|     beacon_processor_channels: Option<BeaconProcessorChannels<T::EthSpec>>, | ||||
|     eth_spec_instance: T::EthSpec, | ||||
| } | ||||
| 
 | ||||
| @ -104,6 +108,8 @@ where | ||||
|             http_metrics_config: <_>::default(), | ||||
|             slasher: None, | ||||
|             eth_spec_instance, | ||||
|             beacon_processor_config: None, | ||||
|             beacon_processor_channels: None, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
| @ -119,6 +125,12 @@ where | ||||
|         self | ||||
|     } | ||||
| 
 | ||||
|     pub fn beacon_processor(mut self, config: BeaconProcessorConfig) -> Self { | ||||
|         self.beacon_processor_channels = Some(BeaconProcessorChannels::new(&config)); | ||||
|         self.beacon_processor_config = Some(config); | ||||
|         self | ||||
|     } | ||||
| 
 | ||||
|     pub fn slasher(mut self, slasher: Arc<Slasher<TEthSpec>>) -> Self { | ||||
|         self.slasher = Some(slasher); | ||||
|         self | ||||
| @ -142,14 +154,18 @@ where | ||||
|         let runtime_context = | ||||
|             runtime_context.ok_or("beacon_chain_start_method requires a runtime context")?; | ||||
|         let context = runtime_context.service_context("beacon".into()); | ||||
|         let log = context.log(); | ||||
|         let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?; | ||||
|         let event_handler = if self.http_api_config.enabled { | ||||
|             Some(ServerSentEventHandler::new(context.log().clone())) | ||||
|             Some(ServerSentEventHandler::new( | ||||
|                 context.log().clone(), | ||||
|                 self.http_api_config.sse_capacity_multiplier, | ||||
|             )) | ||||
|         } else { | ||||
|             None | ||||
|         }; | ||||
| 
 | ||||
|         let execution_layer = if let Some(config) = config.execution_layer { | ||||
|         let execution_layer = if let Some(config) = config.execution_layer.clone() { | ||||
|             let context = runtime_context.service_context("exec".into()); | ||||
|             let execution_layer = ExecutionLayer::from_config( | ||||
|                 config, | ||||
| @ -167,6 +183,9 @@ where | ||||
|             .store(store) | ||||
|             .task_executor(context.executor.clone()) | ||||
|             .custom_spec(spec.clone()) | ||||
|             .store_migrator_config( | ||||
|                 MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration), | ||||
|             ) | ||||
|             .chain_config(chain_config) | ||||
|             .graffiti(graffiti) | ||||
|             .event_handler(event_handler) | ||||
| @ -231,23 +250,19 @@ where | ||||
|                 )?; | ||||
|                 builder.genesis_state(genesis_state).map(|v| (v, None))? | ||||
|             } | ||||
|             ClientGenesis::SszBytes { | ||||
|                 genesis_state_bytes, | ||||
|             } => { | ||||
|             ClientGenesis::GenesisState => { | ||||
|                 info!( | ||||
|                     context.log(), | ||||
|                     "Starting from known genesis state"; | ||||
|                 ); | ||||
| 
 | ||||
|                 let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) | ||||
|                     .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; | ||||
|                 let genesis_state = genesis_state(&runtime_context, &config, log).await?; | ||||
| 
 | ||||
|                 builder.genesis_state(genesis_state).map(|v| (v, None))? | ||||
|             } | ||||
|             ClientGenesis::WeakSubjSszBytes { | ||||
|                 anchor_state_bytes, | ||||
|                 anchor_block_bytes, | ||||
|                 genesis_state_bytes, | ||||
|             } => { | ||||
|                 info!(context.log(), "Starting checkpoint sync"); | ||||
|                 if config.chain.genesis_backfill { | ||||
| @ -261,17 +276,13 @@ where | ||||
|                     .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; | ||||
|                 let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) | ||||
|                     .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; | ||||
|                 let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) | ||||
|                     .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; | ||||
|                 let genesis_state = genesis_state(&runtime_context, &config, log).await?; | ||||
| 
 | ||||
|                 builder | ||||
|                     .weak_subjectivity_state(anchor_state, anchor_block, genesis_state) | ||||
|                     .map(|v| (v, None))? | ||||
|             } | ||||
|             ClientGenesis::CheckpointSyncUrl { | ||||
|                 genesis_state_bytes, | ||||
|                 url, | ||||
|             } => { | ||||
|             ClientGenesis::CheckpointSyncUrl { url } => { | ||||
|                 info!( | ||||
|                     context.log(), | ||||
|                     "Starting checkpoint sync"; | ||||
| @ -290,7 +301,6 @@ where | ||||
|                         config.chain.checkpoint_sync_url_timeout, | ||||
|                     )), | ||||
|                 ); | ||||
|                 let slots_per_epoch = TEthSpec::slots_per_epoch(); | ||||
| 
 | ||||
|                 let deposit_snapshot = if config.sync_eth1_chain { | ||||
|                     // We want to fetch deposit snapshot before fetching the finalized beacon state to | ||||
| @ -337,10 +347,23 @@ where | ||||
|                     None | ||||
|                 }; | ||||
| 
 | ||||
|                 debug!(context.log(), "Downloading finalized block"); | ||||
|                 // Find a suitable finalized block on an epoch boundary. | ||||
|                 let mut block = remote | ||||
|                     .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Finalized, &spec) | ||||
|                 debug!( | ||||
|                     context.log(), | ||||
|                     "Downloading finalized state"; | ||||
|                 ); | ||||
|                 let state = remote | ||||
|                     .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Finalized, &spec) | ||||
|                     .await | ||||
|                     .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))? | ||||
|                     .ok_or_else(|| "Checkpoint state missing from remote".to_string())?; | ||||
| 
 | ||||
|                 debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot()); | ||||
| 
 | ||||
|                 let finalized_block_slot = state.latest_block_header().slot; | ||||
| 
 | ||||
|                 debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot); | ||||
|                 let block = remote | ||||
|                     .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(finalized_block_slot), &spec) | ||||
|                     .await | ||||
|                     .map_err(|e| match e { | ||||
|                         ApiError::InvalidSsz(e) => format!( | ||||
| @ -354,65 +377,14 @@ where | ||||
| 
 | ||||
|                 debug!(context.log(), "Downloaded finalized block"); | ||||
| 
 | ||||
|                 let mut block_slot = block.slot(); | ||||
| 
 | ||||
|                 while block.slot() % slots_per_epoch != 0 { | ||||
|                     block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; | ||||
| 
 | ||||
|                     debug!( | ||||
|                         context.log(), | ||||
|                         "Searching for aligned checkpoint block"; | ||||
|                         "block_slot" => block_slot | ||||
|                     ); | ||||
| 
 | ||||
|                     if let Some(found_block) = remote | ||||
|                         .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(block_slot), &spec) | ||||
|                         .await | ||||
|                         .map_err(|e| { | ||||
|                             format!("Error fetching block at slot {}: {:?}", block_slot, e) | ||||
|                         })? | ||||
|                     { | ||||
|                         block = found_block; | ||||
|                     } | ||||
|                 } | ||||
| 
 | ||||
|                 debug!( | ||||
|                     context.log(), | ||||
|                     "Downloaded aligned finalized block"; | ||||
|                     "block_root" => ?block.canonical_root(), | ||||
|                     "block_slot" => block.slot(), | ||||
|                 ); | ||||
| 
 | ||||
|                 let state_root = block.state_root(); | ||||
|                 debug!( | ||||
|                     context.log(), | ||||
|                     "Downloading finalized state"; | ||||
|                     "state_root" => ?state_root | ||||
|                 ); | ||||
|                 let state = remote | ||||
|                     .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Root(state_root), &spec) | ||||
|                     .await | ||||
|                     .map_err(|e| { | ||||
|                         format!( | ||||
|                             "Error loading checkpoint state from remote {:?}: {:?}", | ||||
|                             state_root, e | ||||
|                         ) | ||||
|                     })? | ||||
|                     .ok_or_else(|| { | ||||
|                         format!("Checkpoint state missing from remote: {:?}", state_root) | ||||
|                     })?; | ||||
| 
 | ||||
|                 debug!(context.log(), "Downloaded finalized state"); | ||||
| 
 | ||||
|                 let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) | ||||
|                     .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; | ||||
|                 let genesis_state = genesis_state(&runtime_context, &config, log).await?; | ||||
| 
 | ||||
|                 info!( | ||||
|                     context.log(), | ||||
|                     "Loaded checkpoint block and state"; | ||||
|                     "slot" => block.slot(), | ||||
|                     "block_slot" => block.slot(), | ||||
|                     "state_slot" => state.slot(), | ||||
|                     "block_root" => ?block.canonical_root(), | ||||
|                     "state_root" => ?state_root, | ||||
|                 ); | ||||
| 
 | ||||
|                 let service = | ||||
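The hunk above inverts the old checkpoint fetch order: rather than downloading the finalized block and walking backwards to an epoch-aligned ancestor, it downloads the finalized state first and reads the exact block slot from `state.latest_block_header().slot`. A condensed sketch of the new two-step fetch, assuming the same `remote` client and `spec` as above (`E` stands for the configured `EthSpec`):

```rust
// One state download pins the slot of its own latest block, so a single
// follow-up block download replaces the old alignment search loop.
let state = remote
    .get_debug_beacon_states_ssz::<E>(StateId::Finalized, &spec)
    .await
    .map_err(|e| format!("Error loading checkpoint state: {:?}", e))?
    .ok_or_else(|| "Checkpoint state missing from remote".to_string())?;
let block = remote
    .get_beacon_blocks_ssz::<E>(BlockId::Slot(state.latest_block_header().slot), &spec)
    .await
    .map_err(|e| format!("Error loading checkpoint block: {:?}", e))?
    .ok_or_else(|| "Checkpoint block missing from remote".to_string())?;
```

This is also why the unused `slots_per_epoch` binding disappears: no alignment arithmetic remains.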
| @ -476,6 +448,7 @@ where | ||||
|                         chain: None, | ||||
|                         network_senders: None, | ||||
|                         network_globals: None, | ||||
|                         beacon_processor_send: None, | ||||
|                         eth1_service: Some(genesis_service.eth1_service.clone()), | ||||
|                         log: context.log().clone(), | ||||
|                         sse_logging_components: runtime_context.sse_logging_components.clone(), | ||||
| @ -553,6 +526,10 @@ where | ||||
|             .as_ref() | ||||
|             .ok_or("network requires a runtime_context")? | ||||
|             .clone(); | ||||
|         let beacon_processor_channels = self | ||||
|             .beacon_processor_channels | ||||
|             .as_ref() | ||||
|             .ok_or("network requires beacon_processor_channels")?; | ||||
| 
 | ||||
|         // If gossipsub metrics are required we build a registry to record them | ||||
|         let mut gossipsub_registry = if config.metrics_enabled { | ||||
| @ -568,6 +545,8 @@ where | ||||
|             gossipsub_registry | ||||
|                 .as_mut() | ||||
|                 .map(|registry| registry.sub_registry_with_prefix("gossipsub")), | ||||
|             beacon_processor_channels.beacon_processor_tx.clone(), | ||||
|             beacon_processor_channels.work_reprocessing_tx.clone(), | ||||
|         ) | ||||
|         .await | ||||
|         .map_err(|e| format!("Failed to start network: {:?}", e))?; | ||||
| @ -690,6 +669,14 @@ where | ||||
|             .runtime_context | ||||
|             .as_ref() | ||||
|             .ok_or("build requires a runtime context")?; | ||||
|         let beacon_processor_channels = self | ||||
|             .beacon_processor_channels | ||||
|             .take() | ||||
|             .ok_or("build requires beacon_processor_channels")?; | ||||
|         let beacon_processor_config = self | ||||
|             .beacon_processor_config | ||||
|             .take() | ||||
|             .ok_or("build requires a beacon_processor_config")?; | ||||
|         let log = runtime_context.log().clone(); | ||||
| 
 | ||||
|         let http_api_listen_addr = if self.http_api_config.enabled { | ||||
| @ -699,6 +686,7 @@ where | ||||
|                 network_senders: self.network_senders.clone(), | ||||
|                 network_globals: self.network_globals.clone(), | ||||
|                 eth1_service: self.eth1_service.clone(), | ||||
|                 beacon_processor_send: Some(beacon_processor_channels.beacon_processor_tx.clone()), | ||||
|                 sse_logging_components: runtime_context.sse_logging_components.clone(), | ||||
|                 log: log.clone(), | ||||
|             }); | ||||
| @ -742,7 +730,7 @@ where | ||||
| 
 | ||||
|             runtime_context | ||||
|                 .executor | ||||
|                 .spawn_without_exit(async move { server.await }, "http-metrics"); | ||||
|                 .spawn_without_exit(server, "http-metrics"); | ||||
| 
 | ||||
|             Some(listen_addr) | ||||
|         } else { | ||||
| @ -755,6 +743,25 @@ where | ||||
|         } | ||||
| 
 | ||||
|         if let Some(beacon_chain) = self.beacon_chain.as_ref() { | ||||
|             if let Some(network_globals) = &self.network_globals { | ||||
|                 let beacon_processor_context = runtime_context.service_context("bproc".into()); | ||||
|                 BeaconProcessor { | ||||
|                     network_globals: network_globals.clone(), | ||||
|                     executor: beacon_processor_context.executor.clone(), | ||||
|                     current_workers: 0, | ||||
|                     config: beacon_processor_config, | ||||
|                     log: beacon_processor_context.log().clone(), | ||||
|                 } | ||||
|                 .spawn_manager( | ||||
|                     beacon_processor_channels.beacon_processor_rx, | ||||
|                     beacon_processor_channels.work_reprocessing_tx, | ||||
|                     beacon_processor_channels.work_reprocessing_rx, | ||||
|                     None, | ||||
|                     beacon_chain.slot_clock.clone(), | ||||
|                     beacon_chain.spec.maximum_gossip_clock_disparity(), | ||||
|                 )?; | ||||
|             } | ||||
| 
 | ||||
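The block above is the consuming end of the channels created by `beacon_processor`: the receivers move into `spawn_manager` exactly once, while producers keep cloned senders. A self-contained sketch of that topology using plain `tokio` channels (names are illustrative only):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Senders are cheap to clone and are handed to each producer service...
    let (tx, mut rx) = mpsc::channel::<&'static str>(1024);
    let network_tx = tx.clone();
    let http_api_tx = tx.clone();
    drop(tx);

    // ...while the single receiver moves into the processor task, mirroring
    // how `spawn_manager` takes ownership of `beacon_processor_rx` above.
    let processor = tokio::spawn(async move {
        while let Some(work) = rx.recv().await {
            println!("processing {work}");
        }
    });

    network_tx.send("gossip_block").await.unwrap();
    http_api_tx.send("api_block").await.unwrap();
    drop(network_tx);
    drop(http_api_tx);
    processor.await.unwrap();
}
```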
|             let state_advance_context = runtime_context.service_context("state_advance".into()); | ||||
|             let state_advance_log = state_advance_context.log().clone(); | ||||
|             spawn_state_advance_timer( | ||||
| @ -807,9 +814,6 @@ where | ||||
|                     execution_layer.spawn_clean_proposer_caches_routine::<TSlotClock>( | ||||
|                         beacon_chain.slot_clock.clone(), | ||||
|                     ); | ||||
| 
 | ||||
|                     // Spawns a routine that polls the `exchange_transition_configuration` endpoint. | ||||
|                     execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); | ||||
|                 } | ||||
| 
 | ||||
|                 // Spawn a service to publish BLS to execution changes at the Capella fork. | ||||
| @ -1077,3 +1081,23 @@ where | ||||
|         Ok(self) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Obtain the genesis state from the `eth2_network_config` in `context`. | ||||
| async fn genesis_state<T: EthSpec>( | ||||
|     context: &RuntimeContext<T>, | ||||
|     config: &ClientConfig, | ||||
|     log: &Logger, | ||||
| ) -> Result<BeaconState<T>, String> { | ||||
|     let eth2_network_config = context | ||||
|         .eth2_network_config | ||||
|         .as_ref() | ||||
|         .ok_or("An eth2_network_config is required to obtain the genesis state")?; | ||||
|     eth2_network_config | ||||
|         .genesis_state::<T>( | ||||
|             config.genesis_state_url.as_deref(), | ||||
|             config.genesis_state_url_timeout, | ||||
|             log, | ||||
|         ) | ||||
|         .await? | ||||
|         .ok_or_else(|| "Genesis state is unknown".to_string()) | ||||
| } | ||||
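For illustration, a hedged sketch of what the helper delegates to: `Eth2NetworkConfig::genesis_state` with an optional override URL and the configured timeout (the URL here is hypothetical):

```rust
// Resolve the genesis state, preferring a user-supplied URL and otherwise
// falling back to the state bundled with the network config.
let genesis_state = eth2_network_config
    .genesis_state::<MainnetEthSpec>(
        Some("https://example.com/genesis.ssz"), // hypothetical override
        Duration::from_secs(180),                // config.genesis_state_url_timeout
        &log,
    )
    .await?
    .ok_or_else(|| "Genesis state is unknown".to_string())?;
```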
|  | ||||
| @ -1,4 +1,5 @@ | ||||
| use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; | ||||
| use beacon_processor::BeaconProcessorConfig; | ||||
| use directory::DEFAULT_ROOT_DIR; | ||||
| use environment::LoggerConfig; | ||||
| use network::NetworkConfig; | ||||
| @ -6,6 +7,7 @@ use sensitive_url::SensitiveUrl; | ||||
| use serde_derive::{Deserialize, Serialize}; | ||||
| use std::fs; | ||||
| use std::path::PathBuf; | ||||
| use std::time::Duration; | ||||
| use types::{Graffiti, PublicKeyBytes}; | ||||
| /// Default directory name for the freezer database under the top-level data dir. | ||||
| const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; | ||||
| @ -24,18 +26,13 @@ pub enum ClientGenesis { | ||||
|     /// contract. | ||||
|     #[default] | ||||
|     DepositContract, | ||||
|     /// Loads the genesis state from SSZ-encoded `BeaconState` bytes. | ||||
|     /// | ||||
|     /// We include the bytes instead of the `BeaconState<E>` because the `EthSpec` type | ||||
|     /// parameter would be very annoying. | ||||
|     SszBytes { genesis_state_bytes: Vec<u8> }, | ||||
|     /// Loads the genesis state from the genesis state in the `Eth2NetworkConfig`. | ||||
|     GenesisState, | ||||
|     WeakSubjSszBytes { | ||||
|         genesis_state_bytes: Vec<u8>, | ||||
|         anchor_state_bytes: Vec<u8>, | ||||
|         anchor_block_bytes: Vec<u8>, | ||||
|     }, | ||||
|     CheckpointSyncUrl { | ||||
|         genesis_state_bytes: Vec<u8>, | ||||
|         url: SensitiveUrl, | ||||
|     }, | ||||
| } | ||||
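With the SSZ bytes gone from every variant, building a genesis mode no longer drags a serialized `BeaconState` around; the state is resolved on demand by the `genesis_state` helper. A sketch with a hypothetical checkpoint URL:

```rust
let genesis = ClientGenesis::CheckpointSyncUrl {
    url: SensitiveUrl::parse("https://checkpoint.example.com") // hypothetical URL
        .map_err(|e| format!("Invalid checkpoint sync URL: {:?}", e))?,
};
```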
| @ -79,7 +76,9 @@ pub struct Config { | ||||
|     pub monitoring_api: Option<monitoring_api::Config>, | ||||
|     pub slasher: Option<slasher::Config>, | ||||
|     pub logger_config: LoggerConfig, | ||||
|     pub always_prefer_builder_payload: bool, | ||||
|     pub beacon_processor: BeaconProcessorConfig, | ||||
|     pub genesis_state_url: Option<String>, | ||||
|     pub genesis_state_url_timeout: Duration, | ||||
| } | ||||
| 
 | ||||
| impl Default for Config { | ||||
| @ -106,7 +105,10 @@ impl Default for Config { | ||||
|             validator_monitor_pubkeys: vec![], | ||||
|             validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, | ||||
|             logger_config: LoggerConfig::default(), | ||||
|             always_prefer_builder_payload: false, | ||||
|             beacon_processor: <_>::default(), | ||||
|             genesis_state_url: <_>::default(), | ||||
|             // This default value should always be overwritten by the CLI default value. | ||||
|             genesis_state_url_timeout: Duration::from_secs(60), | ||||
|         } | ||||
|     } | ||||
| } | ||||
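The two new fields give the CLI somewhere to land a genesis-state URL and its timeout; a sketch of overriding the defaults (the URL is hypothetical, and the exact flag wiring lives in the CLI layer, not here):

```rust
let mut config = Config::default();
config.genesis_state_url = Some("https://example.com/genesis.ssz".to_string()); // hypothetical
config.genesis_state_url_timeout = Duration::from_secs(300);
```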
|  | ||||
| @ -46,20 +46,6 @@ impl<T: BeaconChainTypes> Client<T> { | ||||
|         self.http_metrics_listen_addr | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the ipv4 port of the client's libp2p stack, if it was started. | ||||
|     pub fn libp2p_listen_ipv4_port(&self) -> Option<u16> { | ||||
|         self.network_globals | ||||
|             .as_ref() | ||||
|             .and_then(|n| n.listen_port_tcp4()) | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the ipv6 port of the client's libp2p stack, if it was started. | ||||
|     pub fn libp2p_listen_ipv6_port(&self) -> Option<u16> { | ||||
|         self.network_globals | ||||
|             .as_ref() | ||||
|             .and_then(|n| n.listen_port_tcp6()) | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the list of libp2p addresses the client is listening to. | ||||
|     pub fn libp2p_listen_addresses(&self) -> Option<Vec<Multiaddr>> { | ||||
|         self.network_globals.as_ref().map(|n| n.listen_multiaddrs()) | ||||
|  | ||||
| @ -1,7 +1,7 @@ | ||||
| use crate::metrics; | ||||
| use beacon_chain::{ | ||||
|     capella_readiness::CapellaReadiness, | ||||
|     merge_readiness::{MergeConfig, MergeReadiness}, | ||||
|     merge_readiness::{GenesisExecutionPayloadStatus, MergeConfig, MergeReadiness}, | ||||
|     BeaconChain, BeaconChainTypes, ExecutionStatus, | ||||
| }; | ||||
| use lighthouse_network::{types::SyncState, NetworkGlobals}; | ||||
| @ -62,6 +62,9 @@ pub fn spawn_notifier<T: BeaconChainTypes>( | ||||
|                         "wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)), | ||||
|                     ); | ||||
|                     eth1_logging(&beacon_chain, &log); | ||||
|                     merge_readiness_logging(Slot::new(0), &beacon_chain, &log).await; | ||||
|                     capella_readiness_logging(Slot::new(0), &beacon_chain, &log).await; | ||||
|                     genesis_execution_payload_logging(&beacon_chain, &log).await; | ||||
|                     sleep(slot_duration).await; | ||||
|                 } | ||||
|                 _ => break, | ||||
| @ -365,7 +368,7 @@ async fn merge_readiness_logging<T: BeaconChainTypes>( | ||||
|         return; | ||||
|     } | ||||
| 
 | ||||
|     match beacon_chain.check_merge_readiness().await { | ||||
|     match beacon_chain.check_merge_readiness(current_slot).await { | ||||
|         MergeReadiness::Ready { | ||||
|             config, | ||||
|             current_difficulty, | ||||
| @ -404,14 +407,6 @@ async fn merge_readiness_logging<T: BeaconChainTypes>( | ||||
|                 "config" => ?other | ||||
|             ), | ||||
|         }, | ||||
|         readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed { error: _ } => { | ||||
|             error!( | ||||
|                 log, | ||||
|                 "Not ready for merge"; | ||||
|                 "info" => %readiness, | ||||
|                 "hint" => "try updating Lighthouse and/or the execution layer", | ||||
|             ) | ||||
|         } | ||||
|         readiness @ MergeReadiness::NotSynced => warn!( | ||||
|             log, | ||||
|             "Not ready for merge"; | ||||
| @ -484,6 +479,79 @@ async fn capella_readiness_logging<T: BeaconChainTypes>( | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| async fn genesis_execution_payload_logging<T: BeaconChainTypes>( | ||||
|     beacon_chain: &BeaconChain<T>, | ||||
|     log: &Logger, | ||||
| ) { | ||||
|     match beacon_chain | ||||
|         .check_genesis_execution_payload_is_correct() | ||||
|         .await | ||||
|     { | ||||
|         Ok(GenesisExecutionPayloadStatus::Correct(block_hash)) => { | ||||
|             info!( | ||||
|                 log, | ||||
|                 "Execution enabled from genesis"; | ||||
|                 "genesis_payload_block_hash" => ?block_hash, | ||||
|             ); | ||||
|         } | ||||
|         Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { got, expected }) => { | ||||
|             error!( | ||||
|                 log, | ||||
|                 "Genesis payload block hash mismatch"; | ||||
|                 "info" => "genesis is misconfigured and likely to fail", | ||||
|                 "consensus_node_block_hash" => ?expected, | ||||
|                 "execution_node_block_hash" => ?got, | ||||
|             ); | ||||
|         } | ||||
|         Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch { got, expected }) => { | ||||
|             error!( | ||||
|                 log, | ||||
|                 "Genesis payload transactions root mismatch"; | ||||
|                 "info" => "genesis is misconfigured and likely to fail", | ||||
|                 "consensus_node_transactions_root" => ?expected, | ||||
|                 "execution_node_transactions_root" => ?got, | ||||
|             ); | ||||
|         } | ||||
|         Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { got, expected }) => { | ||||
|             error!( | ||||
|                 log, | ||||
|                 "Genesis payload withdrawals root mismatch"; | ||||
|                 "info" => "genesis is misconfigured and likely to fail", | ||||
|                 "consensus_node_withdrawals_root" => ?expected, | ||||
|                 "execution_node_withdrawals_root" => ?got, | ||||
|             ); | ||||
|         } | ||||
|         Ok(GenesisExecutionPayloadStatus::OtherMismatch) => { | ||||
|             error!( | ||||
|                 log, | ||||
|                 "Genesis payload header mismatch"; | ||||
|                 "info" => "genesis is misconfigured and likely to fail", | ||||
|                 "detail" => "see debug logs for payload headers" | ||||
|             ); | ||||
|         } | ||||
|         Ok(GenesisExecutionPayloadStatus::Irrelevant) => { | ||||
|             info!( | ||||
|                 log, | ||||
|                 "Execution is not enabled from genesis"; | ||||
|             ); | ||||
|         } | ||||
|         Ok(GenesisExecutionPayloadStatus::AlreadyHappened) => { | ||||
|             warn!( | ||||
|                 log, | ||||
|                 "Unable to check genesis which has already occurred"; | ||||
|                 "info" => "this is probably a race condition or a bug" | ||||
|             ); | ||||
|         } | ||||
|         Err(e) => { | ||||
|             error!( | ||||
|                 log, | ||||
|                 "Unable to check genesis execution payload"; | ||||
|                 "error" => ?e | ||||
|             ); | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger) { | ||||
|     let current_slot_opt = beacon_chain.slot().ok(); | ||||
| 
 | ||||
|  | ||||
| @ -2,33 +2,33 @@ | ||||
| name = "eth1" | ||||
| version = "0.2.0" | ||||
| authors = ["Paul Hauner <paul@paulhauner.com>"] | ||||
| edition = "2021" | ||||
| edition = { workspace = true } | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| eth1_test_rig = { path = "../../testing/eth1_test_rig" } | ||||
| serde_yaml = "0.8.13" | ||||
| sloggers = { version = "2.1.1", features = ["json"] } | ||||
| environment = { path = "../../lighthouse/environment" } | ||||
| eth1_test_rig = { workspace = true } | ||||
| serde_yaml = { workspace = true } | ||||
| sloggers = { workspace = true } | ||||
| environment = { workspace = true } | ||||
| 
 | ||||
| [dependencies] | ||||
| reqwest = { version = "0.11.0", features = ["native-tls-vendored"] } | ||||
| execution_layer = { path = "../execution_layer" } | ||||
| futures = "0.3.7" | ||||
| serde_json = "1.0.58" | ||||
| serde = { version = "1.0.116", features = ["derive"] } | ||||
| hex = "0.4.2" | ||||
| types = { path = "../../consensus/types"} | ||||
| merkle_proof = { path = "../../consensus/merkle_proof"} | ||||
| ethereum_ssz = "0.5.0" | ||||
| ethereum_ssz_derive = "0.5.0" | ||||
| tree_hash = "0.5.0" | ||||
| parking_lot = "0.12.0" | ||||
| slog = "2.5.2" | ||||
| superstruct = "0.5.0" | ||||
| tokio = { version = "1.14.0", features = ["full"] } | ||||
| state_processing = { path = "../../consensus/state_processing" } | ||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics"} | ||||
| lazy_static = "1.4.0" | ||||
| task_executor = { path = "../../common/task_executor" } | ||||
| eth2 = { path = "../../common/eth2" } | ||||
| sensitive_url = { path = "../../common/sensitive_url" } | ||||
| reqwest = { workspace = true } | ||||
| execution_layer = { workspace = true } | ||||
| futures = { workspace = true } | ||||
| serde_json = { workspace = true } | ||||
| serde = { workspace = true } | ||||
| hex = { workspace = true } | ||||
| types = { workspace = true } | ||||
| merkle_proof = { workspace = true } | ||||
| ethereum_ssz = { workspace = true } | ||||
| ethereum_ssz_derive = { workspace = true } | ||||
| tree_hash = { workspace = true } | ||||
| parking_lot = { workspace = true } | ||||
| slog = { workspace = true } | ||||
| superstruct = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| state_processing = { workspace = true } | ||||
| lighthouse_metrics = { workspace = true } | ||||
| lazy_static = { workspace = true } | ||||
| task_executor = { workspace = true } | ||||
| eth2 = { workspace = true } | ||||
| sensitive_url = { workspace = true } | ||||
|  | ||||
| @ -1,52 +1,56 @@ | ||||
| [package] | ||||
| name = "execution_layer" | ||||
| version = "0.1.0" | ||||
| edition = "2021" | ||||
| edition = { workspace = true } | ||||
| 
 | ||||
| # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html | ||||
| 
 | ||||
| [dependencies] | ||||
| types = { path = "../../consensus/types"} | ||||
| tokio = { version = "1.10.0", features = ["full"] } | ||||
| types = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| async-trait = "0.1.51" | ||||
| slog = "2.5.2" | ||||
| futures = "0.3.7" | ||||
| sensitive_url = { path = "../../common/sensitive_url" } | ||||
| reqwest = { version = "0.11.0", features = ["json","stream"] } | ||||
| ethereum_serde_utils = "0.5.0" | ||||
| serde_json = "1.0.58" | ||||
| serde = { version = "1.0.116", features = ["derive"] } | ||||
| warp = { version = "0.3.2", features = ["tls"] } | ||||
| slog = { workspace = true } | ||||
| futures = { workspace = true } | ||||
| sensitive_url = { workspace = true } | ||||
| reqwest = { workspace = true } | ||||
| ethereum_serde_utils = { workspace = true } | ||||
| serde_json = { workspace = true } | ||||
| serde = { workspace = true } | ||||
| warp = { workspace = true } | ||||
| jsonwebtoken = "8" | ||||
| environment = { path = "../../lighthouse/environment" } | ||||
| bytes = "1.1.0" | ||||
| task_executor = { path = "../../common/task_executor" } | ||||
| hex = "0.4.2" | ||||
| ethereum_ssz = "0.5.0" | ||||
| ssz_types = "0.5.0" | ||||
| eth2 = { path = "../../common/eth2" } | ||||
| state_processing = { path = "../../consensus/state_processing" } | ||||
| superstruct = "0.6.0" | ||||
| lru = "0.7.1" | ||||
| exit-future = "0.2.0" | ||||
| tree_hash = "0.5.0" | ||||
| tree_hash_derive = "0.5.0" | ||||
| parking_lot = "0.12.0" | ||||
| slot_clock = { path = "../../common/slot_clock" } | ||||
| tempfile = "3.1.0" | ||||
| rand = "0.8.5" | ||||
| zeroize = { version = "1.4.2", features = ["zeroize_derive"] } | ||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics" } | ||||
| lazy_static = "1.4.0" | ||||
| ethers-core = "1.0.2" | ||||
| environment = { workspace = true } | ||||
| bytes = { workspace = true } | ||||
| task_executor = { workspace = true } | ||||
| hex = { workspace = true } | ||||
| ethereum_ssz = { workspace = true } | ||||
| ssz_types = { workspace = true } | ||||
| eth2 = { workspace = true } | ||||
| state_processing = { workspace = true } | ||||
| superstruct = { workspace = true } | ||||
| lru = { workspace = true } | ||||
| exit-future = { workspace = true } | ||||
| tree_hash = { workspace = true } | ||||
| tree_hash_derive = { workspace = true } | ||||
| parking_lot = { workspace = true } | ||||
| slot_clock = { workspace = true } | ||||
| tempfile = { workspace = true } | ||||
| rand = { workspace = true } | ||||
| zeroize = { workspace = true } | ||||
| lighthouse_metrics = { workspace = true } | ||||
| lazy_static = { workspace = true } | ||||
| ethers-core = { workspace = true } | ||||
| builder_client = { path = "../builder_client" } | ||||
| fork_choice = { path = "../../consensus/fork_choice" } | ||||
| mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } | ||||
| ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } | ||||
| ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } | ||||
| tokio-stream = { version = "0.1.9", features = [ "sync" ] } | ||||
| strum = "0.24.0" | ||||
| fork_choice = { workspace = true } | ||||
| mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" } | ||||
| axum = "0.6" | ||||
| hyper = "0.14" | ||||
| ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" } | ||||
| ssz_rs = "0.9.0" | ||||
| tokio-stream = { workspace = true } | ||||
| strum = { workspace = true } | ||||
| keccak-hash = "0.10.0" | ||||
| hash256-std-hasher = "0.15.2" | ||||
| triehash = "0.8.4" | ||||
| hash-db = "0.15.2" | ||||
| pretty_reqwest_error = { workspace = true } | ||||
| arc-swap = "1.6.0" | ||||
|  | ||||
| @ -12,12 +12,13 @@ use types::{ | ||||
| }; | ||||
| 
 | ||||
| impl<T: EthSpec> ExecutionLayer<T> { | ||||
|     /// Verify `payload.block_hash` locally within Lighthouse. | ||||
|     /// Calculate the block hash of an execution block. | ||||
|     /// | ||||
|     /// No remote calls to the execution client will be made, so this is quite a cheap check. | ||||
|     pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef<T>) -> Result<(), Error> { | ||||
|         let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); | ||||
| 
 | ||||
|     /// Return `(block_hash, transactions_root)`, where `transactions_root` is the root of the RLP | ||||
|     /// transactions. | ||||
|     pub fn calculate_execution_block_hash( | ||||
|         payload: ExecutionPayloadRef<T>, | ||||
|     ) -> (ExecutionBlockHash, Hash256) { | ||||
|         // Calculate the transactions root. | ||||
|         // We're currently using a deprecated Parity library for this. We should move to a | ||||
|         // better alternative when one appears, possibly following Reth. | ||||
| @ -46,7 +47,19 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
| 
 | ||||
|         // Hash the RLP encoding of the block header. | ||||
|         let rlp_block_header = rlp_encode_block_header(&exec_block_header); | ||||
|         let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header)); | ||||
|         ( | ||||
|             ExecutionBlockHash::from_root(keccak256(&rlp_block_header)), | ||||
|             rlp_transactions_root, | ||||
|         ) | ||||
|     } | ||||
| 
 | ||||
|     /// Verify `payload.block_hash` locally within Lighthouse. | ||||
|     /// | ||||
|     /// No remote calls to the execution client will be made, so this is quite a cheap check. | ||||
|     pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef<T>) -> Result<(), Error> { | ||||
|         let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); | ||||
| 
 | ||||
|         let (header_hash, rlp_transactions_root) = Self::calculate_execution_block_hash(payload); | ||||
| 
 | ||||
|         if header_hash != payload.block_hash() { | ||||
|             return Err(Error::BlockHashMismatch { | ||||
|  | ||||
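Factoring the hashing out of the verifier makes the `(block_hash, transactions_root)` pair reusable; in particular, the genesis execution payload check added to the notifier compares transactions roots on its own. A sketch of the new call shape, assuming an `ExecutionPayloadRef` named `payload`:

```rust
// Pure function: recomputing the hash needs no engine round-trip, and the
// RLP transactions root comes along for callers that want it.
let (header_hash, _transactions_root) =
    ExecutionLayer::<MainnetEthSpec>::calculate_execution_block_hash(payload);
assert_eq!(header_hash, payload.block_hash());
```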
| @ -1,15 +1,15 @@ | ||||
| use crate::engines::ForkchoiceState; | ||||
| use crate::http::{ | ||||
|     ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, | ||||
|     ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, | ||||
|     ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, | ||||
|     ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, | ||||
|     ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, | ||||
|     ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, | ||||
|     ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, | ||||
| }; | ||||
| use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2}; | ||||
| pub use ethers_core::types::Transaction; | ||||
| use ethers_core::utils::rlp::{self, Decodable, Rlp}; | ||||
| use http::deposit_methods::RpcError; | ||||
| pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; | ||||
| use pretty_reqwest_error::PrettyReqwestError; | ||||
| use reqwest::StatusCode; | ||||
| use serde::{Deserialize, Serialize}; | ||||
| use std::convert::TryFrom; | ||||
| @ -32,7 +32,7 @@ pub type PayloadId = [u8; 8]; | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| pub enum Error { | ||||
|     Reqwest(reqwest::Error), | ||||
|     HttpClient(PrettyReqwestError), | ||||
|     Auth(auth::Error), | ||||
|     BadResponse(String), | ||||
|     RequestFailed(String), | ||||
| @ -67,7 +67,7 @@ impl From<reqwest::Error> for Error { | ||||
|         ) { | ||||
|             Error::Auth(auth::Error::InvalidToken) | ||||
|         } else { | ||||
|             Error::Reqwest(e) | ||||
|             Error::HttpClient(e.into()) | ||||
|         } | ||||
|     } | ||||
| } | ||||
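Call sites only need a rename, but the payoff is in the logs: `PrettyReqwestError` wraps the raw `reqwest::Error` for more readable output. A sketch of the conversion, assuming some `reqwest::Error` value `e`:

```rust
// `From<reqwest::Error>` now lands on the prettier wrapper (unless the error
// looks like a failed JWT authorization, which still maps to Error::Auth).
let err: Error = e.into();
if let Error::HttpClient(pretty) = &err {
    println!("engine HTTP error: {:?}", pretty);
}
```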
| @ -449,7 +449,6 @@ pub struct EngineCapabilities { | ||||
|     pub get_payload_bodies_by_range_v1: bool, | ||||
|     pub get_payload_v1: bool, | ||||
|     pub get_payload_v2: bool, | ||||
|     pub exchange_transition_configuration_v1: bool, | ||||
| } | ||||
| 
 | ||||
| impl EngineCapabilities { | ||||
| @ -479,9 +478,6 @@ impl EngineCapabilities { | ||||
|         if self.get_payload_v2 { | ||||
|             response.push(ENGINE_GET_PAYLOAD_V2); | ||||
|         } | ||||
|         if self.exchange_transition_configuration_v1 { | ||||
|             response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1); | ||||
|         } | ||||
| 
 | ||||
|         response | ||||
|     } | ||||
|  | ||||
| @ -46,10 +46,6 @@ pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesB | ||||
| pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; | ||||
| pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); | ||||
| 
 | ||||
| pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = | ||||
|     "engine_exchangeTransitionConfigurationV1"; | ||||
| pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); | ||||
| 
 | ||||
| pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; | ||||
| pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); | ||||
| 
 | ||||
| @ -68,7 +64,6 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ | ||||
|     ENGINE_FORKCHOICE_UPDATED_V2, | ||||
|     ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, | ||||
|     ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, | ||||
|     ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, | ||||
| ]; | ||||
| 
 | ||||
| /// This is necessary because a user might run a capella-enabled version of | ||||
| @ -83,7 +78,6 @@ pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilit | ||||
|     get_payload_bodies_by_range_v1: false, | ||||
|     get_payload_v1: true, | ||||
|     get_payload_v2: false, | ||||
|     exchange_transition_configuration_v1: true, | ||||
| }; | ||||
| 
 | ||||
| /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. | ||||
| @ -934,24 +928,6 @@ impl HttpJsonRpc { | ||||
|             .collect()) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn exchange_transition_configuration_v1( | ||||
|         &self, | ||||
|         transition_configuration: TransitionConfigurationV1, | ||||
|     ) -> Result<TransitionConfigurationV1, Error> { | ||||
|         let params = json!([transition_configuration]); | ||||
| 
 | ||||
|         let response = self | ||||
|             .rpc_request( | ||||
|                 ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, | ||||
|                 params, | ||||
|                 ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT | ||||
|                     * self.execution_timeout_multiplier, | ||||
|             ) | ||||
|             .await?; | ||||
| 
 | ||||
|         Ok(response) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn exchange_capabilities(&self) -> Result<EngineCapabilities, Error> { | ||||
|         let params = json!([LIGHTHOUSE_CAPABILITIES]); | ||||
| 
 | ||||
| @ -982,8 +958,6 @@ impl HttpJsonRpc { | ||||
|                     .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), | ||||
|                 get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), | ||||
|                 get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), | ||||
|                 exchange_transition_configuration_v1: capabilities | ||||
|                     .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1), | ||||
|             }), | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @ -5,6 +5,7 @@ | ||||
| //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. | ||||
| 
 | ||||
| use crate::payload_cache::PayloadCache; | ||||
| use arc_swap::ArcSwapOption; | ||||
| use auth::{strip_prefix, Auth, JwtKey}; | ||||
| use builder_client::BuilderHttpClient; | ||||
| pub use engine_api::EngineCapabilities; | ||||
| @ -38,11 +39,11 @@ use tokio::{ | ||||
| }; | ||||
| use tokio_stream::wrappers::WatchStream; | ||||
| use tree_hash::TreeHash; | ||||
| use types::{AbstractExecPayload, BeaconStateError, ExecPayload, Withdrawals}; | ||||
| use types::{AbstractExecPayload, BeaconStateError, ExecPayload}; | ||||
| use types::{ | ||||
|     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, | ||||
|     ExecutionPayloadCapella, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, | ||||
|     ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, | ||||
|     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionPayloadCapella, ExecutionPayloadMerge, | ||||
|     ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, | ||||
|     Slot, | ||||
| }; | ||||
| 
 | ||||
| mod block_hash; | ||||
| @ -74,11 +75,9 @@ const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128; | ||||
| const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = | ||||
|     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; | ||||
| 
 | ||||
| const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); | ||||
| 
 | ||||
| /// A payload alongside some information about where it came from. | ||||
| pub enum ProvenancedPayload<P> { | ||||
|     /// A good ol' fashioned farm-to-table payload from your local EE. | ||||
|     /// A good old fashioned farm-to-table payload from your local EE. | ||||
|     Local(P), | ||||
|     /// A payload from a builder (e.g. mev-boost). | ||||
|     Builder(P), | ||||
| @ -163,7 +162,7 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo | ||||
|                 BlockProposalContents::Payload { | ||||
|                     payload: Payload::default_at_fork(fork_name)?, | ||||
|                     block_value: Uint256::zero(), | ||||
|                     _phantom: PhantomData::default(), | ||||
|                     _phantom: PhantomData, | ||||
|                 } | ||||
|             } | ||||
|         }) | ||||
| @ -211,7 +210,7 @@ pub enum FailedCondition { | ||||
| 
 | ||||
| struct Inner<E: EthSpec> { | ||||
|     engine: Arc<Engine>, | ||||
|     builder: Option<BuilderHttpClient>, | ||||
|     builder: ArcSwapOption<BuilderHttpClient>, | ||||
|     execution_engine_forkchoice_lock: Mutex<()>, | ||||
|     suggested_fee_recipient: Option<Address>, | ||||
|     proposer_preparation_data: Mutex<HashMap<u64, ProposerPreparationDataEntry>>, | ||||
| @ -326,25 +325,9 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|             Engine::new(api, executor.clone(), &log) | ||||
|         }; | ||||
| 
 | ||||
|         let builder = builder_url | ||||
|             .map(|url| { | ||||
|                 let builder_client = BuilderHttpClient::new(url.clone(), builder_user_agent) | ||||
|                     .map_err(Error::Builder)?; | ||||
| 
 | ||||
|                 info!( | ||||
|                     log, | ||||
|                     "Using external block builder"; | ||||
|                     "builder_url" => ?url, | ||||
|                     "builder_profit_threshold" => builder_profit_threshold, | ||||
|                     "local_user_agent" => builder_client.get_user_agent(), | ||||
|                 ); | ||||
|                 Ok::<_, Error>(builder_client) | ||||
|             }) | ||||
|             .transpose()?; | ||||
| 
 | ||||
|         let inner = Inner { | ||||
|             engine: Arc::new(engine), | ||||
|             builder, | ||||
|             builder: ArcSwapOption::empty(), | ||||
|             execution_engine_forkchoice_lock: <_>::default(), | ||||
|             suggested_fee_recipient, | ||||
|             proposer_preparation_data: Mutex::new(HashMap::new()), | ||||
| @ -358,19 +341,45 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|             last_new_payload_errored: RwLock::new(false), | ||||
|         }; | ||||
| 
 | ||||
|         Ok(Self { | ||||
|         let el = Self { | ||||
|             inner: Arc::new(inner), | ||||
|         }) | ||||
|     } | ||||
|         }; | ||||
| 
 | ||||
|         if let Some(builder_url) = builder_url { | ||||
|             el.set_builder_url(builder_url, builder_user_agent)?; | ||||
|         } | ||||
| 
 | ||||
|         Ok(el) | ||||
|     } | ||||
| 
 | ||||
| impl<T: EthSpec> ExecutionLayer<T> { | ||||
|     fn engine(&self) -> &Arc<Engine> { | ||||
|         &self.inner.engine | ||||
|     } | ||||
| 
 | ||||
|     pub fn builder(&self) -> &Option<BuilderHttpClient> { | ||||
|         &self.inner.builder | ||||
|     pub fn builder(&self) -> Option<Arc<BuilderHttpClient>> { | ||||
|         self.inner.builder.load_full() | ||||
|     } | ||||
| 
 | ||||
|     /// Set the builder URL after initialization. | ||||
|     /// | ||||
|     /// This is useful for breaking circular dependencies between mock ELs and mock builders in | ||||
|     /// tests. | ||||
|     pub fn set_builder_url( | ||||
|         &self, | ||||
|         builder_url: SensitiveUrl, | ||||
|         builder_user_agent: Option<String>, | ||||
|     ) -> Result<(), Error> { | ||||
|         let builder_client = BuilderHttpClient::new(builder_url.clone(), builder_user_agent) | ||||
|             .map_err(Error::Builder)?; | ||||
|         info!( | ||||
|             self.log(), | ||||
|             "Using external block builder"; | ||||
|             "builder_url" => ?builder_url, | ||||
|             "builder_profit_threshold" => self.inner.builder_profit_threshold.as_u128(), | ||||
|             "local_user_agent" => builder_client.get_user_agent(), | ||||
|         ); | ||||
|         self.inner.builder.swap(Some(Arc::new(builder_client))); | ||||
|         Ok(()) | ||||
|     } | ||||
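`ArcSwapOption` is what lets `set_builder_url` run after construction while readers stay lock-free. A self-contained illustration of the same three calls used above (a `String` stands in for `BuilderHttpClient`):

```rust
use arc_swap::ArcSwapOption;
use std::sync::Arc;

struct Inner {
    builder: ArcSwapOption<String>,
}

fn main() {
    // Start empty, exactly like `Inner::builder` above.
    let inner = Inner { builder: ArcSwapOption::empty() };
    assert!(inner.builder.load_full().is_none());

    // Install a client later, e.g. from a test wiring up a mock builder.
    inner.builder.swap(Some(Arc::new("https://builder.example".to_string())));

    // Readers get a cheap Arc clone with no lock held.
    assert_eq!(*inner.builder.load_full().unwrap(), "https://builder.example");
}
```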
| 
 | ||||
|     /// Cache a full payload, keyed on the `tree_hash_root` of the payload | ||||
| @ -380,7 +389,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
| 
 | ||||
|     /// Attempt to retrieve a full payload from the payload cache by the payload root | ||||
|     pub fn get_payload_by_root(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { | ||||
|         self.inner.payload_cache.pop(root) | ||||
|         self.inner.payload_cache.get(root) | ||||
|     } | ||||
| 
 | ||||
|     pub fn executor(&self) -> &TaskExecutor { | ||||
| @ -502,24 +511,6 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|         self.spawn(preparation_cleaner, "exec_preparation_cleanup"); | ||||
|     } | ||||
| 
 | ||||
|     /// Spawns a routine that polls the `exchange_transition_configuration` endpoint. | ||||
|     pub fn spawn_transition_configuration_poll(&self, spec: ChainSpec) { | ||||
|         let routine = |el: ExecutionLayer<T>| async move { | ||||
|             loop { | ||||
|                 if let Err(e) = el.exchange_transition_configuration(&spec).await { | ||||
|                     error!( | ||||
|                         el.log(), | ||||
|                         "Failed to check transition config"; | ||||
|                         "error" => ?e | ||||
|                     ); | ||||
|                 } | ||||
|                 sleep(CONFIG_POLL_INTERVAL).await; | ||||
|             } | ||||
|         }; | ||||
| 
 | ||||
|         self.spawn(routine, "exec_config_poll"); | ||||
|     } | ||||
| 
 | ||||
|     /// Returns `true` if the execution engine is synced and reachable. | ||||
|     pub async fn is_synced(&self) -> bool { | ||||
|         self.engine().is_synced().await | ||||
| @ -529,9 +520,9 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|     /// | ||||
|     /// This function is a wrapper over `Self::is_synced` that makes an additional | ||||
|     /// check for the execution layer sync status. Checks if the latest block has | ||||
|     /// a `block_number != 0`. | ||||
|     /// a `block_number != 0` *if* the `current_slot` is also `> 0`. | ||||
|     /// Returns the `Self::is_synced` response if unable to get latest block. | ||||
|     pub async fn is_synced_for_notifier(&self) -> bool { | ||||
|     pub async fn is_synced_for_notifier(&self, current_slot: Slot) -> bool { | ||||
|         let synced = self.is_synced().await; | ||||
|         if synced { | ||||
|             if let Ok(Some(block)) = self | ||||
| @ -540,7 +531,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|                 .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) | ||||
|                 .await | ||||
|             { | ||||
|                 if block.block_number == 0 { | ||||
|                 if block.block_number == 0 && current_slot > 0 { | ||||
|                     return false; | ||||
|                 } | ||||
|             } | ||||
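Boiled down, the changed check treats an execution head still at the genesis block as unsynced only once the beacon chain has left slot 0. A self-contained restatement of the predicate:

```rust
// Mirrors `is_synced_for_notifier`: an EL whose latest block is block 0 is
// fine while we are still waiting for genesis, and stale afterwards.
fn synced_for_notifier(engine_synced: bool, latest_block_number: u64, current_slot: u64) -> bool {
    engine_synced && !(latest_block_number == 0 && current_slot > 0)
}

fn main() {
    assert!(synced_for_notifier(true, 0, 0));   // pre-genesis: OK
    assert!(!synced_for_notifier(true, 0, 5));  // post-genesis: EL stuck at genesis
    assert!(synced_for_notifier(true, 100, 5)); // normal operation
}
```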
| @ -826,9 +817,8 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
| 
 | ||||
|                             let relay_value = relay.data.message.value; | ||||
|                             let local_value = *local.block_value(); | ||||
|                             if !self.inner.always_prefer_builder_payload | ||||
|                                 && local_value >= relay_value | ||||
|                             { | ||||
|                             if !self.inner.always_prefer_builder_payload { | ||||
|                                 if local_value >= relay_value { | ||||
|                                     info!( | ||||
|                                         self.log(), | ||||
|                                         "Local block is more profitable than relay block"; | ||||
| @ -836,6 +826,14 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|                                         "relay_value" => %relay_value | ||||
|                                     ); | ||||
|                                     return Ok(ProvenancedPayload::Local(local)); | ||||
|                                 } else { | ||||
|                                     info!( | ||||
|                                         self.log(), | ||||
|                                         "Relay block is more profitable than local block"; | ||||
|                                         "local_block_value" => %local_value, | ||||
|                                         "relay_value" => %relay_value | ||||
|                                     ); | ||||
|                                 } | ||||
|                             } | ||||
| 
 | ||||
|                             match verify_builder_bid( | ||||
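The restructured branch makes both outcomes of the value comparison visible in the logs and keeps `always_prefer_builder_payload` as a pure short-circuit. The decision, reduced to a predicate (a sketch; the real values are `Uint256`):

```rust
// Only when the operator has not forced builder payloads does a local payload
// that matches or beats the relay's value win outright.
fn use_local_payload(always_prefer_builder: bool, local_value: u128, relay_value: u128) -> bool {
    !always_prefer_builder && local_value >= relay_value
}
```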
| @ -851,7 +849,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|                                     BlockProposalContents::Payload { | ||||
|                                         payload: relay.data.message.header, | ||||
|                                         block_value: relay.data.message.value, | ||||
|                                         _phantom: PhantomData::default(), | ||||
|                                         _phantom: PhantomData, | ||||
|                                     }, | ||||
|                                 )), | ||||
|                                 Err(reason) if !reason.payload_invalid() => { | ||||
| @ -906,7 +904,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|                                     BlockProposalContents::Payload { | ||||
|                                         payload: relay.data.message.header, | ||||
|                                         block_value: relay.data.message.value, | ||||
|                                         _phantom: PhantomData::default(), | ||||
|                                         _phantom: PhantomData, | ||||
|                                     }, | ||||
|                                 )), | ||||
|                                 // If the payload is valid then use it. The local EE failed | ||||
| @ -915,7 +913,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|                                     BlockProposalContents::Payload { | ||||
|                                         payload: relay.data.message.header, | ||||
|                                         block_value: relay.data.message.value, | ||||
|                                         _phantom: PhantomData::default(), | ||||
|                                         _phantom: PhantomData, | ||||
|                                     }, | ||||
|                                 )), | ||||
|                                 Err(reason) => { | ||||
| @ -1122,7 +1120,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|                 Ok(BlockProposalContents::Payload { | ||||
|                     payload: execution_payload.into(), | ||||
|                     block_value, | ||||
|                     _phantom: PhantomData::default(), | ||||
|                     _phantom: PhantomData, | ||||
|                 }) | ||||
|             }) | ||||
|             .await | ||||
| @ -1311,53 +1309,6 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|         .map_err(Error::EngineError) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn exchange_transition_configuration(&self, spec: &ChainSpec) -> Result<(), Error> { | ||||
|         let local = TransitionConfigurationV1 { | ||||
|             terminal_total_difficulty: spec.terminal_total_difficulty, | ||||
|             terminal_block_hash: spec.terminal_block_hash, | ||||
|             terminal_block_number: 0, | ||||
|         }; | ||||
| 
 | ||||
|         let result = self | ||||
|             .engine() | ||||
|             .request(|engine| engine.api.exchange_transition_configuration_v1(local)) | ||||
|             .await; | ||||
| 
 | ||||
|         match result { | ||||
|             Ok(remote) => { | ||||
|                 if local.terminal_total_difficulty != remote.terminal_total_difficulty | ||||
|                     || local.terminal_block_hash != remote.terminal_block_hash | ||||
|                 { | ||||
|                     error!( | ||||
|                         self.log(), | ||||
|                         "Execution client config mismatch"; | ||||
|                         "msg" => "ensure lighthouse and the execution client are up-to-date and \ | ||||
|                                   configured consistently", | ||||
|                         "remote" => ?remote, | ||||
|                         "local" => ?local, | ||||
|                     ); | ||||
|                     Err(Error::EngineError(Box::new(EngineError::Api { | ||||
|                         error: ApiError::TransitionConfigurationMismatch, | ||||
|                     }))) | ||||
|                 } else { | ||||
|                     debug!( | ||||
|                         self.log(), | ||||
|                         "Execution client config is OK"; | ||||
|                     ); | ||||
|                     Ok(()) | ||||
|                 } | ||||
|             } | ||||
|             Err(e) => { | ||||
|                 error!( | ||||
|                     self.log(), | ||||
|                     "Unable to get transition config"; | ||||
|                     "error" => ?e, | ||||
|                 ); | ||||
|                 Err(Error::EngineError(Box::new(e))) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the execution engine capabilities resulting from a call to | ||||
|     /// engine_exchangeCapabilities. If the capabilities cache is not populated, | ||||
|     /// or if it is populated with a cached result of age >= `age_limit`, this | ||||
| @ -1654,6 +1605,17 @@ impl<T: EthSpec> ExecutionLayer<T> { | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     pub async fn get_block_by_number( | ||||
|         &self, | ||||
|         query: BlockByNumberQuery<'_>, | ||||
|     ) -> Result<Option<ExecutionBlock>, Error> { | ||||
|         self.engine() | ||||
|             .request(|engine| async move { engine.api.get_block_by_number(query).await }) | ||||
|             .await | ||||
|             .map_err(Box::new) | ||||
|             .map_err(Error::EngineError) | ||||
|     } | ||||
| 
 | ||||
|     pub async fn get_payload_by_hash_legacy( | ||||
|         &self, | ||||
|         hash: ExecutionBlockHash, | ||||
| @ -2011,6 +1973,22 @@ async fn timed_future<F: Future<Output = T>, T>(metric: &str, future: F) -> (T, | ||||
|     (result, duration) | ||||
| } | ||||
| 
 | ||||
| fn noop<T: EthSpec>( | ||||
|     _: &ExecutionLayer<T>, | ||||
|     _: ExecutionPayloadRef<T>, | ||||
| ) -> Option<ExecutionPayload<T>> { | ||||
|     None | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| /// Returns the duration since the unix epoch. | ||||
| fn timestamp_now() -> u64 { | ||||
|     SystemTime::now() | ||||
|         .duration_since(UNIX_EPOCH) | ||||
|         .unwrap_or_else(|_| Duration::from_secs(0)) | ||||
|         .as_secs() | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod test { | ||||
|     use super::*; | ||||
| @ -2157,19 +2135,3 @@ mod test { | ||||
|             .await; | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn noop<T: EthSpec>( | ||||
|     _: &ExecutionLayer<T>, | ||||
|     _: ExecutionPayloadRef<T>, | ||||
| ) -> Option<ExecutionPayload<T>> { | ||||
|     None | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| /// Returns the duration since the unix epoch.
 | ||||
| fn timestamp_now() -> u64 { | ||||
|     SystemTime::now() | ||||
|         .duration_since(UNIX_EPOCH) | ||||
|         .unwrap_or_else(|_| Duration::from_secs(0)) | ||||
|         .as_secs() | ||||
| } | ||||
|  | ||||
| @ -30,4 +30,8 @@ impl<T: EthSpec> PayloadCache<T> { | ||||
|     pub fn pop(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { | ||||
|         self.payloads.lock().pop(&PayloadCacheId(*root)) | ||||
|     } | ||||
| 
 | ||||
|     pub fn get(&self, hash: &Hash256) -> Option<ExecutionPayload<T>> { | ||||
|         self.payloads.lock().get(&PayloadCacheId(*hash)).cloned() | ||||
|     } | ||||
| } | ||||
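The new `get` complements the existing `pop`: it clones the payload and leaves the entry cached, whereas `pop` removes it. A minimal sketch of the semantics, assuming the cache already holds a payload for `root`:

```rust
// Sketch: `get` is a non-destructive read, `pop` consumes the entry.
fn cache_semantics<T: EthSpec>(cache: &PayloadCache<T>, root: Hash256) {
    let peeked = cache.get(&root); // clones, entry stays cached
    let taken = cache.pop(&root);  // removes the entry
    // If the payload was cached, both reads observe it.
    assert_eq!(peeked.is_some(), taken.is_some());
    // A second pop now misses, because the first pop consumed the entry.
    assert!(cache.pop(&root).is_none());
}
```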
|  | ||||
| @ -357,15 +357,6 @@ pub async fn handle_rpc<T: EthSpec>( | ||||
| 
 | ||||
|             Ok(serde_json::to_value(response).unwrap()) | ||||
|         } | ||||
|         ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => { | ||||
|             let block_generator = ctx.execution_block_generator.read(); | ||||
|             let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 { | ||||
|                 terminal_total_difficulty: block_generator.terminal_total_difficulty, | ||||
|                 terminal_block_hash: block_generator.terminal_block_hash, | ||||
|                 terminal_block_number: block_generator.terminal_block_number, | ||||
|             }; | ||||
|             Ok(serde_json::to_value(transition_config).unwrap()) | ||||
|         } | ||||
|         ENGINE_EXCHANGE_CAPABILITIES => { | ||||
|             let engine_capabilities = ctx.engine_capabilities.read(); | ||||
|             Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) | ||||
|  | ||||
| @ -11,11 +11,17 @@ use ethereum_consensus::{ | ||||
| }; | ||||
| use fork_choice::ForkchoiceUpdateParameters; | ||||
| use mev_rs::{ | ||||
|     bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, | ||||
|     blinded_block_provider::Server as BlindedBlockProviderServer, | ||||
|     signing::{sign_builder_message, verify_signed_builder_message}, | ||||
|     types::{ | ||||
|         bellatrix::{ | ||||
|             BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, | ||||
|         }, | ||||
|         capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, | ||||
|     sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, | ||||
|     BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, | ||||
|     SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, | ||||
|         BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, | ||||
|         SignedBuilderBid, SignedValidatorRegistration, | ||||
|     }, | ||||
|     Error as MevError, | ||||
| }; | ||||
| use parking_lot::RwLock; | ||||
| use sensitive_url::SensitiveUrl; | ||||
| @ -34,6 +40,11 @@ use types::{ | ||||
|     Uint256, | ||||
| }; | ||||
| 
 | ||||
| pub type MockBuilderServer = axum::Server< | ||||
|     hyper::server::conn::AddrIncoming, | ||||
|     axum::routing::IntoMakeService<axum::routing::Router>, | ||||
| >; | ||||
| 
 | ||||
| #[derive(Clone)] | ||||
| pub enum Operation { | ||||
|     FeeRecipient(Address), | ||||
| @ -47,7 +58,7 @@ pub enum Operation { | ||||
| } | ||||
| 
 | ||||
| impl Operation { | ||||
|     fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { | ||||
|     fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), MevError> { | ||||
|         match self { | ||||
|             Operation::FeeRecipient(fee_recipient) => { | ||||
|                 *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? | ||||
| @ -73,7 +84,7 @@ pub trait BidStuff { | ||||
|     fn prev_randao_mut(&mut self) -> &mut Hash32; | ||||
|     fn block_number_mut(&mut self) -> &mut u64; | ||||
|     fn timestamp_mut(&mut self) -> &mut u64; | ||||
|     fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; | ||||
|     fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; | ||||
| 
 | ||||
|     fn sign_builder_message( | ||||
|         &mut self, | ||||
| @ -134,11 +145,9 @@ impl BidStuff for BuilderBid { | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { | ||||
|     fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { | ||||
|         match self { | ||||
|             Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( | ||||
|                 "withdrawals_root called on bellatrix bid".to_string(), | ||||
|             )), | ||||
|             Self::Bellatrix(_) => Err(MevError::InvalidFork), | ||||
|             Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), | ||||
|         } | ||||
|     } | ||||
| @ -166,19 +175,25 @@ impl BidStuff for BuilderBid { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| pub struct TestingBuilder<E: EthSpec> { | ||||
|     server: BlindedBlockProviderServer<MockBuilder<E>>, | ||||
|     pub builder: MockBuilder<E>, | ||||
| #[derive(Clone)] | ||||
| pub struct MockBuilder<E: EthSpec> { | ||||
|     el: ExecutionLayer<E>, | ||||
|     beacon_client: BeaconNodeHttpClient, | ||||
|     spec: ChainSpec, | ||||
|     context: Arc<Context>, | ||||
|     val_registration_cache: Arc<RwLock<HashMap<BlsPublicKey, SignedValidatorRegistration>>>, | ||||
|     builder_sk: SecretKey, | ||||
|     operations: Arc<RwLock<Vec<Operation>>>, | ||||
|     invalidate_signatures: Arc<RwLock<bool>>, | ||||
| } | ||||
| 
 | ||||
| impl<E: EthSpec> TestingBuilder<E> { | ||||
|     pub fn new( | ||||
| impl<E: EthSpec> MockBuilder<E> { | ||||
|     pub fn new_for_testing( | ||||
|         mock_el_url: SensitiveUrl, | ||||
|         builder_url: SensitiveUrl, | ||||
|         beacon_url: SensitiveUrl, | ||||
|         spec: ChainSpec, | ||||
|         executor: TaskExecutor, | ||||
|     ) -> Self { | ||||
|     ) -> (Self, MockBuilderServer) { | ||||
|         let file = NamedTempFile::new().unwrap(); | ||||
|         let path = file.path().into(); | ||||
|         std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); | ||||
| @ -207,39 +222,13 @@ impl<E: EthSpec> TestingBuilder<E> { | ||||
|             spec, | ||||
|             context, | ||||
|         ); | ||||
|         let port = builder_url.full.port().unwrap(); | ||||
|         let host: Ipv4Addr = builder_url | ||||
|             .full | ||||
|             .host_str() | ||||
|             .unwrap() | ||||
|             .to_string() | ||||
|             .parse() | ||||
|             .unwrap(); | ||||
|         let server = BlindedBlockProviderServer::new(host, port, builder.clone()); | ||||
|         Self { server, builder } | ||||
|         let host: Ipv4Addr = Ipv4Addr::LOCALHOST; | ||||
|         let port = 0; | ||||
|         let provider = BlindedBlockProviderServer::new(host, port, builder.clone()); | ||||
|         let server = provider.serve(); | ||||
|         (builder, server) | ||||
|     } | ||||
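Because the server now binds to `Ipv4Addr::LOCALHOST` on port 0, tests recover the ephemeral port from the returned `MockBuilderServer` instead of choosing one up front. A hedged usage sketch (the URL/spec/executor values are assumed to be in scope):

```rust
// Sketch: port 0 asks the OS for a free port; `local_addr()` on the
// hyper-based server reports the address that was actually bound.
let (builder, server) = MockBuilder::new_for_testing(
    mock_el_url, // SensitiveUrl of the mock execution engine
    beacon_url,  // SensitiveUrl of the beacon node API
    spec,        // ChainSpec
    executor,    // TaskExecutor
);
let builder_url = format!("http://{}", server.local_addr());
tokio::spawn(server); // drive the builder server in the background
```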
| 
 | ||||
|     pub async fn run(&self) { | ||||
|         let server = self.server.serve(); | ||||
|         if let Err(err) = server.await { | ||||
|             println!("error while listening for incoming: {err}") | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[derive(Clone)] | ||||
| pub struct MockBuilder<E: EthSpec> { | ||||
|     el: ExecutionLayer<E>, | ||||
|     beacon_client: BeaconNodeHttpClient, | ||||
|     spec: ChainSpec, | ||||
|     context: Arc<Context>, | ||||
|     val_registration_cache: Arc<RwLock<HashMap<BlsPublicKey, SignedValidatorRegistration>>>, | ||||
|     builder_sk: SecretKey, | ||||
|     operations: Arc<RwLock<Vec<Operation>>>, | ||||
|     invalidate_signatures: Arc<RwLock<bool>>, | ||||
| } | ||||
| 
 | ||||
| impl<E: EthSpec> MockBuilder<E> { | ||||
|     pub fn new( | ||||
|         el: ExecutionLayer<E>, | ||||
|         beacon_client: BeaconNodeHttpClient, | ||||
| @ -274,7 +263,7 @@ impl<E: EthSpec> MockBuilder<E> { | ||||
|         *self.invalidate_signatures.write() = false; | ||||
|     } | ||||
| 
 | ||||
|     fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { | ||||
|     fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), MevError> { | ||||
|         let mut guard = self.operations.write(); | ||||
|         while let Some(op) = guard.pop() { | ||||
|             op.apply(bid)?; | ||||
| @ -288,7 +277,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | ||||
|     async fn register_validators( | ||||
|         &self, | ||||
|         registrations: &mut [SignedValidatorRegistration], | ||||
|     ) -> Result<(), BlindedBlockProviderError> { | ||||
|     ) -> Result<(), MevError> { | ||||
|         for registration in registrations { | ||||
|             let pubkey = registration.message.public_key.clone(); | ||||
|             let message = &mut registration.message; | ||||
| @ -307,10 +296,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     async fn fetch_best_bid( | ||||
|         &self, | ||||
|         bid_request: &BidRequest, | ||||
|     ) -> Result<SignedBuilderBid, BlindedBlockProviderError> { | ||||
|     async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result<SignedBuilderBid, MevError> { | ||||
|         let slot = Slot::new(bid_request.slot); | ||||
|         let fork = self.spec.fork_name_at_slot::<E>(slot); | ||||
|         let signed_cached_data = self | ||||
| @ -336,7 +322,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | ||||
|             .map_err(convert_err)? | ||||
|             .block_hash(); | ||||
|         if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { | ||||
|             return Err(BlindedBlockProviderError::Custom(format!( | ||||
|             return Err(custom_err(format!( | ||||
|                 "head mismatch: {} {}", | ||||
|                 head_execution_hash, bid_request.parent_hash | ||||
|             ))); | ||||
| @ -396,7 +382,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | ||||
|             .get_debug_beacon_states(StateId::Head) | ||||
|             .await | ||||
|             .map_err(convert_err)? | ||||
|             .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? | ||||
|             .ok_or_else(|| custom_err("missing head state".to_string()))? | ||||
|             .data; | ||||
|         let prev_randao = head_state | ||||
|             .get_randao_mix(head_state.current_epoch()) | ||||
| @ -409,10 +395,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | ||||
|                 PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) | ||||
|             } | ||||
|             ForkName::Base | ForkName::Altair => { | ||||
|                 return Err(BlindedBlockProviderError::Custom(format!( | ||||
|                     "Unsupported fork: {}", | ||||
|                     fork | ||||
|                 ))); | ||||
|                 return Err(MevError::InvalidFork); | ||||
|             } | ||||
|         }; | ||||
| 
 | ||||
| @ -452,12 +435,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | ||||
|                 value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, | ||||
|                 public_key: self.builder_sk.public_key(), | ||||
|             }), | ||||
|             ForkName::Base | ForkName::Altair => { | ||||
|                 return Err(BlindedBlockProviderError::Custom(format!( | ||||
|                     "Unsupported fork: {}", | ||||
|                     fork | ||||
|                 ))) | ||||
|             } | ||||
|             ForkName::Base | ForkName::Altair => return Err(MevError::InvalidFork), | ||||
|         }; | ||||
|         *message.gas_limit_mut() = cached_data.gas_limit; | ||||
| 
 | ||||
| @ -475,7 +453,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | ||||
|     async fn open_bid( | ||||
|         &self, | ||||
|         signed_block: &mut SignedBlindedBeaconBlock, | ||||
|     ) -> Result<ServerPayload, BlindedBlockProviderError> { | ||||
|     ) -> Result<ServerPayload, MevError> { | ||||
|         let node = match signed_block { | ||||
|             SignedBlindedBeaconBlock::Bellatrix(block) => { | ||||
|                 block.message.body.execution_payload_header.hash_tree_root() | ||||
| @ -496,9 +474,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>( | ||||
|     ssz_rs_data: &T, | ||||
| ) -> Result<U, BlindedBlockProviderError> { | ||||
| pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>(ssz_rs_data: &T) -> Result<U, MevError> { | ||||
|     U::from_ssz_bytes( | ||||
|         ssz_rs::serialize(ssz_rs_data) | ||||
|             .map_err(convert_err)? | ||||
| @ -507,12 +483,17 @@ pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>( | ||||
|     .map_err(convert_err) | ||||
| } | ||||
| 
 | ||||
| pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>( | ||||
|     ssz_data: &T, | ||||
| ) -> Result<U, BlindedBlockProviderError> { | ||||
| pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>(ssz_data: &T) -> Result<U, MevError> { | ||||
|     ssz_rs::deserialize::<U>(&ssz_data.as_ssz_bytes()).map_err(convert_err) | ||||
| } | ||||
| 
 | ||||
| fn convert_err<E: Debug>(e: E) -> BlindedBlockProviderError { | ||||
|     BlindedBlockProviderError::Custom(format!("{e:?}")) | ||||
| fn convert_err<E: Debug>(e: E) -> MevError { | ||||
|     custom_err(format!("{e:?}")) | ||||
| } | ||||
| 
 | ||||
| // This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`.
 | ||||
| fn custom_err(s: String) -> MevError { | ||||
|     MevError::Consensus(ethereum_consensus::state_transition::Error::Io( | ||||
|         std::io::Error::new(std::io::ErrorKind::Other, s), | ||||
|     )) | ||||
| } | ||||
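With the `Custom` variant gone from `mev_rs::Error`, arbitrary messages are smuggled through a `std::io::Error` wrapped in the `Consensus` variant. Roughly what a caller observes (the exact `Debug` rendering is an assumption):

```rust
// Sketch: the message survives inside the io::Error payload, so it still
// shows up when the error is logged via `Debug`.
let err = custom_err("head mismatch: 0xabc.. 0xdef..".to_string());
println!("{err:?}");
// Roughly: Consensus(Io(Custom { kind: Other, error: "head mismatch: ..." }))
```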
|  | ||||
| @ -31,7 +31,6 @@ impl<T: EthSpec> MockExecutionLayer<T> { | ||||
|             None, | ||||
|             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), | ||||
|             spec, | ||||
|             None, | ||||
|         ) | ||||
|     } | ||||
| 
 | ||||
| @ -43,7 +42,6 @@ impl<T: EthSpec> MockExecutionLayer<T> { | ||||
|         builder_threshold: Option<u128>, | ||||
|         jwt_key: Option<JwtKey>, | ||||
|         spec: ChainSpec, | ||||
|         builder_url: Option<SensitiveUrl>, | ||||
|     ) -> Self { | ||||
|         let handle = executor.handle().unwrap(); | ||||
| 
 | ||||
| @ -65,7 +63,6 @@ impl<T: EthSpec> MockExecutionLayer<T> { | ||||
| 
 | ||||
|         let config = Config { | ||||
|             execution_endpoints: vec![url], | ||||
|             builder_url, | ||||
|             secret_files: vec![path], | ||||
|             suggested_fee_recipient: Some(Address::repeat_byte(42)), | ||||
|             builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), | ||||
|  | ||||
| @ -25,7 +25,7 @@ use warp::{http::StatusCode, Filter, Rejection}; | ||||
| use crate::EngineCapabilities; | ||||
| pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; | ||||
| pub use hook::Hook; | ||||
| pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; | ||||
| pub use mock_builder::{Context as MockBuilderContext, MockBuilder, MockBuilderServer, Operation}; | ||||
| pub use mock_execution_layer::MockExecutionLayer; | ||||
| 
 | ||||
| pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; | ||||
| @ -43,7 +43,6 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { | ||||
|     get_payload_bodies_by_range_v1: true, | ||||
|     get_payload_v1: true, | ||||
|     get_payload_v2: true, | ||||
|     exchange_transition_configuration_v1: true, | ||||
| }; | ||||
| 
 | ||||
| mod execution_block_generator; | ||||
|  | ||||
| @ -2,23 +2,23 @@ | ||||
| name = "genesis" | ||||
| version = "0.2.0" | ||||
| authors = ["Paul Hauner <paul@paulhauner.com>"] | ||||
| edition = "2021" | ||||
| edition = { workspace = true } | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| eth1_test_rig = { path = "../../testing/eth1_test_rig" } | ||||
| sensitive_url = { path = "../../common/sensitive_url" } | ||||
| eth1_test_rig = { workspace = true } | ||||
| sensitive_url = { workspace = true } | ||||
| 
 | ||||
| [dependencies] | ||||
| futures = "0.3.7" | ||||
| types = { path = "../../consensus/types"} | ||||
| environment = { path = "../../lighthouse/environment"} | ||||
| eth1 = { path = "../eth1"} | ||||
| rayon = "1.4.1" | ||||
| state_processing = { path = "../../consensus/state_processing" } | ||||
| merkle_proof = { path = "../../consensus/merkle_proof" } | ||||
| ethereum_ssz = "0.5.0" | ||||
| ethereum_hashing = "1.0.0-beta.2" | ||||
| tree_hash = "0.5.0" | ||||
| tokio = { version = "1.14.0", features = ["full"] } | ||||
| slog = "2.5.2" | ||||
| int_to_bytes = { path = "../../consensus/int_to_bytes" } | ||||
| futures = { workspace = true } | ||||
| types = { workspace = true } | ||||
| environment = { workspace = true } | ||||
| eth1 = { workspace = true } | ||||
| rayon = { workspace = true } | ||||
| state_processing = { workspace = true } | ||||
| merkle_proof = { workspace = true } | ||||
| ethereum_ssz = { workspace = true } | ||||
| ethereum_hashing = { workspace = true } | ||||
| tree_hash = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| slog = { workspace = true } | ||||
| int_to_bytes = { workspace = true } | ||||
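Each `{ workspace = true }` entry inherits its version or path from a single table in the repository-root manifest, so member crates no longer pin versions individually. A sketch of the corresponding root-manifest entries (values illustrative, not verbatim from the repo):

```toml
# Root Cargo.toml (sketch):
[workspace.dependencies]
futures = "0.3"
tokio = { version = "1", features = ["full"] }
slog = "2"
types = { path = "consensus/types" }
```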
|  | ||||
| @ -39,7 +39,7 @@ pub fn genesis_deposits( | ||||
| 
 | ||||
|     Ok(deposit_data | ||||
|         .into_iter() | ||||
|         .zip(proofs.into_iter()) | ||||
|         .zip(proofs) | ||||
|         .map(|(data, proof)| (data, proof.into())) | ||||
|         .map(|(data, proof)| Deposit { proof, data }) | ||||
|         .collect()) | ||||
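The `.into_iter()` on `proofs` was redundant: `Iterator::zip` accepts any `IntoIterator`, which is exactly what clippy's `useless_conversion` lint flags. A standalone illustration:

```rust
// `zip` takes `IntoIterator`, so a `Vec` can be passed directly.
let data = vec![1u64, 2, 3];
let proofs = vec!["p1", "p2", "p3"];
let deposits: Vec<(u64, &str)> = data.into_iter().zip(proofs).collect();
assert_eq!(deposits.len(), 3);
```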
|  | ||||
| @ -2,52 +2,53 @@ | ||||
| name = "http_api" | ||||
| version = "0.1.0" | ||||
| authors = ["Paul Hauner <paul@paulhauner.com>"] | ||||
| edition = "2021" | ||||
| edition = { workspace = true } | ||||
| autotests = false                               # using a single test binary compiles faster | ||||
| 
 | ||||
| [dependencies] | ||||
| warp = { version = "0.3.2", features = ["tls"] } | ||||
| serde = { version = "1.0.116", features = ["derive"] } | ||||
| tokio = { version = "1.14.0", features = ["macros","sync"] } | ||||
| tokio-stream = { version = "0.1.3", features = ["sync"] } | ||||
| types = { path = "../../consensus/types" } | ||||
| hex = "0.4.2" | ||||
| beacon_chain = { path = "../beacon_chain" } | ||||
| eth2 = { path = "../../common/eth2", features = ["lighthouse"] } | ||||
| slog = "2.5.2" | ||||
| network = { path = "../network" } | ||||
| lighthouse_network = { path = "../lighthouse_network" } | ||||
| eth1 = { path = "../eth1" } | ||||
| state_processing = { path = "../../consensus/state_processing" } | ||||
| lighthouse_version = { path = "../../common/lighthouse_version" } | ||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics" } | ||||
| lazy_static = "1.4.0" | ||||
| warp_utils = { path = "../../common/warp_utils" } | ||||
| slot_clock = { path = "../../common/slot_clock" } | ||||
| ethereum_ssz = "0.5.0" | ||||
| warp = { workspace = true } | ||||
| serde = { workspace = true } | ||||
| tokio = { workspace = true } | ||||
| tokio-stream = { workspace = true } | ||||
| types = { workspace = true } | ||||
| hex = { workspace = true } | ||||
| beacon_chain = { workspace = true } | ||||
| eth2 = { workspace = true } | ||||
| slog = { workspace = true } | ||||
| network = { workspace = true } | ||||
| lighthouse_network = { workspace = true } | ||||
| eth1 = { workspace = true } | ||||
| state_processing = { workspace = true } | ||||
| lighthouse_version = { workspace = true } | ||||
| lighthouse_metrics = { workspace = true } | ||||
| lazy_static = { workspace = true } | ||||
| warp_utils = { workspace = true } | ||||
| slot_clock = { workspace = true } | ||||
| ethereum_ssz = { workspace = true } | ||||
| bs58 = "0.4.0" | ||||
| futures = "0.3.8" | ||||
| execution_layer = {path = "../execution_layer"} | ||||
| parking_lot = "0.12.0" | ||||
| safe_arith = {path = "../../consensus/safe_arith"} | ||||
| task_executor = { path = "../../common/task_executor" } | ||||
| lru = "0.7.7" | ||||
| tree_hash = "0.5.0" | ||||
| sysinfo = "0.26.5" | ||||
| futures = { workspace = true } | ||||
| execution_layer = { workspace = true } | ||||
| parking_lot = { workspace = true } | ||||
| safe_arith = { workspace = true } | ||||
| task_executor = { workspace = true } | ||||
| lru = { workspace = true } | ||||
| tree_hash = { workspace = true } | ||||
| sysinfo = { workspace = true } | ||||
| system_health = { path = "../../common/system_health" } | ||||
| directory = { path = "../../common/directory" } | ||||
| logging = { path = "../../common/logging" } | ||||
| ethereum_serde_utils = "0.5.0" | ||||
| operation_pool = { path = "../operation_pool" } | ||||
| sensitive_url = { path = "../../common/sensitive_url" } | ||||
| unused_port = {path = "../../common/unused_port"} | ||||
| store = { path = "../store" } | ||||
| directory = { workspace = true } | ||||
| logging = { workspace = true } | ||||
| ethereum_serde_utils = { workspace = true } | ||||
| operation_pool = { workspace = true } | ||||
| sensitive_url = { workspace = true } | ||||
| store = { workspace = true } | ||||
| bytes = { workspace = true } | ||||
| beacon_processor = { workspace = true } | ||||
| 
 | ||||
| [dev-dependencies] | ||||
| environment = { path = "../../lighthouse/environment" } | ||||
| serde_json = "1.0.58" | ||||
| proto_array = { path = "../../consensus/proto_array" } | ||||
| genesis = { path = "../genesis" } | ||||
| environment = { workspace = true } | ||||
| serde_json = { workspace = true } | ||||
| proto_array = { workspace = true } | ||||
| genesis = { workspace = true } | ||||
| 
 | ||||
| [[test]] | ||||
| name = "bn_http_api_tests" | ||||
|  | ||||
| @ -1,9 +1,7 @@ | ||||
| //! Contains the handler for the `GET validator/duties/attester/{epoch}` endpoint.
 | ||||
| 
 | ||||
| use crate::state_id::StateId; | ||||
| use beacon_chain::{ | ||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, | ||||
| }; | ||||
| use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; | ||||
| use eth2::types::{self as api_types}; | ||||
| use slot_clock::SlotClock; | ||||
| use state_processing::state_advance::partial_state_advance; | ||||
| @ -32,12 +30,11 @@ pub fn attester_duties<T: BeaconChainTypes>( | ||||
|     // will equal `current_epoch + 1`
 | ||||
|     let tolerant_current_epoch = chain | ||||
|         .slot_clock | ||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) | ||||
|         .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) | ||||
|         .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? | ||||
|         .epoch(T::EthSpec::slots_per_epoch()); | ||||
| 
 | ||||
|     if request_epoch == current_epoch | ||||
|         || request_epoch == tolerant_current_epoch | ||||
|         || request_epoch == current_epoch + 1 | ||||
|         || request_epoch == tolerant_current_epoch + 1 | ||||
|     { | ||||
| @ -48,7 +45,7 @@ pub fn attester_duties<T: BeaconChainTypes>( | ||||
|             request_epoch, current_epoch | ||||
|         ))) | ||||
|     } else { | ||||
|         // request_epoch < current_epoch
 | ||||
|         // request_epoch < current_epoch; in fact, we only allow `request_epoch == current_epoch - 1` in this case
 | ||||
|         compute_historic_attester_duties(request_epoch, request_indices, chain) | ||||
|     } | ||||
| } | ||||
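The hard-coded `MAXIMUM_GOSSIP_CLOCK_DISPARITY` constant gives way to a value read from the chain spec, letting alternative networks tune the tolerance. A sketch of what the spec accessor plausibly looks like (the field name and the 500 ms figure are assumptions):

```rust
// Sketch: the disparity becomes per-network configuration on ChainSpec.
impl ChainSpec {
    pub fn maximum_gossip_clock_disparity(&self) -> Duration {
        // e.g. 500 ms on mainnet; the millis field is an assumed name.
        Duration::from_millis(self.maximum_gossip_clock_disparity_millis)
    }
}
```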
|  | ||||
| @ -75,7 +75,7 @@ impl<T: EthSpec> PackingEfficiencyHandler<T> { | ||||
|             available_attestations: HashSet::new(), | ||||
|             included_attestations: HashMap::new(), | ||||
|             committee_store: CommitteeStore::new(), | ||||
|             _phantom: PhantomData::default(), | ||||
|             _phantom: PhantomData, | ||||
|         }; | ||||
| 
 | ||||
|         handler.compute_epoch(start_epoch, &starting_state, spec)?; | ||||
|  | ||||
| @ -49,7 +49,7 @@ pub fn get_block_rewards<T: BeaconChainTypes>( | ||||
|         .map_err(beacon_chain_error)?; | ||||
| 
 | ||||
|     state | ||||
|         .build_all_caches(&chain.spec) | ||||
|         .build_caches(&chain.spec) | ||||
|         .map_err(beacon_state_error)?; | ||||
| 
 | ||||
|     let mut reward_cache = Default::default(); | ||||
|  | ||||
beacon_node/http_api/src/builder_states.rs (new file, 72 lines)
							| @ -0,0 +1,72 @@ | ||||
| use crate::StateId; | ||||
| use beacon_chain::{BeaconChain, BeaconChainTypes}; | ||||
| use safe_arith::SafeArith; | ||||
| use state_processing::per_block_processing::get_expected_withdrawals; | ||||
| use state_processing::state_advance::partial_state_advance; | ||||
| use std::sync::Arc; | ||||
| use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; | ||||
| 
 | ||||
| const MAX_EPOCH_LOOKAHEAD: u64 = 2; | ||||
| 
 | ||||
| /// Get the withdrawals computed from the specified state that will be included in the block
 | ||||
| /// that gets built on the specified state.
 | ||||
| pub fn get_next_withdrawals<T: BeaconChainTypes>( | ||||
|     chain: &Arc<BeaconChain<T>>, | ||||
|     mut state: BeaconState<T::EthSpec>, | ||||
|     state_id: StateId, | ||||
|     proposal_slot: Slot, | ||||
| ) -> Result<Withdrawals<T::EthSpec>, warp::Rejection> { | ||||
|     get_next_withdrawals_sanity_checks(chain, &state, proposal_slot)?; | ||||
| 
 | ||||
|     // advance the state to the epoch of the proposal slot.
 | ||||
|     let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); | ||||
|     let (state_root, _, _) = state_id.root(chain)?; | ||||
|     if proposal_epoch != state.current_epoch() { | ||||
|         if let Err(e) = | ||||
|             partial_state_advance(&mut state, Some(state_root), proposal_slot, &chain.spec) | ||||
|         { | ||||
|             return Err(warp_utils::reject::custom_server_error(format!( | ||||
|                 "failed to advance to the epoch of the proposal slot: {:?}", | ||||
|                 e | ||||
|             ))); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     match get_expected_withdrawals(&state, &chain.spec) { | ||||
|         Ok(withdrawals) => Ok(withdrawals), | ||||
|         Err(e) => Err(warp_utils::reject::custom_server_error(format!( | ||||
|             "failed to get expected withdrawal: {:?}", | ||||
|             e | ||||
|         ))), | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn get_next_withdrawals_sanity_checks<T: BeaconChainTypes>( | ||||
|     chain: &BeaconChain<T>, | ||||
|     state: &BeaconState<T::EthSpec>, | ||||
|     proposal_slot: Slot, | ||||
| ) -> Result<(), warp::Rejection> { | ||||
|     if proposal_slot <= state.slot() { | ||||
|         return Err(warp_utils::reject::custom_bad_request( | ||||
|             "proposal slot must be greater than the pre-state slot".to_string(), | ||||
|         )); | ||||
|     } | ||||
| 
 | ||||
|     let fork = chain.spec.fork_name_at_slot::<T::EthSpec>(proposal_slot); | ||||
|     if let ForkName::Base | ForkName::Altair | ForkName::Merge = fork { | ||||
|         return Err(warp_utils::reject::custom_bad_request( | ||||
|             "the specified state is a pre-capella state.".to_string(), | ||||
|         )); | ||||
|     } | ||||
| 
 | ||||
|     let look_ahead_limit = MAX_EPOCH_LOOKAHEAD | ||||
|         .safe_mul(T::EthSpec::slots_per_epoch()) | ||||
|         .map_err(warp_utils::reject::arith_error)?; | ||||
|     if proposal_slot >= state.slot() + look_ahead_limit { | ||||
|         return Err(warp_utils::reject::custom_bad_request(format!( | ||||
|             "proposal slot is greater than or equal to the look ahead limit: {look_ahead_limit}" | ||||
|         ))); | ||||
|     } | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
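Taken together, the checks accept only post-Capella proposal slots strictly after the state slot and within `MAX_EPOCH_LOOKAHEAD` (two epochs) of it. A condensed restatement of the accepted window, sketched without the rejection plumbing:

```rust
// Sketch: the slot window enforced by the sanity checks above.
fn slot_in_window(state_slot: u64, proposal_slot: u64, slots_per_epoch: u64) -> bool {
    let look_ahead_limit = 2 * slots_per_epoch; // MAX_EPOCH_LOOKAHEAD epochs
    proposal_slot > state_slot && proposal_slot < state_slot + look_ahead_limit
}
```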
(File diff suppressed because it is too large)
							| @ -3,7 +3,7 @@ | ||||
| use crate::state_id::StateId; | ||||
| use beacon_chain::{ | ||||
|     beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, | ||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, | ||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, | ||||
| }; | ||||
| use eth2::types::{self as api_types}; | ||||
| use safe_arith::SafeArith; | ||||
| @ -33,7 +33,7 @@ pub fn proposer_duties<T: BeaconChainTypes>( | ||||
|     // will equal `current_epoch + 1`
 | ||||
|     let tolerant_current_epoch = chain | ||||
|         .slot_clock | ||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) | ||||
|         .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) | ||||
|         .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? | ||||
|         .epoch(T::EthSpec::slots_per_epoch()); | ||||
| 
 | ||||
|  | ||||
| @ -1,13 +1,16 @@ | ||||
| use crate::metrics; | ||||
| use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; | ||||
| use beacon_chain::{ | ||||
|     BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, | ||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, | ||||
|     NotifyExecutionLayer, | ||||
| }; | ||||
| use eth2::types::{BroadcastValidation, ErrorMessage}; | ||||
| use execution_layer::ProvenancedPayload; | ||||
| use lighthouse_network::PubsubMessage; | ||||
| use network::NetworkMessage; | ||||
| use slog::{debug, error, info, warn, Logger}; | ||||
| use slot_clock::SlotClock; | ||||
| use std::marker::PhantomData; | ||||
| use std::sync::Arc; | ||||
| use std::time::Duration; | ||||
| use tokio::sync::mpsc::UnboundedSender; | ||||
| @ -16,51 +19,138 @@ use types::{ | ||||
|     AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, | ||||
|     FullPayload, Hash256, SignedBeaconBlock, | ||||
| }; | ||||
| use warp::Rejection; | ||||
| use warp::http::StatusCode; | ||||
| use warp::{reply::Response, Rejection, Reply}; | ||||
| 
 | ||||
| pub enum ProvenancedBlock<T: EthSpec> { | ||||
| pub enum ProvenancedBlock<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> { | ||||
|     /// The payload was built using a local EE.
 | ||||
|     Local(Arc<SignedBeaconBlock<T, FullPayload<T>>>), | ||||
|     Local(B, PhantomData<T>), | ||||
|     /// The payload was built using a remote builder (e.g., via a mev-boost
 | ||||
|     /// compatible relay).
 | ||||
|     Builder(Arc<SignedBeaconBlock<T, FullPayload<T>>>), | ||||
|     Builder(B, PhantomData<T>), | ||||
| } | ||||
| 
 | ||||
| impl<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> ProvenancedBlock<T, B> { | ||||
|     pub fn local(block: B) -> Self { | ||||
|         Self::Local(block, PhantomData) | ||||
|     } | ||||
| 
 | ||||
|     pub fn builder(block: B) -> Self { | ||||
|         Self::Builder(block, PhantomData) | ||||
|     } | ||||
| } | ||||
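`ProvenancedBlock` is now generic over the chain types `T`, but `T` appears only in the `B: IntoGossipVerifiedBlock<T>` bound; the `PhantomData<T>` field satisfies Rust's rule that every declared type parameter must occur in a field (error E0392). A minimal standalone illustration:

```rust
use std::marker::PhantomData;

trait VerifiedIn<T> {}

// Without `PhantomData<T>`, this enum fails to compile with E0392,
// because `T` is referenced only in the trait bound, never in a field.
enum Provenanced<T, B: VerifiedIn<T>> {
    Local(B, PhantomData<T>),
    Builder(B, PhantomData<T>),
}
```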
| 
 | ||||
| /// Handles a request from the HTTP API for full blocks.
 | ||||
| pub async fn publish_block<T: BeaconChainTypes>( | ||||
| pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>( | ||||
|     block_root: Option<Hash256>, | ||||
|     provenanced_block: ProvenancedBlock<T::EthSpec>, | ||||
|     provenanced_block: ProvenancedBlock<T, B>, | ||||
|     chain: Arc<BeaconChain<T>>, | ||||
|     network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, | ||||
|     log: Logger, | ||||
| ) -> Result<(), Rejection> { | ||||
|     validation_level: BroadcastValidation, | ||||
|     duplicate_status_code: StatusCode, | ||||
| ) -> Result<Response, Rejection> { | ||||
|     let seen_timestamp = timestamp_now(); | ||||
|     let (block, is_locally_built_block) = match provenanced_block { | ||||
|         ProvenancedBlock::Local(block) => (block, true), | ||||
|         ProvenancedBlock::Builder(block) => (block, false), | ||||
|         ProvenancedBlock::Local(block, _) => (block, true), | ||||
|         ProvenancedBlock::Builder(block, _) => (block, false), | ||||
|     }; | ||||
|     let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); | ||||
|     let beacon_block = block.inner(); | ||||
|     let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock); | ||||
|     debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot()); | ||||
| 
 | ||||
|     debug!( | ||||
|     /* actually publish a block */ | ||||
|     let publish_block = move |block: Arc<SignedBeaconBlock<T::EthSpec>>, | ||||
|                               sender, | ||||
|                               log, | ||||
|         "Signed block published to HTTP API"; | ||||
|         "slot" => block.slot() | ||||
|                               seen_timestamp| { | ||||
|         let publish_timestamp = timestamp_now(); | ||||
|         let publish_delay = publish_timestamp | ||||
|             .checked_sub(seen_timestamp) | ||||
|             .unwrap_or_else(|| Duration::from_secs(0)); | ||||
| 
 | ||||
|         info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay); | ||||
| 
 | ||||
|         let message = PubsubMessage::BeaconBlock(block); | ||||
|         crate::publish_pubsub_message(&sender, message) | ||||
|             .map_err(|_| BeaconChainError::UnableToPublish.into()) | ||||
|     }; | ||||
| 
 | ||||
|     /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ | ||||
|     let gossip_verified_block = match block.into_gossip_verified_block(&chain) { | ||||
|         Ok(b) => b, | ||||
|         Err(BlockError::BlockIsAlreadyKnown) => { | ||||
|             // Allow the status code for duplicate blocks to be overridden based on config.
 | ||||
|             return Ok(warp::reply::with_status( | ||||
|                 warp::reply::json(&ErrorMessage { | ||||
|                     code: duplicate_status_code.as_u16(), | ||||
|                     message: "duplicate block".to_string(), | ||||
|                     stacktraces: vec![], | ||||
|                 }), | ||||
|                 duplicate_status_code, | ||||
|             ) | ||||
|             .into_response()); | ||||
|         } | ||||
|         Err(e) => { | ||||
|             warn!( | ||||
|                 log, | ||||
|                 "Not publishing block - not gossip verified"; | ||||
|                 "slot" => beacon_block.slot(), | ||||
|                 "error" => ?e | ||||
|             ); | ||||
|             return Err(warp_utils::reject::custom_bad_request(e.to_string())); | ||||
|         } | ||||
|     }; | ||||
| 
 | ||||
|     // Send the block, regardless of whether or not it is valid. The API
 | ||||
|     // specification is very clear that this is the desired behaviour.
 | ||||
|     let block_root = block_root.unwrap_or(gossip_verified_block.block_root); | ||||
| 
 | ||||
|     let message = PubsubMessage::BeaconBlock(block.clone()); | ||||
|     crate::publish_pubsub_message(network_tx, message)?; | ||||
|     if let BroadcastValidation::Gossip = validation_level { | ||||
|         publish_block( | ||||
|             beacon_block.clone(), | ||||
|             network_tx.clone(), | ||||
|             log.clone(), | ||||
|             seen_timestamp, | ||||
|         ) | ||||
|         .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; | ||||
|     } | ||||
| 
 | ||||
|     let block_root = block_root.unwrap_or_else(|| block.canonical_root()); | ||||
|     /* only publish if gossip- and consensus-valid and equivocation-free */ | ||||
|     let chain_clone = chain.clone(); | ||||
|     let block_clone = beacon_block.clone(); | ||||
|     let log_clone = log.clone(); | ||||
|     let sender_clone = network_tx.clone(); | ||||
| 
 | ||||
|     let publish_fn = move || match validation_level { | ||||
|         BroadcastValidation::Gossip => Ok(()), | ||||
|         BroadcastValidation::Consensus => { | ||||
|             publish_block(block_clone, sender_clone, log_clone, seen_timestamp) | ||||
|         } | ||||
|         BroadcastValidation::ConsensusAndEquivocation => { | ||||
|             if chain_clone | ||||
|                 .observed_block_producers | ||||
|                 .read() | ||||
|                 .proposer_has_been_observed(block_clone.message(), block_root) | ||||
|                 .map_err(|e| BlockError::BeaconChainError(e.into()))? | ||||
|                 .is_slashable() | ||||
|             { | ||||
|                 warn!( | ||||
|                     log_clone, | ||||
|                     "Not publishing equivocating block"; | ||||
|                     "slot" => block_clone.slot() | ||||
|                 ); | ||||
|                 Err(BlockError::Slashable) | ||||
|             } else { | ||||
|                 publish_block(block_clone, sender_clone, log_clone, seen_timestamp) | ||||
|             } | ||||
|         } | ||||
|     }; | ||||
| 
 | ||||
|     match chain | ||||
|         .process_block( | ||||
|             block_root, | ||||
|             block.clone(), | ||||
|             CountUnrealized::True, | ||||
|             gossip_verified_block, | ||||
|             NotifyExecutionLayer::Yes, | ||||
|             publish_fn, | ||||
|         ) | ||||
|         .await | ||||
|     { | ||||
| @ -70,14 +160,14 @@ pub async fn publish_block<T: BeaconChainTypes>( | ||||
|                 "Valid block from HTTP API"; | ||||
|                 "block_delay" => ?delay, | ||||
|                 "root" => format!("{}", root), | ||||
|                 "proposer_index" => block.message().proposer_index(), | ||||
|                 "slot" => block.slot(), | ||||
|                 "proposer_index" => beacon_block.message().proposer_index(), | ||||
|                 "slot" => beacon_block.slot(), | ||||
|             ); | ||||
| 
 | ||||
|             // Notify the validator monitor.
 | ||||
|             chain.validator_monitor.read().register_api_block( | ||||
|                 seen_timestamp, | ||||
|                 block.message(), | ||||
|                 beacon_block.message(), | ||||
|                 root, | ||||
|                 &chain.slot_clock, | ||||
|             ); | ||||
| @ -90,40 +180,39 @@ pub async fn publish_block<T: BeaconChainTypes>( | ||||
|             // blocks built with builders we consider the broadcast time to be
 | ||||
|             // when the blinded block is published to the builder.
 | ||||
|             if is_locally_built_block { | ||||
|                 late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log) | ||||
|                 late_block_logging( | ||||
|                     &chain, | ||||
|                     seen_timestamp, | ||||
|                     beacon_block.message(), | ||||
|                     root, | ||||
|                     "local", | ||||
|                     &log, | ||||
|                 ) | ||||
|             } | ||||
| 
 | ||||
|             Ok(()) | ||||
|             Ok(warp::reply().into_response()) | ||||
|         } | ||||
|         Err(BlockError::BlockIsAlreadyKnown) => { | ||||
|             info!( | ||||
|                 log, | ||||
|                 "Block from HTTP API already known"; | ||||
|                 "block" => ?block.canonical_root(), | ||||
|                 "slot" => block.slot(), | ||||
|             ); | ||||
|             Ok(()) | ||||
|         } | ||||
|         Err(BlockError::RepeatProposal { proposer, slot }) => { | ||||
|             warn!( | ||||
|                 log, | ||||
|                 "Block ignored due to repeat proposal"; | ||||
|                 "msg" => "this can happen when a VC uses fallback BNs. \ | ||||
|                     whilst this is not necessarily an error, it can indicate issues with a BN \ | ||||
|                     or between the VC and BN.",
 | ||||
|                 "slot" => slot, | ||||
|                 "proposer" => proposer, | ||||
|             ); | ||||
|             Ok(()) | ||||
|         Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { | ||||
|             Err(warp_utils::reject::custom_server_error( | ||||
|                 "unable to publish to network channel".to_string(), | ||||
|             )) | ||||
|         } | ||||
|         Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request( | ||||
|             "proposal for this slot and proposer has already been seen".to_string(), | ||||
|         )), | ||||
|         Err(e) => { | ||||
|             if let BroadcastValidation::Gossip = validation_level { | ||||
|                 Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) | ||||
|             } else { | ||||
|                 let msg = format!("{:?}", e); | ||||
|                 error!( | ||||
|                     log, | ||||
|                     "Invalid block provided to HTTP API"; | ||||
|                     "reason" => &msg | ||||
|                 ); | ||||
|             Err(warp_utils::reject::broadcast_without_import(msg)) | ||||
|                 Err(warp_utils::reject::custom_bad_request(format!( | ||||
|                     "Invalid block: {e}" | ||||
|                 ))) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
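The `validation_level` parameter mirrors the beacon API's `broadcast_validation` query parameter. The three levels handled above, restated as a sketch (the real enum lives in the `eth2` types crate):

```rust
// Sketch of the broadcast validation levels:
enum BroadcastValidation {
    /// Publish as soon as gossip checks pass (the pre-existing behaviour).
    Gossip,
    /// Publish only after the block is fully consensus-valid.
    Consensus,
    /// As `Consensus`, but also refuse to publish a slashable equivocation.
    ConsensusAndEquivocation,
}
```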
| @ -135,21 +224,33 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>( | ||||
|     chain: Arc<BeaconChain<T>>, | ||||
|     network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, | ||||
|     log: Logger, | ||||
| ) -> Result<(), Rejection> { | ||||
|     validation_level: BroadcastValidation, | ||||
|     duplicate_status_code: StatusCode, | ||||
| ) -> Result<Response, Rejection> { | ||||
|     let block_root = block.canonical_root(); | ||||
|     let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; | ||||
|     publish_block::<T>(Some(block_root), full_block, chain, network_tx, log).await | ||||
|     let full_block: ProvenancedBlock<T, Arc<SignedBeaconBlock<T::EthSpec>>> = | ||||
|         reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; | ||||
|     publish_block::<T, _>( | ||||
|         Some(block_root), | ||||
|         full_block, | ||||
|         chain, | ||||
|         network_tx, | ||||
|         log, | ||||
|         validation_level, | ||||
|         duplicate_status_code, | ||||
|     ) | ||||
|     .await | ||||
| } | ||||
| 
 | ||||
| /// Deconstruct the given blinded block, and construct a full block. This attempts to use the
 | ||||
| /// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve
 | ||||
| /// the full payload.
 | ||||
| async fn reconstruct_block<T: BeaconChainTypes>( | ||||
| pub async fn reconstruct_block<T: BeaconChainTypes>( | ||||
|     chain: Arc<BeaconChain<T>>, | ||||
|     block_root: Hash256, | ||||
|     block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, | ||||
|     log: Logger, | ||||
| ) -> Result<ProvenancedBlock<T::EthSpec>, Rejection> { | ||||
| ) -> Result<ProvenancedBlock<T, Arc<SignedBeaconBlock<T::EthSpec>>>, Rejection> { | ||||
|     let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { | ||||
|         let el = chain.execution_layer.as_ref().ok_or_else(|| { | ||||
|             warp_utils::reject::custom_server_error("Missing execution layer".to_string()) | ||||
| @ -215,15 +316,15 @@ async fn reconstruct_block<T: BeaconChainTypes>( | ||||
|         None => block | ||||
|             .try_into_full_block(None) | ||||
|             .map(Arc::new) | ||||
|             .map(ProvenancedBlock::Local), | ||||
|             .map(ProvenancedBlock::local), | ||||
|         Some(ProvenancedPayload::Local(full_payload)) => block | ||||
|             .try_into_full_block(Some(full_payload)) | ||||
|             .map(Arc::new) | ||||
|             .map(ProvenancedBlock::Local), | ||||
|             .map(ProvenancedBlock::local), | ||||
|         Some(ProvenancedPayload::Builder(full_payload)) => block | ||||
|             .try_into_full_block(Some(full_payload)) | ||||
|             .map(Arc::new) | ||||
|             .map(ProvenancedBlock::Builder), | ||||
|             .map(ProvenancedBlock::builder), | ||||
|     } | ||||
|     .ok_or_else(|| { | ||||
|         warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) | ||||
|  | ||||
| @ -70,15 +70,30 @@ impl StateId { | ||||
|                     .map_err(BeaconChainError::DBError) | ||||
|                     .map_err(warp_utils::reject::beacon_chain_error)? | ||||
|                 { | ||||
|                     let execution_optimistic = chain | ||||
|                         .canonical_head | ||||
|                         .fork_choice_read_lock() | ||||
|                         .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) | ||||
|                     let finalization_status = chain | ||||
|                         .state_finalization_and_canonicity(root, hot_summary.slot) | ||||
|                         .map_err(warp_utils::reject::beacon_chain_error)?; | ||||
|                     let finalized = finalization_status.is_finalized(); | ||||
|                     let fork_choice = chain.canonical_head.fork_choice_read_lock(); | ||||
|                     let execution_optimistic = if finalization_status.slot_is_finalized | ||||
|                         && !finalization_status.canonical | ||||
|                     { | ||||
|                         // This block is permanently orphaned and has likely been pruned from fork
 | ||||
|                         // choice. If it isn't found in fork choice, mark it optimistic to be on the
 | ||||
|                         // safe side.
 | ||||
|                         fork_choice | ||||
|                             .is_optimistic_or_invalid_block_no_fallback( | ||||
|                                 &hot_summary.latest_block_root, | ||||
|                             ) | ||||
|                             .unwrap_or(true) | ||||
|                     } else { | ||||
|                         // This block is either old and finalized, or recent and unfinalized, so
 | ||||
|                         // it's safe to fallback to the optimistic status of the finalized block.
 | ||||
|                         fork_choice | ||||
|                             .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) | ||||
|                             .map_err(BeaconChainError::ForkChoiceError) | ||||
|                         .map_err(warp_utils::reject::beacon_chain_error)?; | ||||
|                     let finalized = chain | ||||
|                         .is_finalized_state(root, hot_summary.slot) | ||||
|                         .map_err(warp_utils::reject::beacon_chain_error)?; | ||||
|                             .map_err(warp_utils::reject::beacon_chain_error)? | ||||
|                     }; | ||||
|                     return Ok((*root, execution_optimistic, finalized)); | ||||
|                 } else if let Some(_cold_state_slot) = chain | ||||
|                     .store | ||||
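The branch above distinguishes permanently orphaned states (finalized slot, non-canonical) from everything else before consulting fork choice. A condensed sketch of that decision, with the fork-choice lookups abstracted into booleans:

```rust
// Sketch: orphaned states may be pruned from fork choice, so an absent
// lookup defaults to optimistic; otherwise the finalized-block fallback
// is safe to use.
fn execution_optimistic(
    slot_is_finalized: bool,
    canonical: bool,
    no_fallback_lookup: Option<bool>, // None if pruned from fork choice
    fallback_lookup: bool,
) -> bool {
    if slot_is_finalized && !canonical {
        no_fallback_lookup.unwrap_or(true) // unknown => assume optimistic
    } else {
        fallback_lookup
    }
}
```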
|  | ||||
| @ -6,7 +6,7 @@ use beacon_chain::sync_committee_verification::{ | ||||
| }; | ||||
| use beacon_chain::{ | ||||
|     validator_monitor::timestamp_now, BeaconChain, BeaconChainError, BeaconChainTypes, | ||||
|     StateSkipConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY, | ||||
|     StateSkipConfig, | ||||
| }; | ||||
| use eth2::types::{self as api_types}; | ||||
| use lighthouse_network::PubsubMessage; | ||||
| @ -85,7 +85,7 @@ fn duties_from_state_load<T: BeaconChainTypes>( | ||||
|     let current_epoch = chain.epoch()?; | ||||
|     let tolerant_current_epoch = chain | ||||
|         .slot_clock | ||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) | ||||
|         .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) | ||||
|         .ok_or(BeaconChainError::UnableToReadSlot)? | ||||
|         .epoch(T::EthSpec::slots_per_epoch()); | ||||
| 
 | ||||
| @ -304,7 +304,7 @@ pub fn process_signed_contribution_and_proofs<T: BeaconChainTypes>( | ||||
|             } | ||||
|             // If we already know the contribution, don't broadcast it or attempt to
 | ||||
|             // further verify it. Return success.
 | ||||
|             Err(SyncVerificationError::SyncContributionAlreadyKnown(_)) => continue, | ||||
|             Err(SyncVerificationError::SyncContributionSupersetKnown(_)) => continue, | ||||
|             // If we've already seen this aggregator produce an aggregate, just
 | ||||
|             // skip this one.
 | ||||
|             //
 | ||||
|  | ||||
beacon_node/http_api/src/task_spawner.rs (new file, 192 lines)
							| @ -0,0 +1,192 @@ | ||||
| use beacon_processor::{BeaconProcessorSend, BlockingOrAsync, Work, WorkEvent}; | ||||
| use serde::Serialize; | ||||
| use std::future::Future; | ||||
| use tokio::sync::{mpsc::error::TrySendError, oneshot}; | ||||
| use types::EthSpec; | ||||
| use warp::reply::{Reply, Response}; | ||||
| 
 | ||||
| /// Maps a request to a queue in the `BeaconProcessor`.
 | ||||
| #[derive(Clone, Copy)] | ||||
| pub enum Priority { | ||||
|     /// The highest priority.
 | ||||
|     P0, | ||||
|     /// The lowest priority.
 | ||||
|     P1, | ||||
| } | ||||
| 
 | ||||
| impl Priority { | ||||
|     /// Wrap `self` in a `WorkEvent` with an appropriate priority.
 | ||||
|     fn work_event<E: EthSpec>(&self, process_fn: BlockingOrAsync) -> WorkEvent<E> { | ||||
|         let work = match self { | ||||
|             Priority::P0 => Work::ApiRequestP0(process_fn), | ||||
|             Priority::P1 => Work::ApiRequestP1(process_fn), | ||||
|         }; | ||||
|         WorkEvent { | ||||
|             drop_during_sync: false, | ||||
|             work, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Spawns tasks on the `BeaconProcessor` or directly on the tokio executor.
 | ||||
| pub struct TaskSpawner<E: EthSpec> { | ||||
|     /// Used to send tasks to the `BeaconProcessor`. The tokio executor will be
 | ||||
|     /// used if this is `None`.
 | ||||
|     beacon_processor_send: Option<BeaconProcessorSend<E>>, | ||||
| } | ||||
| 
 | ||||
| /// Convert a warp `Rejection` into a `Response`.
 | ||||
| ///
 | ||||
| /// This function should *always* be used to convert rejections into responses. This prevents warp
 | ||||
| /// from trying to backtrack in strange ways. See: https://github.com/sigp/lighthouse/issues/3404
 | ||||
| pub async fn convert_rejection<T: Reply>(res: Result<T, warp::Rejection>) -> Response { | ||||
|     match res { | ||||
|         Ok(response) => response.into_response(), | ||||
|         Err(e) => match warp_utils::reject::handle_rejection(e).await { | ||||
|             Ok(reply) => reply.into_response(), | ||||
|             Err(_) => warp::reply::with_status( | ||||
|                 warp::reply::json(&"unhandled error"), | ||||
|                 eth2::StatusCode::INTERNAL_SERVER_ERROR, | ||||
|             ) | ||||
|             .into_response(), | ||||
|         }, | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<E: EthSpec> TaskSpawner<E> { | ||||
|     pub fn new(beacon_processor_send: Option<BeaconProcessorSend<E>>) -> Self { | ||||
|         Self { | ||||
|             beacon_processor_send, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Executes a "blocking" (non-async) task which returns a `Response`.
 | ||||
|     pub async fn blocking_response_task<F, T>(self, priority: Priority, func: F) -> Response | ||||
|     where | ||||
|         F: FnOnce() -> Result<T, warp::Rejection> + Send + Sync + 'static, | ||||
|         T: Reply + Send + 'static, | ||||
|     { | ||||
|         if let Some(beacon_processor_send) = &self.beacon_processor_send { | ||||
|             // Create a closure that will execute `func` and send the result to
 | ||||
|             // a channel held by this thread.
 | ||||
|             let (tx, rx) = oneshot::channel(); | ||||
|             let process_fn = move || { | ||||
|                 // Execute the function, collect the return value.
 | ||||
|                 let func_result = func(); | ||||
|                 // Send the result down the channel. Ignore any failures; the
 | ||||
|                 // send can only fail if the receiver is dropped.
 | ||||
|                 let _ = tx.send(func_result); | ||||
|             }; | ||||
| 
 | ||||
|             // Send the function to the beacon processor for execution at some arbitrary time.
 | ||||
|             let result = send_to_beacon_processor( | ||||
|                 beacon_processor_send, | ||||
|                 priority, | ||||
|                 BlockingOrAsync::Blocking(Box::new(process_fn)), | ||||
|                 rx, | ||||
|             ) | ||||
|             .await | ||||
|             .and_then(|x| x); | ||||
|             convert_rejection(result).await | ||||
|         } else { | ||||
|             // There is no beacon processor so spawn a task directly on the
 | ||||
|             // tokio executor.
 | ||||
|             convert_rejection(warp_utils::task::blocking_response_task(func).await).await | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Executes a "blocking" (non-async) task which returns a JSON-serializable
 | ||||
|     /// object.
 | ||||
|     pub async fn blocking_json_task<F, T>(self, priority: Priority, func: F) -> Response | ||||
|     where | ||||
|         F: FnOnce() -> Result<T, warp::Rejection> + Send + Sync + 'static, | ||||
|         T: Serialize + Send + 'static, | ||||
|     { | ||||
|         let func = || func().map(|t| warp::reply::json(&t).into_response()); | ||||
|         self.blocking_response_task(priority, func).await | ||||
|     } | ||||
| 
 | ||||

    /// Executes an async task which may return a `Rejection`, which will be converted to a response.
    pub async fn spawn_async_with_rejection(
        self,
        priority: Priority,
        func: impl Future<Output = Result<Response, warp::Rejection>> + Send + Sync + 'static,
    ) -> Response {
        let result = self
            .spawn_async_with_rejection_no_conversion(priority, func)
            .await;
        convert_rejection(result).await
    }

    /// Same as `spawn_async_with_rejection` but returns a result containing the unhandled rejection.
    ///
    /// If you call this function you MUST convert the rejection to a response and not let it
    /// propagate into Warp's filters. See `convert_rejection`.
    pub async fn spawn_async_with_rejection_no_conversion(
        self,
        priority: Priority,
        func: impl Future<Output = Result<Response, warp::Rejection>> + Send + Sync + 'static,
    ) -> Result<Response, warp::Rejection> {
        if let Some(beacon_processor_send) = &self.beacon_processor_send {
            // Create a wrapper future that will execute `func` and send the
            // result to a channel held by this thread.
            let (tx, rx) = oneshot::channel();
            let process_fn = async move {
                // Await the future, collect the return value.
                let func_result = func.await;
                // Send the result down the channel. Ignore any failures; the
                // send can only fail if the receiver is dropped.
                let _ = tx.send(func_result);
            };

            // Send the future to the beacon processor for execution at some arbitrary time.
            send_to_beacon_processor(
                beacon_processor_send,
                priority,
                BlockingOrAsync::Async(Box::pin(process_fn)),
                rx,
            )
            .await
            // Flatten the nested result, as in `blocking_response_task` above.
            .and_then(|x| x)
        } else {
            // There is no beacon processor so spawn a task directly on the
            // tokio executor.
            tokio::task::spawn(func)
                .await
                .map_err(|_| {
                    warp_utils::reject::custom_server_error("Tokio failed to spawn task".into())
                })
                .and_then(|x| x)
        }
    }
}
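
// Usage sketch (hypothetical free function; `spawner` is an assumed
// `TaskSpawner<E>` handle): the async variant hands a whole future to the
// beacon processor and converts any rejection into a response for the caller.
async fn example_async_route<E: EthSpec>(spawner: TaskSpawner<E>) -> Response {
    spawner
        .spawn_async_with_rejection(Priority::P0, async {
            Ok(warp::reply::reply().into_response())
        })
        .await
}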

/// Send a task to the beacon processor and await execution.
///
/// If the task is not executed, return an `Err` with an error message
/// for the API consumer.
async fn send_to_beacon_processor<E: EthSpec, T>(
    beacon_processor_send: &BeaconProcessorSend<E>,
    priority: Priority,
    process_fn: BlockingOrAsync,
    rx: oneshot::Receiver<T>,
) -> Result<T, warp::Rejection> {
    let error_message = match beacon_processor_send.try_send(priority.work_event(process_fn)) {
        Ok(()) => {
            match rx.await {
                // The beacon processor executed the task and sent a result.
                Ok(func_result) => return Ok(func_result),
                // The beacon processor dropped the channel without sending a
                // result. The beacon processor dropped this task because its
                // queues are full or it's shutting down.
                Err(_) => "The task did not execute. The server is overloaded or shutting down.",
            }
        }
        Err(TrySendError::Full(_)) => "The task was dropped. The server is overloaded.",
        Err(TrySendError::Closed(_)) => "The task was dropped. The server is shutting down.",
    };

    Err(warp_utils::reject::custom_server_error(
        error_message.to_string(),
    ))
}
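
// Call-shape sketch for the private helper above (`tx` is an assumed
// `BeaconProcessorSend<E>` handle): the spawner methods reduce to queueing a
// `BlockingOrAsync` work event and awaiting the matching oneshot receiver.
async fn example_direct_send<E: EthSpec>(tx: BeaconProcessorSend<E>) {
    let (result_tx, result_rx) = oneshot::channel::<u64>();
    let work = BlockingOrAsync::Blocking(Box::new(move || {
        // Runs on a beacon processor worker at some later time.
        let _ = result_tx.send(42);
    }));
    // An `Err` here means the task never ran: the queue was full (overload)
    // or the channel was closed (shutdown).
    let _result: Result<u64, warp::Rejection> =
        send_to_beacon_processor(&tx, Priority::P0, work, result_rx).await;
}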

@@ -5,16 +5,14 @@ use beacon_chain::{
    },
    BeaconChain, BeaconChainTypes,
};
use beacon_processor::{BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig};
use directory::DEFAULT_ROOT_DIR;
use eth2::{BeaconNodeHttpClient, Timeouts};
use lighthouse_network::{
    discv5::enr::{CombinedKey, EnrBuilder},
    libp2p::{
        core::connection::ConnectionId,
        swarm::{
    libp2p::swarm::{
        behaviour::{ConnectionEstablished, FromSwarm},
            NetworkBehaviour,
        },
        ConnectionId, NetworkBehaviour,
    },
    rpc::methods::{MetaData, MetaDataV2},
    types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
@@ -25,11 +23,11 @@ use network::{NetworkReceivers, NetworkSenders};
use sensitive_url::SensitiveUrl;
use slog::Logger;
use std::future::Future;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use store::MemoryStore;
use tokio::sync::oneshot;
use task_executor::test_utils::TestRuntime;
use types::{ChainSpec, EthSpec};

pub const TCP_PORT: u16 = 42;
@@ -42,7 +40,6 @@ pub struct InteractiveTester<E: EthSpec> {
    pub harness: BeaconChainHarness<EphemeralHarnessType<E>>,
    pub client: BeaconNodeHttpClient,
    pub network_rx: NetworkReceivers<E>,
    _server_shutdown: oneshot::Sender<()>,
}

/// The result of calling `create_api_server`.
@@ -51,7 +48,6 @@ pub struct InteractiveTester<E: EthSpec> {
pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
    pub server: SFut,
    pub listening_socket: SocketAddr,
    pub shutdown_tx: oneshot::Sender<()>,
    pub network_rx: NetworkReceivers<E>,
    pub local_enr: Enr,
    pub external_peer_id: PeerId,
@@ -99,10 +95,14 @@ impl<E: EthSpec> InteractiveTester<E> {
        let ApiServer {
            server,
            listening_socket,
            shutdown_tx: _server_shutdown,
            network_rx,
            ..
        } = create_api_server(harness.chain.clone(), harness.logger().clone()).await;
        } = create_api_server(
            harness.chain.clone(),
            &harness.runtime,
            harness.logger().clone(),
        )
        .await;

        tokio::spawn(server);

@@ -120,25 +120,18 @@ impl<E: EthSpec> InteractiveTester<E> {
            harness,
            client,
            network_rx,
            _server_shutdown,
        }
    }
}

pub async fn create_api_server<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    test_runtime: &TestRuntime,
    log: Logger,
) -> ApiServer<T::EthSpec, impl Future<Output = ()>> {
    // Get a random unused port.
    let port = unused_port::unused_tcp4_port().unwrap();
    create_api_server_on_port(chain, log, port).await
}
    // Use port 0 to allocate a new unused port.
    let port = 0;

pub async fn create_api_server_on_port<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    log: Logger,
    port: u16,
) -> ApiServer<T::EthSpec, impl Future<Output = ()>> {
    let (network_senders, network_receivers) = NetworkSenders::new();

    // Default metadata
@@ -151,8 +144,6 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
    let enr = EnrBuilder::new("v4").build(&enr_key).unwrap();
    let network_globals = Arc::new(NetworkGlobals::new(
        enr.clone(),
        Some(TCP_PORT),
        None,
        meta_data,
        vec![],
        false,
@@ -170,7 +161,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
        local_addr: EXTERNAL_ADDR.parse().unwrap(),
        send_back_addr: EXTERNAL_ADDR.parse().unwrap(),
    };
    let connection_id = ConnectionId::new(1);
    let connection_id = ConnectionId::new_unchecked(1);
    pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
        peer_id,
        connection_id,
@@ -183,36 +174,60 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
    let eth1_service =
        eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap();

    let beacon_processor_config = BeaconProcessorConfig {
        // The number of workers must be greater than one. Tests which use the
        // builder workflow sometimes require an internal HTTP request in order
        // to fulfill an already in-flight HTTP request, therefore having only
        // one worker will result in a deadlock.
        max_workers: 2,
        ..BeaconProcessorConfig::default()
    };
    let BeaconProcessorChannels {
        beacon_processor_tx,
        beacon_processor_rx,
        work_reprocessing_tx,
        work_reprocessing_rx,
    } = BeaconProcessorChannels::new(&beacon_processor_config);

    let beacon_processor_send = beacon_processor_tx;
    BeaconProcessor {
        network_globals: network_globals.clone(),
        executor: test_runtime.task_executor.clone(),
        current_workers: 0,
        config: beacon_processor_config,
        log: log.clone(),
    }
    .spawn_manager(
        beacon_processor_rx,
        work_reprocessing_tx,
        work_reprocessing_rx,
        None,
        chain.slot_clock.clone(),
        chain.spec.maximum_gossip_clock_disparity(),
    )
    .unwrap();

    let ctx = Arc::new(Context {
        config: Config {
            enabled: true,
            listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
            listen_port: port,
            allow_origin: None,
            tls_config: None,
            allow_sync_stalled: false,
            data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR),
            spec_fork_name: None,
            ..Config::default()
        },
        chain: Some(chain),
        network_senders: Some(network_senders),
        network_globals: Some(network_globals),
        beacon_processor_send: Some(beacon_processor_send),
        eth1_service: Some(eth1_service),
        sse_logging_components: None,
        log,
    });

    let (shutdown_tx, shutdown_rx) = oneshot::channel();
    let server_shutdown = async {
        // It's not really interesting why this triggered, just that it happened.
        let _ = shutdown_rx.await;
    };
    let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap();
    let (listening_socket, server) = crate::serve(ctx, test_runtime.task_executor.exit()).unwrap();

    ApiServer {
        server,
        listening_socket,
        shutdown_tx,
        network_rx: network_receivers,
        local_enr: enr,
        external_peer_id: peer_id,

21 beacon_node/http_api/src/validator.rs Normal file
@@ -0,0 +1,21 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use types::*;

/// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a validator
/// index and then ensures that the validator exists in the given `state`.
pub fn pubkey_to_validator_index<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    state: &BeaconState<T::EthSpec>,
    pubkey: &PublicKeyBytes,
) -> Result<Option<usize>, BeaconChainError> {
    chain
        .validator_index(pubkey)?
        .filter(|&index| {
            state
                .validators()
                .get(index)
                .map_or(false, |v| v.pubkey == *pubkey)
        })
        .map(Result::Ok)
        .transpose()
}
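
// Usage sketch (hypothetical caller): an unknown pubkey and a known pubkey
// whose index is absent from this particular `state` both surface as
// `Ok(None)`, so handlers can map that case to "not found" while real
// `BeaconChainError`s propagate.
fn example_lookup<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    state: &BeaconState<T::EthSpec>,
    pubkey: &PublicKeyBytes,
) -> Result<(), BeaconChainError> {
    match pubkey_to_validator_index(chain, state, pubkey)? {
        Some(index) => println!("validator {index} is in this state"),
        None => println!("validator not found in this state"),
    }
    Ok(())
}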
							
								
								
									
1358 beacon_node/http_api/tests/broadcast_validation_tests.rs Normal file
File diff suppressed because it is too large
@@ -326,11 +326,8 @@ async fn sync_committee_indices_across_fork() {

/// Assert that an HTTP API error has the given status code and indexed errors for the given indices.
fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec<usize>) {
    let eth2::Error::ServerIndexedMessage(IndexedErrorMessage {
        code,
        failures,
        ..
    }) = error else {
    let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { code, failures, .. }) = error
    else {
        panic!("wrong error, expected ServerIndexedMessage, got: {error:?}")
    };
    assert_eq!(code, status_code);

@@ -2,8 +2,9 @@
use beacon_chain::{
    chain_config::{DisallowedReOrgOffsets, ReOrgThreshold},
    test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
    ChainConfig,
};
use eth2::types::DepositContractData;
use eth2::types::{DepositContractData, StateId};
use execution_layer::{ForkchoiceState, PayloadAttributes};
use http_api::test_utils::InteractiveTester;
use parking_lot::Mutex;
@@ -17,7 +18,7 @@ use std::time::Duration;
use tree_hash::TreeHash;
use types::{
    Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload,
    MainnetEthSpec, ProposerPreparationData, Slot,
    MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot,
};

type E = MainnetEthSpec;
@@ -48,6 +49,76 @@ async fn deposit_contract_custom_network() {
    assert_eq!(result, expected);
}

// Test that state lookups by root function correctly for states that are finalized but still
// present in the hot database, and have had their block pruned from fork choice.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn state_by_root_pruned_from_fork_choice() {
    type E = MinimalEthSpec;

    let validator_count = 24;
    let spec = ForkName::latest().make_genesis_spec(E::default_spec());

    let tester = InteractiveTester::<E>::new_with_initializer_and_mutator(
        Some(spec.clone()),
        validator_count,
        Some(Box::new(move |builder| {
            builder
                .deterministic_keypairs(validator_count)
                .fresh_ephemeral_store()
                .chain_config(ChainConfig {
                    epochs_per_migration: 1024,
                    ..ChainConfig::default()
                })
        })),
        None,
    )
    .await;

    let client = &tester.client;
    let harness = &tester.harness;

    // Create some chain depth and finalize beyond fork choice's pruning depth.
    let num_epochs = 8_u64;
    let num_initial = num_epochs * E::slots_per_epoch();
    harness.advance_slot();
    harness
        .extend_chain_with_sync(
            num_initial as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
            SyncCommitteeStrategy::NoValidators,
        )
        .await;

    // Should now be finalized.
    let finalized_epoch = harness.finalized_checkpoint().epoch;
    assert_eq!(finalized_epoch, num_epochs - 2);
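    // Why `num_epochs - 2`: with `AttestationStrategy::AllValidators`, each
    // epoch is justified as soon as its attestations are processed, and an
    // epoch is finalized once the following epoch is justified, so finality
    // trails the current epoch by two.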

    // The split slot should still be at 0.
    assert_eq!(harness.chain.store.get_split_slot(), 0);

    // States that are between the split and the finalized slot should be able to be looked up by
    // state root.
    for slot in 0..finalized_epoch.start_slot(E::slots_per_epoch()).as_u64() {
        let state_root = harness
            .chain
            .state_root_at_slot(Slot::new(slot))
            .unwrap()
            .unwrap();
        let response = client
            .get_debug_beacon_states::<E>(StateId::Root(state_root))
            .await
            .unwrap()
            .unwrap();

        assert!(response.finalized.unwrap());
        assert!(!response.execution_optimistic.unwrap());

        let mut state = response.data;
        assert_eq!(state.update_tree_hash_cache().unwrap(), state_root);
    }
}

/// Data structure for tracking fork choice updates received by the mock execution layer.
#[derive(Debug, Default)]
struct ForkChoiceUpdates {

@@ -1,5 +1,6 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug.

pub mod broadcast_validation_tests;
pub mod fork_tests;
pub mod interactive_tests;
pub mod status_tests;

@@ -3,6 +3,7 @@ use beacon_chain::{
    test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
    BlockError,
};
use eth2::StatusCode;
use execution_layer::{PayloadStatusV1, PayloadStatusV1Status};
use http_api::test_utils::InteractiveTester;
use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot};
@@ -143,3 +144,82 @@ async fn el_error_on_new_payload() {
    assert_eq!(api_response.is_optimistic, Some(false));
    assert_eq!(api_response.is_syncing, false);
}

/// Check the `node health` endpoint when the EL is offline.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_offline() {
    let num_blocks = E::slots_per_epoch() / 2;
    let num_validators = E::slots_per_epoch();
    let tester = post_merge_tester(num_blocks, num_validators).await;
    let harness = &tester.harness;
    let mock_el = harness.mock_execution_layer.as_ref().unwrap();

    // EL offline
    mock_el.server.set_syncing_response(Err("offline".into()));
    mock_el.el.upcheck().await;

    let status = tester.client.get_node_health().await;
    match status {
        Ok(_) => {
            panic!("should return 503 error status code");
        }
        Err(e) => {
            assert_eq!(e.status().unwrap(), 503);
        }
    }
}

/// Check the `node health` endpoint when the EL is online and synced.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_online_and_synced() {
    let num_blocks = E::slots_per_epoch() / 2;
    let num_validators = E::slots_per_epoch();
    let tester = post_merge_tester(num_blocks, num_validators).await;
    let harness = &tester.harness;
    let mock_el = harness.mock_execution_layer.as_ref().unwrap();

    // EL synced
    mock_el.server.set_syncing_response(Ok(false));
    mock_el.el.upcheck().await;

    let status = tester.client.get_node_health().await;
    match status {
        Ok(response) => {
            assert_eq!(response, StatusCode::OK);
        }
        Err(_) => {
            panic!("should return 200 status code");
        }
    }
}

/// Check the `node health` endpoint when the EL is online but not synced.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_online_and_not_synced() {
    let num_blocks = E::slots_per_epoch() / 2;
    let num_validators = E::slots_per_epoch();
    let tester = post_merge_tester(num_blocks, num_validators).await;
    let harness = &tester.harness;
    let mock_el = harness.mock_execution_layer.as_ref().unwrap();

    // EL not synced
    harness.advance_slot();
    mock_el.server.all_payloads_syncing(true);
    harness
        .extend_chain(
            1,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        )
        .await;

    let status = tester.client.get_node_health().await;
    match status {
        Ok(response) => {
            assert_eq!(response, StatusCode::PARTIAL_CONTENT);
        }
        Err(_) => {
            panic!("should return 206 status code");
        }
    }
}
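
// Recap of the health semantics exercised above: 200 OK when synced,
// 206 PARTIAL_CONTENT while syncing (here: EL online but not synced), and
// 503 SERVICE_UNAVAILABLE when the EL is offline. A sketch of how a caller
// might collapse these into a readiness check (hypothetical helper, not part
// of this diff):
fn example_is_ready(status: StatusCode) -> bool {
    // Both 200 and 206 mean the HTTP API itself is serving requests.
    matches!(status, StatusCode::OK | StatusCode::PARTIAL_CONTENT)
}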

Some files were not shown because too many files have changed in this diff.