Merge branch 'sigp:stable' into stable

commit f70b3ef11d
.editorconfig
@@ -6,4 +6,4 @@ end_of_line=lf
 charset=utf-8
 trim_trailing_whitespace=true
 max_line_length=100
-insert_final_newline=false
+insert_final_newline=true
							
								
								
									
.github/workflows/book.yml (4 changes, vendored)
@@ -5,6 +5,10 @@ on:
     branches:
       - unstable
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build-and-upload-to-s3:
     runs-on: ubuntu-20.04
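
The `concurrency` block added above (and to the other workflows in this merge) keys runs on the workflow name plus the git ref, and cancels any run still in progress when a new run starts for the same group; this is what makes the dedicated cancel-previous-runs workflow, deleted below, redundant. A minimal sketch of the pattern, with a hypothetical workflow:

    # Hypothetical workflow illustrating the cancellation pattern above.
    name: example
    on:
      push:
        branches:
          - unstable

    # One group per (workflow, ref): a second push to the same branch
    # cancels the run that is still in progress for that group.
    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}
      cancel-in-progress: true

    jobs:
      build:
        runs-on: ubuntu-22.04
        steps:
          - uses: actions/checkout@v3
          - run: echo "building"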
							
								
								
									
.github/workflows/cancel-previous-runs.yml (14 changes, vendored)
@@ -1,14 +0,0 @@
-name: cancel previous runs
-on: [push]
-jobs:
-  cancel:
-    name: 'Cancel Previous Runs'
-    runs-on: ubuntu-latest
-    timeout-minutes: 3
-    steps:
-      # https://github.com/styfle/cancel-workflow-action/releases
-      - uses: styfle/cancel-workflow-action@514c783324374c6940d1b92bfb962d0763d22de3 # 0.7.0
-        with:
-          # https://api.github.com/repos/sigp/lighthouse/actions/workflows
-          workflow_id: 697364,2434944,4462424,308241,2883401,316
-          access_token: ${{ github.token }}
							
								
								
									
.github/workflows/docker-antithesis.yml (31 changes, vendored)
@@ -1,31 +0,0 @@
-name: docker antithesis
-
-on:
-    push:
-        branches:
-            - unstable
-
-env:
-    ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }}
-    ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }}
-    ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }}
-    REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }}
-    IMAGE_NAME: lighthouse
-    TAG: libvoidstar
-
-jobs:
-    build-docker:
-        runs-on: ubuntu-22.04
-        steps:
-            - uses: actions/checkout@v3
-            - name: Update Rust
-              run: rustup update stable
-            - name: Dockerhub login
-              run: |
-                  echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin
-            - name: Build AMD64 dockerfile (with push)
-              run: |
-                  docker build \
-                      --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \
-                      --file ./testing/antithesis/Dockerfile.libvoidstar .
-                  docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG}
							
								
								
									
.github/workflows/docker.yml (65 changes, vendored)
@@ -8,11 +8,17 @@ on:
         tags:
             - v*
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 env:
     DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
     DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
     IMAGE_NAME: ${{ github.repository_owner}}/lighthouse
     LCLI_IMAGE_NAME: ${{ github.repository_owner }}/lcli
+    # Enable self-hosted runners for the sigp repo only.
+    SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
 
 jobs:
     # Extract the VERSION which is either `latest` or `vX.Y.Z`, and the VERSION_SUFFIX
@@ -44,7 +50,8 @@ jobs:
             VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
     build-docker-single-arch:
         name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }}
-        runs-on: ubuntu-22.04
+        # Use self-hosted runners only on the sigp repo.
+        runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04'  }}
         strategy:
             matrix:
                 binary: [aarch64,
@@ -60,14 +67,13 @@ jobs:
 
         needs: [extract-version]
         env:
-            # We need to enable experimental docker features in order to use `docker buildx`
-            DOCKER_CLI_EXPERIMENTAL: enabled
             VERSION: ${{ needs.extract-version.outputs.VERSION }}
             VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
             FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
         steps:
             - uses: actions/checkout@v3
             - name: Update Rust
+              if: env.SELF_HOSTED_RUNNERS == 'false'
               run: rustup update stable
             - name: Dockerhub login
               run: |
@@ -76,16 +82,14 @@ jobs:
               run: |
                  cargo install cross
                  env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }}
+            - name: Make bin dir
+              run: mkdir ./bin
             - name: Move cross-built binary into Docker scope (if ARM)
               if: startsWith(matrix.binary, 'aarch64')
-              run: |
-                  mkdir ./bin;
-                  mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin;
+              run: mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin
             - name: Move cross-built binary into Docker scope (if x86_64)
               if: startsWith(matrix.binary, 'x86_64')
-              run: |
-                  mkdir ./bin;
-                  mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin;
+              run: mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin
             - name: Map aarch64 to arm64 short arch
               if: startsWith(matrix.binary, 'aarch64')
               run: echo "SHORT_ARCH=arm64" >> $GITHUB_ENV
@@ -95,17 +99,24 @@ jobs:
             - name: Set modernity suffix
               if: endsWith(matrix.binary, '-portable') != true
               run: echo "MODERNITY_SUFFIX=-modern" >> $GITHUB_ENV;
-            # Install dependencies for emulation. Have to create a new builder to pick up emulation support.
-            - name: Build Dockerfile and push
-              run: |
-                  docker run --privileged --rm tonistiigi/binfmt --install ${SHORT_ARCH}
-                  docker buildx create --use --name cross-builder
-                  docker buildx build \
-                      --platform=linux/${SHORT_ARCH} \
-                      --file ./Dockerfile.cross . \
-                      --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \
-                      --provenance=false \
-                      --push
+
+            - name: Install QEMU
+              if: env.SELF_HOSTED_RUNNERS == 'false'
+              run: sudo apt-get update && sudo apt-get install -y qemu-user-static
+
+            - name: Set up Docker Buildx
+              if: env.SELF_HOSTED_RUNNERS == 'false'
+              uses: docker/setup-buildx-action@v2
+
+            - name: Build and push
+              uses: docker/build-push-action@v4
+              with:
+                file: ./Dockerfile.cross
+                context: .
+                platforms: linux/${{ env.SHORT_ARCH }}
+                push: true
+                tags: ${{ env.IMAGE_NAME }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}${{ env.MODERNITY_SUFFIX }}${{ env.FEATURE_SUFFIX }}
+
     build-docker-multiarch:
         name: build-docker-multiarch${{ matrix.modernity }}
         runs-on: ubuntu-22.04
@@ -114,20 +125,22 @@ jobs:
             matrix:
                 modernity: ["", "-modern"]
         env:
-            # We need to enable experimental docker features in order to use `docker manifest`
-            DOCKER_CLI_EXPERIMENTAL: enabled
             VERSION: ${{ needs.extract-version.outputs.VERSION }}
             VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
         steps:
+            - name: Set up Docker Buildx
+              uses: docker/setup-buildx-action@v2
+
             - name: Dockerhub login
               run: |
                  echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
+
             - name: Create and push multiarch manifest
               run: |
-                  docker manifest create ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \
-                      --amend ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \
-                      --amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }};
-                  docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }}
+                  docker buildx imagetools create -t ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \
+                      ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \
+                      ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }};
+
     build-docker-lcli:
         runs-on: ubuntu-22.04
         needs: [extract-version]
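
Two mechanics change in this file beyond the runner selection: single-arch images are now built with the maintained `docker/setup-buildx-action` and `docker/build-push-action` actions (with QEMU installed from apt instead of via the `tonistiigi/binfmt` container), and the multiarch manifest is assembled with `docker buildx imagetools create`, which, unlike `docker manifest`, does not require `DOCKER_CLI_EXPERIMENTAL=enabled`. A rough sketch of the manifest change, with hypothetical image tags:

    # Before: experimental CLI feature, create then push in two steps.
    docker manifest create repo/app:v1 \
        --amend repo/app:v1-arm64 \
        --amend repo/app:v1-amd64
    docker manifest push repo/app:v1

    # After: one stable buildx command creates and pushes the manifest list.
    docker buildx imagetools create -t repo/app:v1 \
        repo/app:v1-arm64 repo/app:v1-amd64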
							
								
								
									
.github/workflows/linkcheck.yml (4 changes, vendored)
@@ -9,6 +9,10 @@ on:
       - 'book/**'
   merge_group:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   linkcheck:
     name: Check broken links
							
								
								
									
.github/workflows/local-testnet.yml (8 changes, vendored)
@@ -8,6 +8,10 @@ on:
   pull_request:
   merge_group:
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   run-local-testnet:
     strategy:
@@ -21,10 +25,6 @@ jobs:
 
       - name: Get latest version of stable Rust
        run: rustup update stable
-      - name: Install Protoc
-        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-        with:
-            repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install geth (ubuntu)
         if: matrix.os == 'ubuntu-22.04'
         run: |
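
Note that the `arduino/setup-protoc` step disappears here and from every other workflow in this merge, along with `protoc`/`protobuf-compiler` in the choco and apt installs further down, presumably because the build no longer invokes `protoc` at compile time.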
							
								
								
									
.github/workflows/publish-crate.yml (66 changes, vendored)
@@ -1,66 +0,0 @@
-name:   Publish Crate
-
-on:
-    push:
-        tags:
-            - tree-hash-v*
-            - tree-hash-derive-v*
-            - eth2-ssz-v*
-            - eth2-ssz-derive-v*
-            - eth2-ssz-types-v*
-            - eth2-serde-util-v*
-            - eth2-hashing-v*
-
-env:
-    CARGO_API_TOKEN: ${{ secrets.CARGO_API_TOKEN }}
-
-jobs:
-    extract-tag:
-        runs-on: ubuntu-latest
-        steps:
-            - name: Extract tag
-              run: echo "TAG=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT
-              id: extract_tag
-        outputs:
-            TAG: ${{ steps.extract_tag.outputs.TAG }}
-
-    publish-crate:
-        runs-on: ubuntu-latest
-        needs: [extract-tag]
-        env:
-            TAG: ${{ needs.extract-tag.outputs.TAG }}
-        steps:
-            - uses: actions/checkout@v3
-            - name: Update Rust
-              run: rustup update stable
-            - name: Cargo login
-              run: |
-                  echo "${CARGO_API_TOKEN}" | cargo login
-            - name: publish eth2 ssz derive
-              if: startsWith(env.TAG, 'eth2-ssz-derive-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/ssz_derive eth2_ssz_derive "$TAG"
-            - name: publish eth2 ssz
-              if: startsWith(env.TAG, 'eth2-ssz-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/ssz eth2_ssz "$TAG"
-            - name: publish eth2 hashing
-              if: startsWith(env.TAG, 'eth2-hashing-v')
-              run: |
-                ./scripts/ci/publish.sh crypto/eth2_hashing eth2_hashing "$TAG"
-            - name: publish tree hash derive
-              if: startsWith(env.TAG, 'tree-hash-derive-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/tree_hash_derive tree_hash_derive "$TAG"
-            - name: publish tree hash
-              if: startsWith(env.TAG, 'tree-hash-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/tree_hash tree_hash "$TAG"
-            - name: publish ssz types
-              if: startsWith(env.TAG, 'eth2-ssz-types-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/ssz_types eth2_ssz_types "$TAG"
-            - name: publish serde util
-              if: startsWith(env.TAG, 'eth2-serde-util-v')
-              run: |
-                ./scripts/ci/publish.sh consensus/serde_utils eth2_serde_utils "$TAG"
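
The crates this workflow published (eth2_ssz, eth2_ssz_derive, eth2_ssz_types, eth2_hashing, eth2_serde_utils, tree_hash, tree_hash_derive) now arrive from crates.io as ordinary dependencies; see the ethereum_ssz, ethereum_ssz_derive, ssz_types, ethereum_hashing, ethereum_serde_utils, tree_hash and tree_hash_derive entries in the Cargo.toml diff at the bottom of this commit, which is presumably why an in-repo publish workflow is no longer needed.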
							
								
								
									
.github/workflows/release.yml (46 changes, vendored)
@@ -5,11 +5,17 @@ on:
         tags:
             - v*
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 env:
     DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
     DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
     REPO_NAME: ${{ github.repository_owner }}/lighthouse
     IMAGE_NAME: ${{ github.repository_owner }}/lighthouse
+    # Enable self-hosted runners for the sigp repo only.
+    SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
 
 jobs:
     extract-version:
@@ -34,36 +40,37 @@ jobs:
                        x86_64-windows-portable]
                 include:
                     -   arch: aarch64-unknown-linux-gnu
-                        platform: ubuntu-latest
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest'  }}
                         profile: maxperf
                     -   arch: aarch64-unknown-linux-gnu-portable
-                        platform: ubuntu-latest
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest'  }}
                         profile: maxperf
                     -   arch: x86_64-unknown-linux-gnu
-                        platform: ubuntu-latest
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest'  }}
                         profile: maxperf
                     -   arch: x86_64-unknown-linux-gnu-portable
-                        platform: ubuntu-latest
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest'  }}
                         profile: maxperf
                     -   arch: x86_64-apple-darwin
-                        platform: macos-latest
+                        runner: macos-latest
                         profile: maxperf
                     -   arch: x86_64-apple-darwin-portable
-                        platform: macos-latest
+                        runner: macos-latest
                         profile: maxperf
                     -   arch: x86_64-windows
-                        platform: windows-2019
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019'  }}
                         profile: maxperf
                     -   arch: x86_64-windows-portable
-                        platform: windows-2019
+                        runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019'  }}
                         profile: maxperf
 
-        runs-on:    ${{ matrix.platform }}
+        runs-on:    ${{ matrix.runner }}
         needs: extract-version
         steps:
             - name: Checkout sources
               uses: actions/checkout@v3
             - name: Get latest version of stable Rust
+              if: env.SELF_HOSTED_RUNNERS == 'false'
               run: rustup update stable
 
             # ==============================
@@ -71,7 +78,7 @@ jobs:
             # ==============================
 
             - uses: KyleMayes/install-llvm-action@v1
-              if: startsWith(matrix.arch, 'x86_64-windows')
+              if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows')
               with:
                 version: "15.0"
                 directory: ${{ runner.temp }}/llvm
@@ -79,15 +86,6 @@ jobs:
               if: startsWith(matrix.arch, 'x86_64-windows')
               run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
 
-            # ==============================
-            # Windows & Mac dependencies
-            # ==============================
-            - name: Install Protoc
-              if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows')
-              uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-              with:
-                repo-token: ${{ secrets.GITHUB_TOKEN }}
-
             # ==============================
             #       Builds
             # ==============================
@@ -134,17 +132,11 @@ jobs:
 
             - name: Build Lighthouse for Windows portable
               if:   matrix.arch == 'x86_64-windows-portable'
-              # NOTE: profile set to release until this rustc issue is fixed:
-              #
-              # https://github.com/rust-lang/rust/issues/107781
-              #
-              # tracked at: https://github.com/sigp/lighthouse/issues/3964
-              run:  cargo install --path lighthouse --force --locked --features portable,gnosis --profile release
+              run:  cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }}
 
             - name: Build Lighthouse for Windows modern
               if:   matrix.arch == 'x86_64-windows'
-              # NOTE: profile set to release (see above)
-              run:  cargo install --path lighthouse --force --locked --features modern,gnosis --profile release
+              run:  cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }}
 
             - name: Configure GPG and create artifacts
               if: startsWith(matrix.arch, 'x86_64-windows') != true
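
The `runner:` expressions above (and the `runs-on:` expressions in docker.yml and test-suite.yml) use the GitHub Actions `&&`/`||` operators as a makeshift ternary: on the sigp repo the condition is true, so `fromJson(...)` turns the JSON string into a real array of self-hosted runner labels, while forks fall through to the hosted-runner string after `||`. A minimal sketch of the idiom, with a hypothetical job and label set:

    jobs:
      build:
        # Label array on the main repo, plain 'ubuntu-latest' on forks.
        runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux"]') || 'ubuntu-latest' }}
        steps:
          - uses: actions/checkout@v3

Because `&&`/`||` short-circuit on truthiness rather than forming a true conditional, the idiom only works when the value picked by the true branch is itself truthy, as it is here.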
							
								
								
									
.github/workflows/test-suite.yml (127 changes, vendored)
@@ -9,6 +9,11 @@ on:
       - 'pr/*'
   pull_request:
   merge_group:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 env:
   # Deny warnings in CI
   # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
@@ -17,6 +22,10 @@ env:
   PINNED_NIGHTLY: nightly-2023-04-16
   # Prevent Github API rate limiting.
   LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  # Enable self-hosted runners for the sigp repo only.
+  SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
+  # Self-hosted runners need to reference a different host for `./watch` tests.
+  WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }}
 jobs:
   target-branch-check:
     name: target-branch-check
@@ -48,41 +57,37 @@ jobs:
       run: make cargo-fmt
   release-tests-ubuntu:
     name: release-tests-ubuntu
-    runs-on: ubuntu-latest
+    # Use self-hosted runners only on the sigp repo.
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest'  }}
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
+      with:
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
     - name: Run tests in release
       run: make test-release
   release-tests-windows:
     name: release-tests-windows
-    runs-on: windows-2019
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019'  }}
     needs: cargo-fmt
     steps:
    - uses: actions/checkout@v3
    - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
      run: rustup update stable
-    - name: Use Node.js
-      uses: actions/setup-node@v2
-      with:
-        node-version: '14'
-    - name: Install windows build tools
-      run: |
-        choco install python protoc visualstudio2019-workload-vctools -y
-        npm config set msvs_version 2019
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
+      with:
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
    - name: Install make
      run: choco install -y make
    - uses: KyleMayes/install-llvm-action@v1
+      if: env.SELF_HOSTED_RUNNERS == false
      with:
        version: "15.0"
        directory: ${{ runner.temp }}/llvm
@@ -92,16 +97,14 @@ jobs:
       run: make test-release
   beacon-chain-tests:
     name: beacon-chain-tests
-    runs-on: ubuntu-latest
+    # Use self-hosted runners only on the sigp repo.
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest'  }}
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run beacon_chain tests for all known forks
       run: make test-beacon-chain
   op-pool-tests:
@@ -112,10 +115,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run operation_pool tests for all known forks
       run: make test-op-pool
   slasher-tests:
@@ -130,18 +129,18 @@ jobs:
       run: make test-slasher
   debug-tests-ubuntu:
     name: debug-tests-ubuntu
-    runs-on: ubuntu-22.04
+    # Use self-hosted runners only on the sigp repo.
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest'  }}
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
+      with:
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
     - name: Run tests in debug
       run: make test-debug
   state-transition-vectors-ubuntu:
@@ -152,24 +151,18 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run state_transition_vectors in release.
       run: make run-state-transition-tests
   ef-tests-ubuntu:
     name: ef-tests-ubuntu
-    runs-on: ubuntu-latest
+    # Use self-hosted runners only on the sigp repo.
+    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest'  }}
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
+      if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run consensus-spec-tests with blst, milagro and fake_crypto
       run: make test-ef
   dockerfile-ubuntu:
@@ -192,12 +185,10 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
+      with:
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
     - name: Run the beacon chain sim that starts from an eth1 contract
       run: cargo run --release --bin simulator eth1-sim
   merge-transition-ubuntu:
@@ -208,12 +199,10 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
+      with:
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
     - name: Run the beacon chain sim and go through the merge transition
       run: cargo run --release --bin simulator eth1-sim --post-merge
   no-eth1-simulator-ubuntu:
@@ -224,10 +213,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run the beacon chain sim without an eth1 connection
       run: cargo run --release --bin simulator no-eth1-sim
   syncing-simulator-ubuntu:
@@ -238,12 +223,10 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
+      with:
+        version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
     - name: Run the syncing simulator
       run: cargo run --release --bin simulator syncing-sim
   doppelganger-protection-test:
@@ -254,10 +237,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install geth
       run: |
          sudo add-apt-repository -y ppa:ethereum/ethereum
@@ -289,10 +268,6 @@ jobs:
         dotnet-version: '6.0.201'
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run exec engine integration tests in release
       run: make test-exec-engine
   check-benchmarks:
@@ -303,10 +278,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Typecheck benchmark code without running it
       run: make check-benches
   clippy:
@@ -317,10 +288,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Lint code for quality and style with Clippy
       run: make lint
     - name: Certify Cargo.lock freshness
@@ -333,10 +300,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
       run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run cargo check
       run: cargo check --workspace
   arbitrary-check:
@@ -375,10 +338,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Install Rust (${{ env.PINNED_NIGHTLY }})
       run: rustup toolchain install $PINNED_NIGHTLY
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install cargo-udeps
       run: cargo install cargo-udeps --locked --force
     - name: Create Cargo config dir
@@ -396,7 +355,7 @@ jobs:
     steps:
     - uses: actions/checkout@v3
     - name: Install dependencies
-      run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
+      run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
     - name: Use Rust beta
       run: rustup override set beta
     - name: Run make
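
Swapping `cargo install --git ... anvil` for `foundry-rs/foundry-toolchain@v1` replaces a from-source build of Foundry with a prebuilt binary download, and the `version:` input pins an exact nightly build so CI stays reproducible. A minimal sketch, with a hypothetical workflow:

    name: example
    on: [push]
    jobs:
      test:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v3
          # Downloads prebuilt forge/cast/anvil binaries; a bare 'nightly'
          # would track the latest nightly instead of this pinned build.
          - name: Install Foundry
            uses: foundry-rs/foundry-toolchain@v1
            with:
              version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d
          - run: anvil --version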
							
								
								
									
Cargo.lock (3761 changes, generated)
File diff suppressed because it is too large.
							
								
								
									
Cargo.toml (145 changes)
@@ -4,6 +4,7 @@ members = [
 
     "beacon_node",
     "beacon_node/beacon_chain",
+    "beacon_node/beacon_processor",
     "beacon_node/builder_client",
     "beacon_node/client",
     "beacon_node/eth1",
@@ -35,6 +36,7 @@ members = [
     "common/lru_cache",
     "common/malloc_utils",
     "common/oneshot_broadcast",
+    "common/pretty_reqwest_error",
     "common/sensitive_url",
     "common/slot_clock",
     "common/system_health",
@@ -81,21 +83,144 @@ members = [
     "validator_client",
     "validator_client/slashing_protection",
 
+    "validator_manager",
+
     "watch",
 ]
 resolver = "2"
 
-[patch]
-[patch.crates-io]
-warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
-arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" }
+[workspace.package]
+edition = "2021"
 
-[patch."https://github.com/ralexstokes/mev-rs"]
-mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" }
-[patch."https://github.com/ralexstokes/ethereum-consensus"]
-ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" }
-[patch."https://github.com/ralexstokes/ssz-rs"]
-ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" }
+[workspace.dependencies]
+arbitrary = { version = "1", features = ["derive"] }
+bincode = "1"
+bitvec = "1"
+byteorder = "1"
+bytes = "1"
+clap = "2"
+compare_fields_derive = { path = "common/compare_fields_derive" }
+criterion = "0.3"
+delay_map = "0.3"
+derivative = "2"
+dirs = "3"
+discv5 = { version = "0.3", features = ["libp2p"] }
+env_logger = "0.9"
+error-chain = "0.12"
+ethereum-types = "0.14"
+ethereum_hashing = "1.0.0-beta.2"
+ethereum_serde_utils = "0.5"
+ethereum_ssz = "0.5"
+ethereum_ssz_derive = "0.5"
+ethers-core = "1"
+ethers-providers = { version = "1", default-features = false }
+exit-future = "0.2"
+fnv = "1"
+fs2 = "0.4"
+futures = "0.3"
+hex = "0.4"
+hyper = "0.14"
+itertools = "0.10"
+lazy_static = "1"
+libsecp256k1 = "0.7"
+log = "0.4"
+lru = "0.7"
+maplit = "1"
+num_cpus = "1"
+parking_lot = "0.12"
+paste = "1"
+quickcheck = "1"
+quickcheck_macros = "1"
+quote = "1"
+r2d2 = "0.8"
+rand = "0.8"
+rayon = "1.7"
+regex = "1"
+reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls"] }
+ring = "0.16"
+rusqlite = { version = "0.28", features = ["bundled"] }
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
+serde_repr = "0.1"
+serde_yaml = "0.8"
+sha2 = "0.9"
+slog = { version = "2", features = ["max_level_trace", "release_max_level_trace"] }
+slog-async = "2"
+slog-term = "2"
+sloggers = { version = "2", features = ["json"] }
+smallvec = "1"
+snap = "1"
+ssz_types = "0.5"
+strum = { version = "0.24", features = ["derive"] }
+superstruct = "0.6"
+syn = "1"
+sysinfo = "0.26"
+tempfile = "3"
+tokio = { version = "1", features = ["rt-multi-thread", "sync"] }
+tokio-stream = { version = "0.1", features = ["sync"] }
+tokio-util = { version = "0.6", features = ["codec", "compat", "time"] }
+tree_hash = "0.5"
+tree_hash_derive = "0.5"
+url = "2"
+uuid = { version = "0.8", features = ["serde", "v4"] }
+# TODO update to warp 0.3.6 after released.
+warp = { git = "https://github.com/seanmonstar/warp.git", default-features = false, features = ["tls"] }
+zeroize = { version = "1", features = ["zeroize_derive"] }
+zip = "0.6"
+
+# Local crates.
+account_utils = { path = "common/account_utils" }
+beacon_chain = { path = "beacon_node/beacon_chain" }
+beacon_node = { path = "beacon_node" }
+beacon_processor = { path = "beacon_node/beacon_processor" }
+bls = { path = "crypto/bls" }
+cached_tree_hash = { path = "consensus/cached_tree_hash" }
+clap_utils = { path = "common/clap_utils" }
+compare_fields = { path = "common/compare_fields" }
+deposit_contract = { path = "common/deposit_contract" }
+directory = { path = "common/directory" }
+environment = { path = "lighthouse/environment" }
+eth1 = { path = "beacon_node/eth1" }
+eth1_test_rig = { path = "testing/eth1_test_rig" }
+eth2 = { path = "common/eth2" }
+eth2_config = { path = "common/eth2_config" }
+eth2_key_derivation = { path = "crypto/eth2_key_derivation" }
+eth2_keystore = { path = "crypto/eth2_keystore" }
+eth2_network_config = { path = "common/eth2_network_config" }
+eth2_wallet = { path = "crypto/eth2_wallet" }
+execution_layer = { path = "beacon_node/execution_layer" }
+filesystem = { path = "common/filesystem" }
+fork_choice = { path = "consensus/fork_choice" }
+genesis = { path = "beacon_node/genesis" }
+http_api = { path = "beacon_node/http_api" }
+int_to_bytes = { path = "consensus/int_to_bytes" }
+lighthouse_metrics = { path = "common/lighthouse_metrics" }
+lighthouse_network = { path = "beacon_node/lighthouse_network" }
+lighthouse_version = { path = "common/lighthouse_version" }
+lockfile = { path = "common/lockfile" }
+logging = { path = "common/logging" }
+lru_cache = { path = "common/lru_cache" }
+malloc_utils = { path = "common/malloc_utils" }
+merkle_proof = { path = "consensus/merkle_proof" }
+monitoring_api = { path = "common/monitoring_api" }
+network = { path = "beacon_node/network" }
+operation_pool = { path = "beacon_node/operation_pool" }
+pretty_reqwest_error = { path = "common/pretty_reqwest_error" }
+proto_array = { path = "consensus/proto_array" }
+safe_arith = { path = "consensus/safe_arith" }
+sensitive_url = { path = "common/sensitive_url" }
+slasher = { path = "slasher" }
+slashing_protection = { path = "validator_client/slashing_protection" }
+slot_clock = { path = "common/slot_clock" }
+state_processing = { path = "consensus/state_processing" }
+store = { path = "beacon_node/store" }
+swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" }
+task_executor = { path = "common/task_executor" }
+types = { path = "consensus/types" }
+unused_port = { path = "common/unused_port" }
|  | validator_client = { path = "validator_client/" } | ||||||
|  | validator_dir = { path = "common/validator_dir" } | ||||||
|  | warp_utils = { path = "common/warp_utils" } | ||||||
| 
 | 
 | ||||||
| [profile.maxperf] | [profile.maxperf] | ||||||
| inherits = "release" | inherits = "release" | ||||||
|  | |||||||
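For context on the migration above: since Cargo 1.64, a workspace can declare shared dependency requirements once under [workspace.dependencies] (and shared package keys under [workspace.package]), which member crates then inherit with { workspace = true }. A minimal sketch of the pattern, using a hypothetical member crate:

    # Root Cargo.toml
    [workspace]
    members = ["member_crate"]

    [workspace.package]
    edition = "2021"

    [workspace.dependencies]
    serde = { version = "1", features = ["derive"] }

    # member_crate/Cargo.toml
    [package]
    name = "member_crate"
    version = "0.1.0"
    edition = { workspace = true }

    [dependencies]
    # Inherits the version and features pinned at the workspace root.
    serde = { workspace = true }

This is why the per-crate manifests further down shrink to { workspace = true } entries while versions are pinned in one place.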
@@ -1,5 +1,5 @@
 [target.x86_64-unknown-linux-gnu]
-dockerfile = './scripts/cross/Dockerfile'
+pre-build = ["apt-get install -y cmake clang-3.9"]

 [target.aarch64-unknown-linux-gnu]
-dockerfile = './scripts/cross/Dockerfile'
+pre-build = ["apt-get install -y cmake clang-3.9"]
@@ -1,5 +1,5 @@
-FROM rust:1.68.2-bullseye AS builder
-RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
+FROM rust:1.69.0-bullseye AS builder
+RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
 COPY . lighthouse
 ARG FEATURES
 ARG PROFILE=release
@@ -13,4 +13,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco
   ca-certificates \
   && apt-get clean \
   && rm -rf /var/lib/apt/lists/*
 COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse
Makefile (21 changes)
@@ -71,13 +71,13 @@ install-lcli:
 # optimized CPU functions that may not be available on some systems. This
 # results in a more portable binary with ~20% slower BLS verification.
 build-x86_64:
-	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-x86_64-portable:
-	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-aarch64:
-	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-aarch64-portable:
-	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked

 # Create a `.tar.gz` containing a binary for a specific target.
 define tarball_release_binary
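A note on the --locked flags added above: Cargo's --locked asserts that the committed Cargo.lock already matches the dependency graph and aborts if the lock file is missing or would need updating, so release artifacts are built against exactly the pinned versions, e.g.

    # Fails instead of silently regenerating Cargo.lock:
    cargo build --locked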
@@ -145,8 +145,9 @@ test-op-pool-%:

 # Run the tests in the `slasher` crate for all supported database backends.
 test-slasher:
-	cargo test --release -p slasher --features mdbx
-	cargo test --release -p slasher --no-default-features --features lmdb
+	cargo test --release -p slasher --features lmdb
+	cargo test --release -p slasher --no-default-features --features mdbx
+	cargo test --release -p slasher --features lmdb,mdbx # both backends enabled

 # Runs only the tests/state_transition_vectors tests.
 run-state-transition-tests:
@@ -169,7 +170,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
 lint:
-	cargo clippy --workspace --tests -- \
+	cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \
 		-D clippy::fn_to_numeric_cast_any \
 		-D warnings \
 		-A clippy::derive_partial_eq_without_eq \
@@ -179,6 +180,10 @@ lint:
 		-A clippy::question-mark \
 		-A clippy::uninlined-format-args

+# Lints the code using Clippy and automatically fix some simple compiler warnings.
+lint-fix:
+	EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint
+
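To make the lint-fix indirection concrete: make imports variables from its environment, so the recursive $(MAKE) lint sees EXTRA_CLIPPY_OPTS and splices it into the Clippy invocation. The effective command is roughly (deny/allow list elided):

    cargo clippy --workspace --tests --fix --allow-staged --allow-dirty -- -D warnings ...

--fix applies machine-applicable suggestions, while --allow-staged and --allow-dirty let it run in a working tree with uncommitted changes.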
 nightly-lint:
 	cp .github/custom/clippy.toml .
 	cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \
@@ -202,7 +207,7 @@ arbitrary-fuzz:
 # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
 audit:
 	cargo install --force cargo-audit
-	cargo audit --ignore RUSTSEC-2020-0071
+	cargo audit

 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
 vendor:
@@ -10,7 +10,7 @@ An open-source Ethereum consensus client, written in Rust and maintained by Sigm
 [Book Link]: https://lighthouse-book.sigmaprime.io
 [stable]: https://github.com/sigp/lighthouse/tree/stable
 [unstable]: https://github.com/sigp/lighthouse/tree/unstable
-[blog]: https://lighthouse.sigmaprime.io
+[blog]: https://lighthouse-blog.sigmaprime.io

 [Documentation](https://lighthouse-book.sigmaprime.io)
@@ -1,29 +1,35 @@
 [package]
 name = "account_manager"
 version = "0.3.5"
-authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
-edition = "2021"
+authors = [
+    "Paul Hauner <paul@paulhauner.com>",
+    "Luke Anderson <luke@sigmaprime.io>",
+]
+edition = { workspace = true }

 [dependencies]
-bls = { path = "../crypto/bls" }
-clap = "2.33.3"
-types = { path = "../consensus/types" }
-environment = { path = "../lighthouse/environment" }
-eth2_network_config = { path = "../common/eth2_network_config" }
-clap_utils = { path = "../common/clap_utils" }
-directory = { path = "../common/directory" }
-eth2_wallet = { path = "../crypto/eth2_wallet" }
+bls = { workspace = true }
+clap = { workspace = true }
+types = { workspace = true }
+environment = { workspace = true }
+eth2_network_config = { workspace = true }
+clap_utils = { workspace = true }
+directory = { workspace = true }
+eth2_wallet = { workspace = true }
 eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
-validator_dir = { path = "../common/validator_dir" }
-tokio = { version = "1.14.0", features = ["full"] }
-eth2_keystore = { path = "../crypto/eth2_keystore" }
-account_utils = { path = "../common/account_utils" }
-slashing_protection = { path = "../validator_client/slashing_protection" }
-eth2 = {path = "../common/eth2"}
-safe_arith = {path = "../consensus/safe_arith"}
-slot_clock = { path = "../common/slot_clock" }
-filesystem = { path = "../common/filesystem" }
-sensitive_url = { path = "../common/sensitive_url" }
+validator_dir = { workspace = true }
+tokio = { workspace = true }
+eth2_keystore = { workspace = true }
+account_utils = { workspace = true }
+slashing_protection = { workspace = true }
+eth2 = { workspace = true }
+safe_arith = { workspace = true }
+slot_clock = { workspace = true }
+filesystem = { workspace = true }
+sensitive_url = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+slog = { workspace = true }

 [dev-dependencies]
-tempfile = "3.1.0"
+tempfile = { workspace = true }
@@ -1,55 +1,7 @@
-use account_utils::PlainText;
-use account_utils::{read_input_from_user, strip_off_newlines};
-use eth2_wallet::bip39::{Language, Mnemonic};
-use std::fs;
-use std::path::PathBuf;
-use std::str::from_utf8;
-use std::thread::sleep;
-use std::time::Duration;
+use account_utils::read_input_from_user;

-pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:";
 pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:";

-pub fn read_mnemonic_from_cli(
-    mnemonic_path: Option<PathBuf>,
-    stdin_inputs: bool,
-) -> Result<Mnemonic, String> {
-    let mnemonic = match mnemonic_path {
-        Some(path) => fs::read(&path)
-            .map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
-            .and_then(|bytes| {
-                let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into();
-                let phrase = from_utf8(bytes_no_newlines.as_ref())
-                    .map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?;
-                Mnemonic::from_phrase(phrase, Language::English).map_err(|e| {
-                    format!(
-                        "Unable to derive mnemonic from string {:?}: {:?}",
-                        phrase, e
-                    )
-                })
-            })?,
-        None => loop {
-            eprintln!();
-            eprintln!("{}", MNEMONIC_PROMPT);
-
-            let mnemonic = read_input_from_user(stdin_inputs)?;
-
-            match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) {
-                Ok(mnemonic_m) => {
-                    eprintln!("Valid mnemonic provided.");
-                    eprintln!();
-                    sleep(Duration::from_secs(1));
-                    break mnemonic_m;
-                }
-                Err(_) => {
-                    eprintln!("Invalid mnemonic");
-                }
-            }
-        },
-    };
-    Ok(mnemonic)
-}
-
 /// Reads in a wallet name from the user. If the `--wallet-name` flag is provided, use it. Otherwise
 /// read from an interactive prompt using tty unless the `--stdin-inputs` flag is provided.
 pub fn read_wallet_name_from_cli(
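The helper removed here is not deleted outright: as the import changes further down show, read_mnemonic_from_cli now lives in the shared account_utils crate, keeping the signature shown above. A hypothetical caller (names here are illustrative, not taken from this diff) would look like:

    use account_utils::read_mnemonic_from_cli;
    use std::path::PathBuf;

    fn load_mnemonic(path: Option<PathBuf>, stdin_inputs: bool) -> Result<(), String> {
        // Reads from `path` if given, otherwise prompts interactively.
        let mnemonic = read_mnemonic_from_cli(path, stdin_inputs)?;
        let _phrase = mnemonic.phrase();
        Ok(())
    }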
@@ -123,10 +123,8 @@ async fn publish_voluntary_exit<E: EthSpec>(
 ) -> Result<(), String> {
     let genesis_data = get_geneisis_data(client).await?;
     let testnet_genesis_root = eth2_network_config
-        .beacon_state::<E>()
-        .as_ref()
-        .expect("network should have valid genesis state")
-        .genesis_validators_root();
+        .genesis_validators_root::<E>()?
+        .ok_or("Genesis state is unknown")?;

     // Verify that the beacon node and validator being exited are on the same network.
     if genesis_data.genesis_validators_root != testnet_genesis_root {
@@ -4,8 +4,8 @@ use account_utils::{
     eth2_keystore::Keystore,
     read_password_from_user,
     validator_definitions::{
-        recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions,
-        CONFIG_FILENAME,
+        recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition,
+        ValidatorDefinitions, CONFIG_FILENAME,
     },
     ZeroizeString,
 };
@@ -277,7 +277,9 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin
         let suggested_fee_recipient = None;
         let validator_def = ValidatorDefinition::new_keystore_with_password(
             &dest_keystore,
-            password_opt,
+            password_opt
+                .map(PasswordStorage::ValidatorDefinitions)
+                .unwrap_or(PasswordStorage::None),
             graffiti,
             suggested_fee_recipient,
             None,
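The password_opt mapping above is the usual Option-to-enum bridging idiom. A self-contained sketch with a stand-in enum (the real PasswordStorage in account_utils stores a ZeroizeString and has more variants; this is only illustrative):

    #[derive(Debug)]
    enum PasswordStorage {
        ValidatorDefinitions(String), // stand-in for ZeroizeString
        None,
    }

    fn main() {
        let password_opt: Option<String> = Some("example".into());
        // Some(password) selects the variant carrying it; None falls through.
        let storage = password_opt
            .map(PasswordStorage::ValidatorDefinitions)
            .unwrap_or(PasswordStorage::None);
        println!("{:?}", storage);
    }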
@@ -1,10 +1,9 @@
 use super::create::STORE_WITHDRAW_FLAG;
-use crate::common::read_mnemonic_from_cli;
 use crate::validator::create::COUNT_FLAG;
 use crate::wallet::create::STDIN_INPUTS_FLAG;
 use crate::SECRETS_DIR_FLAG;
 use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder};
-use account_utils::random_password;
+use account_utils::{random_password, read_mnemonic_from_cli};
 use clap::{App, Arg, ArgMatches};
 use directory::ensure_dir_exists;
 use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR};
@@ -7,7 +7,7 @@ use slashing_protection::{
 use std::fs::File;
 use std::path::PathBuf;
 use std::str::FromStr;
-use types::{BeaconState, Epoch, EthSpec, PublicKeyBytes, Slot};
+use types::{Epoch, EthSpec, PublicKeyBytes, Slot};

 pub const CMD: &str = "slashing-protection";
 pub const IMPORT_CMD: &str = "import";
@@ -81,20 +81,13 @@ pub fn cli_run<T: EthSpec>(
     validator_base_dir: PathBuf,
 ) -> Result<(), String> {
     let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME);
-
     let eth2_network_config = env
         .eth2_network_config
         .ok_or("Unable to get testnet configuration from the environment")?;

     let genesis_validators_root = eth2_network_config
-        .beacon_state::<T>()
-        .map(|state: BeaconState<T>| state.genesis_validators_root())
-        .map_err(|e| {
-            format!(
-                "Unable to get genesis state, has genesis occurred? Detail: {:?}",
-                e
-            )
-        })?;
+        .genesis_validators_root::<T>()?
+        .ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?;

     match matches.subcommand() {
         (IMPORT_CMD, Some(matches)) => {
@@ -1,6 +1,6 @@
-use crate::common::read_mnemonic_from_cli;
 use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG};
 use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG};
+use account_utils::read_mnemonic_from_cli;
 use clap::{App, Arg, ArgMatches};
 use std::path::PathBuf;
@@ -1,8 +1,11 @@
 [package]
 name = "beacon_node"
-version = "4.2.0"
-authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
-edition = "2021"
+version = "4.5.0"
+authors = [
+    "Paul Hauner <paul@paulhauner.com>",
+    "Age Manning <Age@AgeManning.com",
+]
+edition = { workspace = true }

 [lib]
 name = "beacon_node"
@@ -12,33 +15,35 @@ path = "src/lib.rs"
 node_test_rig = { path = "../testing/node_test_rig" }

 [features]
-write_ssz_files = ["beacon_chain/write_ssz_files"]  # Writes debugging .ssz files to /tmp during block processing.
+write_ssz_files = [
+    "beacon_chain/write_ssz_files",
+] # Writes debugging .ssz files to /tmp during block processing.

 [dependencies]
-eth2_config = { path = "../common/eth2_config" }
-beacon_chain = { path = "beacon_chain" }
-types = { path = "../consensus/types" }
-store = { path = "./store" }
+eth2_config = { workspace = true }
+beacon_chain = { workspace = true }
+types = { workspace = true }
+store = { workspace = true }
 client = { path = "client" }
-clap = "2.33.3"
-slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
-dirs = "3.0.1"
-directory = {path = "../common/directory"}
-futures = "0.3.7"
-environment = { path = "../lighthouse/environment" }
-task_executor = { path = "../common/task_executor" }
-genesis = { path = "genesis" }
-eth2_network_config = { path = "../common/eth2_network_config" }
-execution_layer = { path = "execution_layer" }
-lighthouse_network = { path = "./lighthouse_network" }
-serde = "1.0.116"
-clap_utils = { path = "../common/clap_utils" }
-hyper = "0.14.4"
-lighthouse_version = { path = "../common/lighthouse_version" }
-hex = "0.4.2"
-slasher = { path = "../slasher", default-features = false }
-monitoring_api = { path = "../common/monitoring_api" }
-sensitive_url = { path = "../common/sensitive_url" }
-http_api = { path = "http_api" }
-unused_port = { path = "../common/unused_port" }
-strum = "0.24.1"
+clap = { workspace = true }
+slog = { workspace = true }
+dirs = { workspace = true }
+directory = { workspace = true }
+futures = { workspace = true }
+environment = { workspace = true }
+task_executor = { workspace = true }
+genesis = { workspace = true }
+eth2_network_config = { workspace = true }
+execution_layer = { workspace = true }
+lighthouse_network = { workspace = true }
+serde = { workspace = true }
+clap_utils = { workspace = true }
+hyper = { workspace = true }
+lighthouse_version = { workspace = true }
+hex = { workspace = true }
+slasher = { workspace = true }
+monitoring_api = { workspace = true }
+sensitive_url = { workspace = true }
+http_api = { workspace = true }
+unused_port = { workspace = true }
+strum = { workspace = true }
@@ -2,7 +2,7 @@
 name = "beacon_chain"
 version = "0.2.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
-edition = "2021"
+edition = { workspace = true }
 autotests = false # using a single test binary compiles faster

 [features]
@@ -12,59 +12,59 @@ participation_metrics = []  # Exposes validator participation metrics to Prometh
 fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable

 [dev-dependencies]
-maplit = "1.0.2"
-environment = { path = "../../lighthouse/environment" }
-serde_json = "1.0.58"
+maplit = { workspace = true }
+environment = { workspace = true }
+serde_json = { workspace = true }

 [dependencies]
-merkle_proof = { path = "../../consensus/merkle_proof" }
-store = { path = "../store" }
-parking_lot = "0.12.0"
-lazy_static = "1.4.0"
-smallvec = "1.6.1"
-lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
-operation_pool = { path = "../operation_pool" }
-rayon = "1.4.1"
-serde = "1.0.116"
-serde_derive = "1.0.116"
-slog = { version = "2.5.2", features = ["max_level_trace"] }
-sloggers = { version = "2.1.1", features = ["json"] }
-slot_clock = { path = "../../common/slot_clock" }
-ethereum_hashing = "1.0.0-beta.2"
-ethereum_ssz = "0.5.0"
-ssz_types = "0.5.0"
-ethereum_ssz_derive = "0.5.0"
-state_processing = { path = "../../consensus/state_processing" }
-tree_hash = "0.5.0"
-types = { path = "../../consensus/types" }
-tokio = "1.14.0"
-tokio-stream = "0.1.3"
-eth1 = { path = "../eth1" }
-futures = "0.3.7"
-genesis = { path = "../genesis" }
-int_to_bytes = { path = "../../consensus/int_to_bytes" }
-rand = "0.8.5"
-proto_array = { path = "../../consensus/proto_array" }
-lru = "0.7.1"
-tempfile = "3.1.0"
-bitvec = "0.20.4"
-bls = { path = "../../crypto/bls" }
-safe_arith = { path = "../../consensus/safe_arith" }
-fork_choice = { path = "../../consensus/fork_choice" }
-task_executor = { path = "../../common/task_executor" }
-derivative = "2.1.1"
-itertools = "0.10.0"
-slasher = { path = "../../slasher", default-features = false }
-eth2 = { path = "../../common/eth2" }
-strum = { version = "0.24.0", features = ["derive"] }
-logging = { path = "../../common/logging" }
-execution_layer = { path = "../execution_layer" }
-sensitive_url = { path = "../../common/sensitive_url" }
-superstruct = "0.5.0"
-hex = "0.4.2"
-exit-future = "0.2.0"
-unused_port = {path = "../../common/unused_port"}
-oneshot_broadcast = { path = "../../common/oneshot_broadcast" }
+merkle_proof = { workspace = true }
+store = { workspace = true }
+parking_lot = { workspace = true }
+lazy_static = { workspace = true }
+smallvec = { workspace = true }
+lighthouse_metrics = { workspace = true }
+operation_pool = { workspace = true }
+rayon = { workspace = true }
+serde = { workspace = true }
+ethereum_serde_utils = { workspace = true }
+slog = { workspace = true }
+sloggers = { workspace = true }
+slot_clock = { workspace = true }
+ethereum_hashing = { workspace = true }
+ethereum_ssz = { workspace = true }
+ssz_types = { workspace = true }
+ethereum_ssz_derive = { workspace = true }
+state_processing = { workspace = true }
+tree_hash_derive = { workspace = true }
+tree_hash = { workspace = true }
+types = { workspace = true }
+tokio = { workspace = true }
+tokio-stream = { workspace = true }
+eth1 = { workspace = true }
+futures = { workspace = true }
+genesis = { workspace = true }
+int_to_bytes = { workspace = true }
+rand = { workspace = true }
+proto_array = { workspace = true }
+lru = { workspace = true }
+tempfile = { workspace = true }
+bitvec = { workspace = true }
+bls = { workspace = true }
+safe_arith = { workspace = true }
+fork_choice = { workspace = true }
+task_executor = { workspace = true }
+derivative = { workspace = true }
+itertools = { workspace = true }
+slasher = { workspace = true }
+eth2 = { workspace = true }
+strum = { workspace = true }
+logging = { workspace = true }
+execution_layer = { workspace = true }
+sensitive_url = { workspace = true }
+superstruct = { workspace = true }
+hex = { workspace = true }
+exit-future = { workspace = true }
+oneshot_broadcast = { path = "../../common/oneshot_broadcast/" }

 [[test]]
 name = "beacon_chain_tests"
@@ -3,7 +3,8 @@ use eth2::lighthouse::attestation_rewards::{IdealAttestationRewards, TotalAttest
 use eth2::lighthouse::StandardAttestationRewards;
 use participation_cache::ParticipationCache;
 use safe_arith::SafeArith;
-use slog::{debug, Logger};
+use serde_utils::quoted_u64::Quoted;
+use slog::debug;
 use state_processing::{
     common::altair::BaseRewardPerIncrement,
     per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight},
@@ -15,32 +16,111 @@ use store::consts::altair::{
 };
 use types::consts::altair::WEIGHT_DENOMINATOR;

-use types::{Epoch, EthSpec};
+use types::{BeaconState, Epoch, EthSpec};

 use eth2::types::ValidatorId;
+use state_processing::common::base::get_base_reward_from_effective_balance;
+use state_processing::per_epoch_processing::base::rewards_and_penalties::{
+    get_attestation_component_delta, get_attestation_deltas_all, get_attestation_deltas_subset,
+    get_inactivity_penalty_delta, get_inclusion_delay_delta,
+};
+use state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo;
+use state_processing::per_epoch_processing::base::{
+    TotalBalances, ValidatorStatus, ValidatorStatuses,
+};

 impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn compute_attestation_rewards(
         &self,
         epoch: Epoch,
         validators: Vec<ValidatorId>,
-        log: Logger,
     ) -> Result<StandardAttestationRewards, BeaconChainError> {
-        debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len());
+        debug!(self.log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len());

         // Get state
-        let spec = &self.spec;
-
         let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch());

         let state_root = self
             .state_root_at_slot(state_slot)?
             .ok_or(BeaconChainError::NoStateForSlot(state_slot))?;

-        let mut state = self
+        let state = self
             .get_state(&state_root, Some(state_slot))?
             .ok_or(BeaconChainError::MissingBeaconState(state_root))?;

+        match state {
+            BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators),
+            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
+                self.compute_attestation_rewards_altair(state, validators)
+            }
+        }
+    }
+    fn compute_attestation_rewards_base(
+        &self,
+        mut state: BeaconState<T::EthSpec>,
+        validators: Vec<ValidatorId>,
+    ) -> Result<StandardAttestationRewards, BeaconChainError> {
+        let spec = &self.spec;
+        let mut validator_statuses = ValidatorStatuses::new(&state, spec)?;
+        validator_statuses.process_attestations(&state)?;
+
+        let ideal_rewards =
+            self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?;
+
+        let indices_to_attestation_delta = if validators.is_empty() {
+            get_attestation_deltas_all(&state, &validator_statuses, spec)?
+                .into_iter()
+                .enumerate()
+                .collect()
+        } else {
+            let validator_indices = Self::validators_ids_to_indices(&mut state, validators)?;
+            get_attestation_deltas_subset(&state, &validator_statuses, &validator_indices, spec)?
+        };
+
+        let mut total_rewards = vec![];
+
+        for (index, delta) in indices_to_attestation_delta.into_iter() {
+            let head_delta = delta.head_delta;
+            let head = (head_delta.rewards as i64).safe_sub(head_delta.penalties as i64)?;
+
+            let target_delta = delta.target_delta;
+            let target = (target_delta.rewards as i64).safe_sub(target_delta.penalties as i64)?;
+
+            let source_delta = delta.source_delta;
+            let source = (source_delta.rewards as i64).safe_sub(source_delta.penalties as i64)?;
+
+            // No penalties associated with inclusion delay
+            let inclusion_delay = delta.inclusion_delay_delta.rewards;
+            let inactivity = delta.inactivity_penalty_delta.penalties.wrapping_neg() as i64;
+
+            let rewards = TotalAttestationRewards {
+                validator_index: index as u64,
+                head,
+                target,
+                source,
+                inclusion_delay: Some(Quoted {
+                    value: inclusion_delay,
+                }),
+                inactivity,
+            };
+
+            total_rewards.push(rewards);
+        }
+
+        Ok(StandardAttestationRewards {
+            ideal_rewards,
+            total_rewards,
+        })
+    }
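For the pre-Altair path added above, each per-validator field of TotalAttestationRewards is the signed net of the corresponding delta, with inclusion delay reward-only and inactivity penalty-only:

    \mathrm{net}_c = R_c - P_c \quad (c \in \{\mathrm{head}, \mathrm{target}, \mathrm{source}\}),
    \qquad \mathrm{inclusion\_delay} = R_{\mathrm{incl}},
    \qquad \mathrm{inactivity} = -P_{\mathrm{inact}}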
+    fn compute_attestation_rewards_altair(
+        &self,
+        mut state: BeaconState<T::EthSpec>,
+        validators: Vec<ValidatorId>,
+    ) -> Result<StandardAttestationRewards, BeaconChainError> {
+        let spec = &self.spec;
+
         // Calculate ideal_rewards
         let participation_cache = ParticipationCache::new(&state, spec)?;

@@ -71,7 +151,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             let base_reward_per_increment =
                 BaseRewardPerIncrement::new(total_active_balance, spec)?;

-            for effective_balance_eth in 0..=32 {
+            for effective_balance_eth in 1..=self.max_effective_balance_increment_steps()? {
                 let effective_balance =
                     effective_balance_eth.safe_mul(spec.effective_balance_increment)?;
                 let base_reward =
@@ -86,7 +166,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 let ideal_reward = reward_numerator
                     .safe_div(active_increments)?
                     .safe_div(WEIGHT_DENOMINATOR)?;
-                if !state.is_in_inactivity_leak(previous_epoch, spec) {
+                if !state.is_in_inactivity_leak(previous_epoch, spec)? {
                     ideal_rewards_hashmap
                         .insert((flag_index, effective_balance), (ideal_reward, penalty));
                 } else {
@@ -101,20 +181,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let validators = if validators.is_empty() {
             participation_cache.eligible_validator_indices().to_vec()
         } else {
-            validators
-                .into_iter()
-                .map(|validator| match validator {
-                    ValidatorId::Index(i) => Ok(i as usize),
-                    ValidatorId::PublicKey(pubkey) => state
-                        .get_validator_index(&pubkey)?
-                        .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)),
-                })
-                .collect::<Result<Vec<_>, _>>()?
+            Self::validators_ids_to_indices(&mut state, validators)?
         };

         for validator_index in &validators {
             let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?;
-            let mut head_reward = 0u64;
+            let mut head_reward = 0i64;
             let mut target_reward = 0i64;
             let mut source_reward = 0i64;

@@ -132,7 +204,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                         .map_err(|_| BeaconChainError::AttestationRewardsError)?;
                     if voted_correctly {
                         if flag_index == TIMELY_HEAD_FLAG_INDEX {
-                            head_reward += ideal_reward;
+                            head_reward += *ideal_reward as i64;
                         } else if flag_index == TIMELY_TARGET_FLAG_INDEX {
                             target_reward += *ideal_reward as i64;
                         } else if flag_index == TIMELY_SOURCE_FLAG_INDEX {
@@ -152,6 +224,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 head: head_reward,
                 target: target_reward,
                 source: source_reward,
+                inclusion_delay: None,
+                // TODO: altair calculation logic needs to be updated to include inactivity penalty
+                inactivity: 0,
             });
         }

@@ -173,6 +248,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                             head: 0,
                             target: 0,
                             source: 0,
+                            inclusion_delay: None,
+                            // TODO: altair calculation logic needs to be updated to include inactivity penalty
+                            inactivity: 0,
                         });
                     match *flag_index {
                         TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward,
@@ -192,4 +270,126 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             total_rewards,
         })
     }
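The ideal_reward in the loop above follows the Altair per-flag reward from the consensus spec's get_flag_index_deltas: for flag weight $W_f$, base reward $B(b)$ at effective balance $b$, unslashed participating increments $P_f$, and total active increments $A$,

    \mathrm{ideal\_reward}(f, b) = \left\lfloor \frac{B(b) \cdot W_f \cdot P_f}{A \cdot W_\Sigma} \right\rfloor

with $W_\Sigma$ the WEIGHT_DENOMINATOR and the two safe_div calls performing the flooring division.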
+
+    fn max_effective_balance_increment_steps(&self) -> Result<u64, BeaconChainError> {
+        let spec = &self.spec;
+        let max_steps = spec
+            .max_effective_balance
+            .safe_div(spec.effective_balance_increment)?;
+        Ok(max_steps)
+    }
+
+    fn validators_ids_to_indices(
+        state: &mut BeaconState<T::EthSpec>,
+        validators: Vec<ValidatorId>,
+    ) -> Result<Vec<usize>, BeaconChainError> {
+        let indices = validators
+            .into_iter()
+            .map(|validator| match validator {
+                ValidatorId::Index(i) => Ok(i as usize),
+                ValidatorId::PublicKey(pubkey) => state
+                    .get_validator_index(&pubkey)?
+                    .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)),
+            })
+            .collect::<Result<Vec<_>, _>>()?;
+        Ok(indices)
+    }
+
+    fn compute_ideal_rewards_base(
+        &self,
+        state: &BeaconState<T::EthSpec>,
+        total_balances: &TotalBalances,
+    ) -> Result<Vec<IdealAttestationRewards>, BeaconChainError> {
+        let spec = &self.spec;
+        let previous_epoch = state.previous_epoch();
+        let finality_delay = previous_epoch
+            .safe_sub(state.finalized_checkpoint().epoch)?
+            .as_u64();
+
+        let ideal_validator_status = ValidatorStatus {
+            is_previous_epoch_attester: true,
+            is_slashed: false,
+            inclusion_info: Some(InclusionInfo {
+                delay: 1,
+                ..Default::default()
+            }),
+            ..Default::default()
+        };
+
+        let mut ideal_attestation_rewards_list = Vec::new();
+
+        for effective_balance_step in 1..=self.max_effective_balance_increment_steps()? {
+            let effective_balance =
+                effective_balance_step.safe_mul(spec.effective_balance_increment)?;
+            let base_reward = get_base_reward_from_effective_balance::<T::EthSpec>(
+                effective_balance,
+                total_balances.current_epoch(),
+                spec,
+            )?;
+
+            // compute ideal head rewards
+            let head = get_attestation_component_delta(
+                true,
+                total_balances.previous_epoch_head_attesters(),
+                total_balances,
+                base_reward,
+                finality_delay,
+                spec,
+            )?
+            .rewards;
+
+            // compute ideal target rewards
+            let target = get_attestation_component_delta(
+                true,
+                total_balances.previous_epoch_target_attesters(),
+                total_balances,
+                base_reward,
+                finality_delay,
+                spec,
+            )?
+            .rewards;
+
+            // compute ideal source rewards
+            let source = get_attestation_component_delta(
+                true,
+                total_balances.previous_epoch_attesters(),
+                total_balances,
+                base_reward,
+                finality_delay,
+                spec,
+            )?
+            .rewards;
+
+            // compute ideal inclusion delay rewards
+            let inclusion_delay =
+                get_inclusion_delay_delta(&ideal_validator_status, base_reward, spec)?
+                    .0
+                    .rewards;
+
+            // compute inactivity penalty
+            let inactivity = get_inactivity_penalty_delta(
+                &ideal_validator_status,
+                base_reward,
+                finality_delay,
+                spec,
+            )?
+            .penalties
+            .wrapping_neg() as i64;
+
+            let ideal_attestation_rewards = IdealAttestationRewards {
+                effective_balance,
+                head,
+                target,
+                source,
+                inclusion_delay: Some(Quoted {
+                    value: inclusion_delay,
+                }),
+                inactivity,
+            };
+
+            ideal_attestation_rewards_list.push(ideal_attestation_rewards);
+        }
+
+        Ok(ideal_attestation_rewards_list)
+    }
 }
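get_base_reward_from_effective_balance, used by compute_ideal_rewards_base above, implements the phase0 base reward:

    \mathrm{base\_reward}(b) = \left\lfloor \frac{b \cdot \mathrm{BASE\_REWARD\_FACTOR}}{\lfloor \sqrt{T} \rfloor \cdot \mathrm{BASE\_REWARDS\_PER\_EPOCH}} \right\rfloor

where $b$ is the effective balance and $T$ the total active balance for the current epoch, again with flooring integer division at each step.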
| @ -35,10 +35,8 @@ | |||||||
| mod batch; | mod batch; | ||||||
| 
 | 
 | ||||||
| use crate::{ | use crate::{ | ||||||
|     beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, |     beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, | ||||||
|     metrics, |     observed_aggregates::ObserveOutcome, observed_attesters::Error as ObservedAttestersError, | ||||||
|     observed_aggregates::ObserveOutcome, |  | ||||||
|     observed_attesters::Error as ObservedAttestersError, |  | ||||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, |     BeaconChain, BeaconChainError, BeaconChainTypes, | ||||||
| }; | }; | ||||||
| use bls::verify_signature_sets; | use bls::verify_signature_sets; | ||||||
| @ -57,8 +55,8 @@ use std::borrow::Cow; | |||||||
| use strum::AsRefStr; | use strum::AsRefStr; | ||||||
| use tree_hash::TreeHash; | use tree_hash::TreeHash; | ||||||
| use types::{ | use types::{ | ||||||
|     Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, |     Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, | ||||||
|     SelectionProof, SignedAggregateAndProof, Slot, SubnetId, |     IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; | pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; | ||||||
| @ -117,14 +115,14 @@ pub enum Error { | |||||||
|     ///
 |     ///
 | ||||||
|     /// The peer has sent an invalid message.
 |     /// The peer has sent an invalid message.
 | ||||||
|     AggregatorPubkeyUnknown(u64), |     AggregatorPubkeyUnknown(u64), | ||||||
|     /// The attestation has been seen before; either in a block, on the gossip network or from a
 |     /// The attestation or a superset of this attestation's aggregations bits for the same data
 | ||||||
|     /// local validator.
 |     /// has been seen before; either in a block, on the gossip network or from a local validator.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// ## Peer scoring
 |     /// ## Peer scoring
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// It's unclear if this attestation is valid, however we have already observed it and do not
 |     /// It's unclear if this attestation is valid, however we have already observed it and do not
 | ||||||
|     /// need to observe it again.
 |     /// need to observe it again.
 | ||||||
|     AttestationAlreadyKnown(Hash256), |     AttestationSupersetKnown(Hash256), | ||||||
|     /// There has already been an aggregation observed for this validator, we refuse to process a
 |     /// There has already been an aggregation observed for this validator, we refuse to process a
 | ||||||
|     /// second.
 |     /// second.
 | ||||||
|     ///
 |     ///
 | ||||||
| @ -268,7 +266,7 @@ enum CheckAttestationSignature { | |||||||
| struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> { | struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> { | ||||||
|     signed_aggregate: &'a SignedAggregateAndProof<T::EthSpec>, |     signed_aggregate: &'a SignedAggregateAndProof<T::EthSpec>, | ||||||
|     indexed_attestation: IndexedAttestation<T::EthSpec>, |     indexed_attestation: IndexedAttestation<T::EthSpec>, | ||||||
|     attestation_root: Hash256, |     attestation_data_root: Hash256, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// Wraps a `Attestation` that has been verified up until the point that an `IndexedAttestation` can
 | /// Wraps a `Attestation` that has been verified up until the point that an `IndexedAttestation` can
 | ||||||
@@ -454,7 +452,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
         //
         // We do not queue future attestations for later processing.
-        verify_propagation_slot_range(&chain.slot_clock, attestation)?;
+        verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?;
 
         // Check the attestation's epoch matches its target.
         if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch())
@@ -467,14 +465,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         }
 
         // Ensure the valid aggregated attestation has not already been seen locally.
-        let attestation_root = attestation.tree_hash_root();
+        let attestation_data = &attestation.data;
+        let attestation_data_root = attestation_data.tree_hash_root();
+
         if chain
             .observed_attestations
             .write()
-            .is_known(attestation, attestation_root)
+            .is_known_subset(attestation, attestation_data_root)
             .map_err(|e| Error::BeaconChainError(e.into()))?
         {
-            return Err(Error::AttestationAlreadyKnown(attestation_root));
+            metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS);
+            return Err(Error::AttestationSupersetKnown(attestation_data_root));
         }
 
         let aggregator_index = signed_aggregate.message.aggregator_index;
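The move from `is_known`/`AttestationAlreadyKnown` to `is_known_subset`/`AttestationSupersetKnown` re-keys the dedup cache on the `AttestationData` root, so an aggregate is only dropped when its aggregation bits add nothing to what was already observed for the same data. A minimal sketch of that covering test, with a plain `Vec<bool>` standing in for the SSZ bitlist (illustrative only, not the real `ObservedAggregateAttestations` API):

    /// True if every bit set in `incoming` is also set in `observed`,
    /// i.e. the incoming aggregate carries no new attester signatures.
    fn is_subset(incoming: &[bool], observed: &[bool]) -> bool {
        incoming.len() == observed.len()
            && incoming.iter().zip(observed).all(|(inc, obs)| !inc || *obs)
    }

    fn main() {
        let observed = vec![true, true, false, true];
        // Already covered: drop as a known subset.
        assert!(is_subset(&[true, false, false, true], &observed));
        // Sets a new bit (index 2): worth processing.
        assert!(!is_subset(&[false, false, true, false], &observed));
    }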
@@ -520,7 +521,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         if attestation.aggregation_bits.is_zero() {
             Err(Error::EmptyAggregationBitfield)
         } else {
-            Ok(attestation_root)
+            Ok(attestation_data_root)
         }
     }
 
@@ -533,7 +534,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
 
         let attestation = &signed_aggregate.message.aggregate;
         let aggregator_index = signed_aggregate.message.aggregator_index;
-        let attestation_root = match Self::verify_early_checks(signed_aggregate, chain) {
+        let attestation_data_root = match Self::verify_early_checks(signed_aggregate, chain) {
             Ok(root) => root,
             Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
         };
@@ -568,7 +569,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         Ok(IndexedAggregatedAttestation {
             signed_aggregate,
             indexed_attestation,
-            attestation_root,
+            attestation_data_root,
         })
     }
 }
@@ -577,7 +578,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
     /// Run the checks that happen after the indexed attestation and signature have been checked.
     fn verify_late_checks(
         signed_aggregate: &SignedAggregateAndProof<T::EthSpec>,
-        attestation_root: Hash256,
+        attestation_data_root: Hash256,
         chain: &BeaconChain<T>,
     ) -> Result<(), Error> {
         let attestation = &signed_aggregate.message.aggregate;
@@ -587,13 +588,14 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
         //
         // It's important to double check that the attestation is not already known, otherwise two
         // attestations processed at the same time could be published.
-        if let ObserveOutcome::AlreadyKnown = chain
+        if let ObserveOutcome::Subset = chain
             .observed_attestations
             .write()
-            .observe_item(attestation, Some(attestation_root))
+            .observe_item(attestation, Some(attestation_data_root))
             .map_err(|e| Error::BeaconChainError(e.into()))?
         {
-            return Err(Error::AttestationAlreadyKnown(attestation_root));
+            metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS);
+            return Err(Error::AttestationSupersetKnown(attestation_data_root));
         }
 
         // Observe the aggregator so we don't process another aggregate from them.
@@ -653,7 +655,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
         let IndexedAggregatedAttestation {
             signed_aggregate,
             indexed_attestation,
-            attestation_root,
+            attestation_data_root,
         } = signed_aggregate;
 
         match check_signature {
@@ -677,7 +679,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
             CheckAttestationSignature::No => (),
         };
 
-        if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_root, chain) {
+        if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_data_root, chain) {
             return Err(SignatureValid(indexed_attestation, e));
         }
 
@@ -718,7 +720,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
         //
         // We do not queue future attestations for later processing.
-        verify_propagation_slot_range(&chain.slot_clock, attestation)?;
+        verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?;
 
         // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one
         // aggregation bit set.
@@ -1033,11 +1035,11 @@ fn verify_head_block_is_known<T: BeaconChainTypes>(
 pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>(
     slot_clock: &S,
     attestation: &Attestation<E>,
+    spec: &ChainSpec,
 ) -> Result<(), Error> {
     let attestation_slot = attestation.data.slot;
-
     let latest_permissible_slot = slot_clock
-        .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .now_with_future_tolerance(spec.maximum_gossip_clock_disparity())
         .ok_or(BeaconChainError::UnableToReadSlot)?;
     if attestation_slot > latest_permissible_slot {
         return Err(Error::FutureSlot {
@@ -1048,7 +1050,7 @@ pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>(
 
     // Taking advantage of saturating subtraction on `Slot`.
     let earliest_permissible_slot = slot_clock
-        .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .now_with_past_tolerance(spec.maximum_gossip_clock_disparity())
         .ok_or(BeaconChainError::UnableToReadSlot)?
         - E::slots_per_epoch();
     if attestation_slot < earliest_permissible_slot {
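Threading `&ChainSpec` through `verify_propagation_slot_range` replaces the old hard-coded 500ms constant with `spec.maximum_gossip_clock_disparity()`, making the tolerance a spec parameter. The accepted window is one epoch back and the clock disparity forward; a self-contained model of the arithmetic (u64 slots, with the disparity already folded into the two clock reads):

    /// Model of the propagation window check. `now_earliest`/`now_latest` stand
    /// in for `now_with_past_tolerance`/`now_with_future_tolerance`.
    fn slot_is_propagatable(
        attestation_slot: u64,
        now_earliest: u64,
        now_latest: u64,
        slots_per_epoch: u64,
    ) -> bool {
        // Saturating subtraction mirrors `Slot`'s behaviour near genesis.
        let earliest_permissible = now_earliest.saturating_sub(slots_per_epoch);
        attestation_slot >= earliest_permissible && attestation_slot <= now_latest
    }

    fn main() {
        // At slot 100 with 32-slot epochs, slots 68..=100 are accepted.
        assert!(slot_is_propagatable(68, 100, 100, 32));
        assert!(!slot_is_propagatable(67, 100, 100, 32));
        assert!(!slot_is_propagatable(101, 100, 100, 32));
    }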
@@ -63,7 +63,6 @@ use execution_layer::{
     BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition,
     PayloadAttributes, PayloadStatus,
 };
-pub use fork_choice::CountUnrealized;
 use fork_choice::{
     AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters,
     InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses,
@@ -165,7 +164,7 @@ pub enum WhenSlotSkipped {
     ///
     /// This is how the HTTP API behaves.
     None,
-    /// If the slot it a skip slot, return the previous non-skipped block.
+    /// If the slot is a skip slot, return the previous non-skipped block.
     ///
     /// This is generally how the specification behaves.
     Prev,
@@ -198,6 +197,17 @@ pub struct PrePayloadAttributes {
     pub parent_block_number: u64,
 }
 
+/// Information about a state/block at a specific slot.
+#[derive(Debug, Clone, Copy)]
+pub struct FinalizationAndCanonicity {
+    /// True if the slot of the state or block is finalized.
+    ///
+    /// This alone DOES NOT imply that the state/block is finalized, use `self.is_finalized()`.
+    pub slot_is_finalized: bool,
+    /// True if the state or block is canonical at its slot.
+    pub canonical: bool,
+}
+
 /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already
 /// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled.
 #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
@@ -207,11 +217,6 @@ pub enum OverrideForkchoiceUpdate {
     AlreadyApplied,
 }
 
-/// The accepted clock drift for nodes gossiping blocks and attestations. See:
-///
-/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration
-pub const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500);
-
 #[derive(Debug, PartialEq)]
 pub enum AttestationProcessingOutcome {
     Processed,
@@ -427,6 +432,12 @@ pub struct BeaconChain<T: BeaconChainTypes> {
 
 type BeaconBlockAndState<T, Payload> = (BeaconBlock<T, Payload>, BeaconState<T>);
 
+impl FinalizationAndCanonicity {
+    pub fn is_finalized(self) -> bool {
+        self.slot_is_finalized && self.canonical
+    }
+}
+
 impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Checks if a block is finalized.
     /// The finalization check is done with the block slot. The block root is used to verify that
@@ -456,16 +467,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         state_root: &Hash256,
         state_slot: Slot,
     ) -> Result<bool, Error> {
+        self.state_finalization_and_canonicity(state_root, state_slot)
+            .map(FinalizationAndCanonicity::is_finalized)
+    }
+
+    /// Fetch the finalization and canonicity status of the state with `state_root`.
+    pub fn state_finalization_and_canonicity(
+        &self,
+        state_root: &Hash256,
+        state_slot: Slot,
+    ) -> Result<FinalizationAndCanonicity, Error> {
         let finalized_slot = self
             .canonical_head
             .cached_head()
             .finalized_checkpoint()
             .epoch
             .start_slot(T::EthSpec::slots_per_epoch());
-        let is_canonical = self
+        let slot_is_finalized = state_slot <= finalized_slot;
+        let canonical = self
             .state_root_at_slot(state_slot)?
             .map_or(false, |canonical_root| state_root == &canonical_root);
-        Ok(state_slot <= finalized_slot && is_canonical)
+        Ok(FinalizationAndCanonicity {
+            slot_is_finalized,
+            canonical,
+        })
     }
 
     /// Persists the head tracker and fork choice.
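Splitting the old boolean into `FinalizationAndCanonicity` lets callers tell 'older than the finalized slot but orphaned' apart from 'actually finalized'. A standalone restatement of the rule, runnable outside the crate:

    #[derive(Debug, Clone, Copy)]
    struct FinalizationAndCanonicity {
        slot_is_finalized: bool,
        canonical: bool,
    }

    impl FinalizationAndCanonicity {
        fn is_finalized(self) -> bool {
            // Finalized requires both: at or before the finalized slot,
            // *and* on the canonical chain.
            self.slot_is_finalized && self.canonical
        }
    }

    fn main() {
        // A state at a finalized slot that lost a reorg is not finalized.
        let orphaned = FinalizationAndCanonicity { slot_is_finalized: true, canonical: false };
        assert!(!orphaned.is_finalized());
    }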
@@ -784,10 +809,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ///
     /// May return a database error.
     pub fn state_root_at_slot(&self, request_slot: Slot) -> Result<Option<Hash256>, Error> {
-        if request_slot > self.slot()? {
-            return Ok(None);
-        } else if request_slot == self.spec.genesis_slot {
+        if request_slot == self.spec.genesis_slot {
             return Ok(Some(self.genesis_state_root));
+        } else if request_slot > self.slot()? {
+            return Ok(None);
         }
 
         // Check limits w.r.t historic state bounds.
@@ -864,10 +889,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ///
     /// May return a database error.
     fn block_root_at_slot_skips_none(&self, request_slot: Slot) -> Result<Option<Hash256>, Error> {
-        if request_slot > self.slot()? {
-            return Ok(None);
-        } else if request_slot == self.spec.genesis_slot {
+        if request_slot == self.spec.genesis_slot {
             return Ok(Some(self.genesis_block_root));
+        } else if request_slot > self.slot()? {
+            return Ok(None);
         }
 
         let prev_slot = request_slot.saturating_sub(1_u64);
@@ -927,10 +952,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ///
     /// May return a database error.
     fn block_root_at_slot_skips_prev(&self, request_slot: Slot) -> Result<Option<Hash256>, Error> {
-        if request_slot > self.slot()? {
-            return Ok(None);
-        } else if request_slot == self.spec.genesis_slot {
+        if request_slot == self.spec.genesis_slot {
             return Ok(Some(self.genesis_block_root));
+        } else if request_slot > self.slot()? {
+            return Ok(None);
         }
 
         // Try an optimized path of reading the root directly from the head state.
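All three lookups now test for the genesis slot before consulting the slot clock, so a genesis query presumably succeeds even when `self.slot()` cannot be read (e.g. before genesis time), instead of failing on the future-slot guard. A reduced model of the guard ordering (hypothetical types, not the real API):

    fn root_at_slot(
        request_slot: u64,
        genesis_slot: u64,
        clock_now: Option<u64>,
    ) -> Result<Option<&'static str>, &'static str> {
        // Genesis first: answers without touching the clock.
        if request_slot == genesis_slot {
            return Ok(Some("genesis_root"));
        } else if request_slot > clock_now.ok_or("unable to read slot")? {
            return Ok(None);
        }
        Ok(Some("root looked up from the database"))
    }

    fn main() {
        // Genesis queries succeed even with an unreadable clock...
        assert_eq!(root_at_slot(0, 0, None), Ok(Some("genesis_root")));
        // ...while any other slot still requires it.
        assert!(root_at_slot(5, 0, None).is_err());
    }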
@@ -2510,7 +2535,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub async fn process_chain_segment(
         self: &Arc<Self>,
         chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> ChainSegmentResult<T::EthSpec> {
         let mut imported_blocks = 0;
@@ -2579,8 +2603,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     .process_block(
                         signature_verified_block.block_root(),
                         signature_verified_block,
-                        count_unrealized,
                         notify_execution_layer,
+                        || Ok(()),
                     )
                     .await
                 {
@@ -2668,8 +2692,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self: &Arc<Self>,
         block_root: Hash256,
         unverified_block: B,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
+        publish_fn: impl FnOnce() -> Result<(), BlockError<T::EthSpec>> + Send + 'static,
     ) -> Result<Hash256, BlockError<T::EthSpec>> {
         // Start the Prometheus timer.
         let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
@@ -2688,8 +2712,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 &chain,
                 notify_execution_layer,
             )?;
+            publish_fn()?;
             chain
-                .import_execution_pending_block(execution_pending, count_unrealized)
+                .import_execution_pending_block(execution_pending)
                 .await
         };
 
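`process_block` now accepts a `publish_fn` closure that runs after consensus verification but before import, so a caller (e.g. an HTTP publish endpoint) can broadcast only blocks that verified, and can veto the import by returning an error; internal callers pass `|| Ok(())`. A minimal model of that ordering (illustrative names, not the real API):

    /// Model of the verify -> publish -> import pipeline.
    fn process_block<F>(verify_ok: bool, publish_fn: F) -> Result<&'static str, &'static str>
    where
        F: FnOnce() -> Result<(), &'static str>,
    {
        if !verify_ok {
            // Never published: the closure only runs after verification.
            return Err("verification failed");
        }
        // A publish-side error aborts the import.
        publish_fn()?;
        Ok("imported")
    }

    fn main() {
        assert_eq!(process_block(true, || Ok(())), Ok("imported"));
        assert_eq!(process_block(true, || Err("equivocation")), Err("equivocation"));
        assert_eq!(process_block(false, || Ok(())), Err("verification failed"));
    }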
@@ -2729,7 +2754,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             }
             // The block failed verification.
             Err(other) => {
-                trace!(
+                debug!(
                     self.log,
                     "Beacon block rejected";
                     "reason" => other.to_string(),
@@ -2744,10 +2769,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ///
     /// An error is returned if the block was unable to be imported. It may be partially imported
     /// (i.e., this function is not atomic).
-    async fn import_execution_pending_block(
+    pub async fn import_execution_pending_block(
         self: Arc<Self>,
         execution_pending_block: ExecutionPendingBlock<T>,
-        count_unrealized: CountUnrealized,
     ) -> Result<Hash256, BlockError<T::EthSpec>> {
         let ExecutionPendingBlock {
             block,
@@ -2808,7 +2832,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                         state,
                         confirmed_state_roots,
                         payload_verification_status,
-                        count_unrealized,
                         parent_block,
                         parent_eth1_finalization_data,
                         consensus_context,
@@ -2834,7 +2857,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         mut state: BeaconState<T::EthSpec>,
         confirmed_state_roots: Vec<Hash256>,
         payload_verification_status: PayloadVerificationStatus,
-        count_unrealized: CountUnrealized,
         parent_block: SignedBlindedBeaconBlock<T::EthSpec>,
         parent_eth1_finalization_data: Eth1FinalizationData,
         mut consensus_context: ConsensusContext<T::EthSpec>,
@@ -2902,8 +2924,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     block_delay,
                     &state,
                     payload_verification_status,
+                    self.config.progressive_balances_mode,
                     &self.spec,
-                    count_unrealized,
+                    &self.log,
                 )
                 .map_err(|e| BlockError::BeaconChainError(e.into()))?;
         }
@@ -4633,6 +4656,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             self.log,
             "Produced block on state";
             "block_size" => block_size,
+            "slot" => block.slot(),
         );
 
         metrics::observe(&metrics::BLOCK_SIZE, block_size as f64);
@@ -5548,14 +5572,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt {
                 (state, state_root)
             } else {
-                let state_root = head_block.state_root;
-                let state = self
+                let block_state_root = head_block.state_root;
+                let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch());
+                let (state_root, state) = self
                     .store
                     .get_inconsistent_state_for_attestation_verification_only(
-                        &state_root,
-                        Some(head_block.slot),
+                        &head_block_root,
+                        max_slot,
+                        block_state_root,
                     )?
-                    .ok_or(Error::MissingBeaconState(head_block.state_root))?;
+                    .ok_or(Error::MissingBeaconState(block_state_root))?;
                 (state, state_root)
             };
 
@@ -5707,13 +5733,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Since we are likely calling this during the slot we are going to propose in, don't take into
     /// account the current slot when accounting for skips.
     pub fn is_healthy(&self, parent_root: &Hash256) -> Result<ChainHealth, Error> {
+        let cached_head = self.canonical_head.cached_head();
         // Check if the merge has been finalized.
-        if let Some(finalized_hash) = self
-            .canonical_head
-            .cached_head()
-            .forkchoice_update_parameters()
-            .finalized_hash
-        {
+        if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash {
             if ExecutionBlockHash::zero() == finalized_hash {
                 return Ok(ChainHealth::PreMerge);
             }
@@ -5740,17 +5762,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
 
         // Check slots at the head of the chain.
         let prev_slot = current_slot.saturating_sub(Slot::new(1));
-        let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot());
+        let head_skips = prev_slot.saturating_sub(cached_head.head_slot());
         let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips;
 
         // Check if finalization is advancing.
         let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
-        let epochs_since_finalization = current_epoch.saturating_sub(
-            self.canonical_head
-                .cached_head()
-                .finalized_checkpoint()
-                .epoch,
-        );
+        let epochs_since_finalization =
+            current_epoch.saturating_sub(cached_head.finalized_checkpoint().epoch);
         let finalization_check = epochs_since_finalization.as_usize()
             <= self.config.builder_fallback_epochs_since_finalization;
 
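Beyond reusing one `cached_head` read, the health verdict itself is unchanged: the node is builder-healthy only if recent head skips and epochs since finalization are both within the configured fallback thresholds. A compact restatement (threshold values here are illustrative, not Lighthouse defaults):

    fn builder_fallback_checks(
        prev_slot: u64,
        head_slot: u64,
        current_epoch: u64,
        finalized_epoch: u64,
    ) -> bool {
        let builder_fallback_skips = 3; // illustrative config values
        let builder_fallback_epochs_since_finalization = 3;
        let head_skips_ok = prev_slot.saturating_sub(head_slot) <= builder_fallback_skips;
        let finalization_ok = current_epoch.saturating_sub(finalized_epoch)
            <= builder_fallback_epochs_since_finalization;
        head_skips_ok && finalization_ok
    }

    fn main() {
        assert!(builder_fallback_checks(99, 98, 10, 8));
        assert!(!builder_fallback_checks(99, 90, 10, 8)); // too many head skips
    }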
@@ -321,9 +321,17 @@ where
                 .deconstruct()
                 .0;
 
-            let state = self
+            let max_slot = self
+                .justified_checkpoint
+                .epoch
+                .start_slot(E::slots_per_epoch());
+            let (_, state) = self
                 .store
-                .get_state(&justified_block.state_root(), Some(justified_block.slot()))
+                .get_advanced_hot_state(
+                    self.justified_checkpoint.root,
+                    max_slot,
+                    justified_block.state_root(),
+                )
                 .map_err(Error::FailedToReadState)?
                 .ok_or_else(|| Error::MissingState(justified_block.state_root()))?;
 
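`get_advanced_hot_state` looks the state up by block root rather than state root and may return a copy that has been advanced past the block's own slot, up to `max_slot` (here the justified epoch's start slot), together with the root of the state actually returned. A toy model of that contract, with a `HashMap` standing in for the hot database:

    use std::collections::HashMap;

    /// Per block root the store may hold several (slot, state_root) versions:
    /// the original plus slot-advanced copies. Return the newest one <= max_slot.
    fn get_advanced_hot_state(
        store: &HashMap<&str, Vec<(u64, &'static str)>>,
        block_root: &str,
        max_slot: u64,
    ) -> Option<(u64, &'static str)> {
        store
            .get(block_root)?
            .iter()
            .filter(|(slot, _)| *slot <= max_slot)
            .max_by_key(|(slot, _)| *slot)
            .copied()
    }

    fn main() {
        let mut store = HashMap::new();
        store.insert("justified", vec![(64, "state_at_64"), (96, "advanced_to_96")]);
        // Request the state advanced to at most the justified epoch start.
        assert_eq!(
            get_advanced_hot_state(&store, "justified", 96),
            Some((96, "advanced_to_96"))
        );
    }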
@@ -135,7 +135,7 @@ impl BeaconProposerCache {
 
 /// Compute the proposer duties using the head state without cache.
 pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>(
-    current_epoch: Epoch,
+    request_epoch: Epoch,
     chain: &BeaconChain<T>,
 ) -> Result<(Vec<usize>, Hash256, ExecutionStatus, Fork), BeaconChainError> {
     // Atomically collect information about the head whilst holding the canonical head `Arc` as
@@ -159,7 +159,7 @@ pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>(
         .ok_or(BeaconChainError::HeadMissingFromForkChoice(head_block_root))?;
 
     // Advance the state into the requested epoch.
-    ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?;
+    ensure_state_is_in_epoch(&mut state, head_state_root, request_epoch, &chain.spec)?;
 
     let indices = state
         .get_beacon_proposer_indices(&chain.spec)
@@ -1,4 +1,4 @@
-use serde_derive::Serialize;
+use serde::Serialize;
 use std::sync::Arc;
 use types::{
     beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256,
@@ -52,13 +52,14 @@ use crate::execution_payload::{
     is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
     AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
 };
+use crate::observed_block_producers::SeenBlock;
 use crate::snapshot_cache::PreProcessingSnapshot;
 use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
 use crate::validator_pubkey_cache::ValidatorPubkeyCache;
 use crate::{
     beacon_chain::{
         BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
-        MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
+        VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
     },
     metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
 };
@@ -141,8 +142,6 @@ pub enum BlockError<T: EthSpec> {
     /// It's unclear if this block is valid, but it cannot be processed without already knowing
     /// its parent.
     ParentUnknown(Arc<SignedBeaconBlock<T>>),
-    /// The block skips too many slots and is a DoS risk.
-    TooManySkippedSlots { parent_slot: Slot, block_slot: Slot },
     /// The block slot is greater than the present slot.
     ///
     /// ## Peer scoring
@@ -183,13 +182,6 @@ pub enum BlockError<T: EthSpec> {
     ///
     /// The block is valid and we have already imported a block with this hash.
     BlockIsAlreadyKnown,
-    /// A block for this proposer and slot has already been observed.
-    ///
-    /// ## Peer scoring
-    ///
-    /// The `proposer` has already proposed a block at this slot. The existing block may or may not
-    /// be equal to the given block.
-    RepeatProposal { proposer: u64, slot: Slot },
     /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
     ///
     /// ## Peer scoring
@@ -285,6 +277,13 @@ pub enum BlockError<T: EthSpec> {
     /// problems to worry about than losing peers, and we're doing the network a favour by
     /// disconnecting.
     ParentExecutionPayloadInvalid { parent_root: Hash256 },
+    /// The block is a slashable equivocation from the proposer.
+    ///
+    /// ## Peer scoring
+    ///
+    /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
+    /// we penalise them with a mid-tolerance error.
+    Slashable,
 }
 
 /// Returned when block validation failed due to some issue verifying
@@ -633,6 +632,40 @@ pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
     pub payload_verification_handle: PayloadVerificationHandle<T::EthSpec>,
 }
 
+pub trait IntoGossipVerifiedBlock<T: BeaconChainTypes>: Sized {
+    fn into_gossip_verified_block(
+        self,
+        chain: &BeaconChain<T>,
+    ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>>;
+    fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>>;
+}
+
+impl<T: BeaconChainTypes> IntoGossipVerifiedBlock<T> for GossipVerifiedBlock<T> {
+    fn into_gossip_verified_block(
+        self,
+        _chain: &BeaconChain<T>,
+    ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
+        Ok(self)
+    }
+
+    fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
+        self.block.clone()
+    }
+}
+
+impl<T: BeaconChainTypes> IntoGossipVerifiedBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>> {
+    fn into_gossip_verified_block(
+        self,
+        chain: &BeaconChain<T>,
+    ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
+        GossipVerifiedBlock::new(self, chain)
+    }
+
+    fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
+        self.clone()
+    }
+}
+
 /// Implemented on types that can be converted into a `ExecutionPendingBlock`.
 ///
 /// Used to allow functions to accept blocks at various stages of verification.
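The trait gives publish paths one entry point for both raw and pre-verified blocks: an `Arc<SignedBeaconBlock>` is gossip-verified on the way in, while a `GossipVerifiedBlock` passes straight through, and `inner` exposes the underlying block (e.g. for broadcasting) either way. A hedged sketch of a generic consumer; `publish_block` is a made-up name, not an API from this diff:

    fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
        chain: &BeaconChain<T>,
        block: B,
    ) -> Result<(), BlockError<T::EthSpec>> {
        // Grab the inner block before `block` is consumed, e.g. to broadcast it.
        let for_broadcast = block.inner();
        let verified = block.into_gossip_verified_block(chain)?;
        // ... broadcast `for_broadcast`, then import `verified` ...
        let _ = (for_broadcast, verified);
        Ok(())
    }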
@@ -697,7 +730,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         // Do not gossip or process blocks from future slots.
         let present_slot_with_tolerance = chain
             .slot_clock
-            .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+            .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
             .ok_or(BeaconChainError::UnableToReadSlot)?;
         if block.slot() > present_slot_with_tolerance {
             return Err(BlockError::FutureSlot {
@@ -721,35 +754,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         // reboot if the `observed_block_producers` cache is empty. In that case, without this
         // check, we will load the parent and state from disk only to find out later that we
         // already know this block.
-        if chain
-            .canonical_head
-            .fork_choice_read_lock()
-            .contains_block(&block_root)
-        {
+        let fork_choice_read_lock = chain.canonical_head.fork_choice_read_lock();
+        if fork_choice_read_lock.contains_block(&block_root) {
             return Err(BlockError::BlockIsAlreadyKnown);
         }
 
-        // Check that we have not already received a block with a valid signature for this slot.
-        if chain
-            .observed_block_producers
-            .read()
-            .proposer_has_been_observed(block.message())
-            .map_err(|e| BlockError::BeaconChainError(e.into()))?
-        {
-            return Err(BlockError::RepeatProposal {
-                proposer: block.message().proposer_index(),
-                slot: block.slot(),
-            });
-        }
-
         // Do not process a block that doesn't descend from the finalized root.
         //
         // We check this *before* we load the parent so that we can return a more detailed error.
-        check_block_is_finalized_checkpoint_or_descendant(
-            chain,
-            &chain.canonical_head.fork_choice_write_lock(),
-            &block,
-        )?;
+        check_block_is_finalized_checkpoint_or_descendant(chain, &fork_choice_read_lock, &block)?;
+        drop(fork_choice_read_lock);
 
         let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
         let (parent_block, block) = verify_parent_block_is_known(chain, block)?;
@@ -786,9 +800,6 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
                 parent_block.root
             };
 
-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent_block.slot, block.message())?;
-
         // We assign to a variable instead of using `if let Some` directly to ensure we drop the
         // write lock before trying to acquire it again in the `else` clause.
         let proposer_opt = chain
@@ -860,17 +871,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         //
         // It's important to double-check that the proposer still hasn't been observed so we don't
         // have a race-condition when verifying two blocks simultaneously.
-        if chain
+        match chain
             .observed_block_producers
             .write()
-            .observe_proposer(block.message())
+            .observe_proposal(block_root, block.message())
             .map_err(|e| BlockError::BeaconChainError(e.into()))?
         {
-            return Err(BlockError::RepeatProposal {
-                proposer: block.message().proposer_index(),
-                slot: block.slot(),
-            });
-        }
+            SeenBlock::Slashable => return Err(BlockError::Slashable),
+            SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown),
+            SeenBlock::UniqueNonSlashable => {}
+        };
 
         if block.message().proposer_index() != expected_proposer as u64 {
             return Err(BlockError::IncorrectBlockProposer {
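`observe_proposal` keys on `(proposer, slot)` and remembers which block roots it has seen, so an identical root is a harmless `Duplicate` while a second, different root from the same proposer and slot is flagged `Slashable` (the equivocation case that replaces the old blanket `RepeatProposal`). A self-contained model of that cache (std collections in place of the real `ObservedBlockProducers`):

    use std::collections::{HashMap, HashSet};

    #[derive(Debug, PartialEq)]
    enum SeenBlock {
        UniqueNonSlashable,
        Duplicate,
        Slashable,
    }

    #[derive(Default)]
    struct ObservedProposals {
        // (proposer_index, slot) -> block roots seen for that pair.
        seen: HashMap<(u64, u64), HashSet<[u8; 32]>>,
    }

    impl ObservedProposals {
        fn observe_proposal(&mut self, proposer: u64, slot: u64, root: [u8; 32]) -> SeenBlock {
            let roots = self.seen.entry((proposer, slot)).or_default();
            if roots.contains(&root) {
                SeenBlock::Duplicate
            } else if roots.is_empty() {
                roots.insert(root);
                SeenBlock::UniqueNonSlashable
            } else {
                // A different root for the same proposer/slot: equivocation.
                roots.insert(root);
                SeenBlock::Slashable
            }
        }
    }

    fn main() {
        let mut cache = ObservedProposals::default();
        assert_eq!(cache.observe_proposal(7, 42, [1; 32]), SeenBlock::UniqueNonSlashable);
        assert_eq!(cache.observe_proposal(7, 42, [1; 32]), SeenBlock::Duplicate);
        assert_eq!(cache.observe_proposal(7, 42, [2; 32]), SeenBlock::Slashable);
    }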
@@ -942,9 +952,6 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
 
         let (mut parent, block) = load_parent(block_root, block, chain)?;
 
-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
-
         let state = cheap_state_advance_to_obtain_committees(
             &mut parent.pre_state,
             parent.beacon_state_root,
@@ -1109,6 +1116,12 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
         chain: &Arc<BeaconChain<T>>,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> Result<Self, BlockError<T::EthSpec>> {
+        chain
+            .observed_block_producers
+            .write()
+            .observe_proposal(block_root, block.message())
+            .map_err(|e| BlockError::BeaconChainError(e.into()))?;
+
         if let Some(parent) = chain
             .canonical_head
             .fork_choice_read_lock()
@@ -1135,9 +1148,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
             return Err(BlockError::ParentUnknown(block));
         }
 
-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
-
         /*
          *  Perform cursory checks to see if the block is even worth processing.
          */
@@ -1245,7 +1255,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
 
         // Perform a sanity check on the pre-state.
         let parent_slot = parent.beacon_block.slot();
-        if state.slot() < parent_slot || state.slot() > parent_slot + 1 {
+        if state.slot() < parent_slot || state.slot() > block.slot() {
             return Err(BeaconChainError::BadPreState {
                 parent_root: parent.beacon_block_root,
                 parent_slot,
@@ -1492,30 +1502,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
     }
 }
 
-/// Check that the count of skip slots between the block and its parent does not exceed our maximum
-/// value.
-///
-/// Whilst this is not part of the specification, we include this to help prevent us from DoS
-/// attacks. In times of dire network circumstance, the user can configure the
-/// `import_max_skip_slots` value.
-fn check_block_skip_slots<T: BeaconChainTypes>(
-    chain: &BeaconChain<T>,
-    parent_slot: Slot,
-    block: BeaconBlockRef<'_, T::EthSpec>,
-) -> Result<(), BlockError<T::EthSpec>> {
-    // Reject any block that exceeds our limit on skipped slots.
-    if let Some(max_skip_slots) = chain.config.import_max_skip_slots {
-        if block.slot() > parent_slot + max_skip_slots {
-            return Err(BlockError::TooManySkippedSlots {
-                parent_slot,
-                block_slot: block.slot(),
-            });
-        }
-    }
-
-    Ok(())
-}
-
 /// Returns `Ok(())` if the block's slot is greater than the anchor block's slot (if any).
 fn check_block_against_anchor_slot<T: BeaconChainTypes>(
     block: BeaconBlockRef<'_, T::EthSpec>,
@@ -1768,13 +1754,18 @@ fn load_parent<T: BeaconChainTypes>(
                 BlockError::from(BeaconChainError::MissingBeaconBlock(block.parent_root()))
             })?;
 
-        // Load the parent blocks state from the database, returning an error if it is not found.
+        // Load the parent block's state from the database, returning an error if it is not found.
         // It is an error because if we know the parent block we should also know the parent state.
-        let parent_state_root = parent_block.state_root();
-        let parent_state = chain
-            .get_state(&parent_state_root, Some(parent_block.slot()))?
+        // Retrieve any state that is advanced through to at most `block.slot()`: this is
+        // particularly important if `block` descends from the finalized/split block, but at a slot
+        // prior to the finalized slot (which is invalid and inaccessible in our DB schema).
+        let (parent_state_root, parent_state) = chain
+            .store
+            .get_advanced_hot_state(root, block.slot(), parent_block.state_root())?
            .ok_or_else(|| {
-                BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root))
+                BeaconChainError::DBInconsistent(
+                    format!("Missing state for parent block {root:?}",),
+                )
             })?;
 
         metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES);
@@ -18,14 +18,15 @@ use crate::{
 };
 use eth1::Config as Eth1Config;
 use execution_layer::ExecutionLayer;
-use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses};
+use fork_choice::{ForkChoice, ResetPayloadStatuses};
 use futures::channel::mpsc::Sender;
 use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::RwLock;
 use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
 use slasher::Slasher;
-use slog::{crit, error, info, Logger};
+use slog::{crit, debug, error, info, Logger};
 use slot_clock::{SlotClock, TestingSlotClock};
+use state_processing::per_slot_processing;
 use std::marker::PhantomData;
 use std::sync::Arc;
 use std::time::Duration;
| @ -287,7 +288,7 @@ where | |||||||
|         let genesis_state = store |         let genesis_state = store | ||||||
|             .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) |             .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) | ||||||
|             .map_err(|e| descriptive_db_error("genesis state", &e))? |             .map_err(|e| descriptive_db_error("genesis state", &e))? | ||||||
|             .ok_or("Genesis block not found in store")?; |             .ok_or("Genesis state not found in store")?; | ||||||
| 
 | 
 | ||||||
|         self.genesis_time = Some(genesis_state.genesis_time()); |         self.genesis_time = Some(genesis_state.genesis_time()); | ||||||
| 
 | 
 | ||||||
| @ -338,7 +339,7 @@ where | |||||||
|         let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; |         let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; | ||||||
| 
 | 
 | ||||||
|         beacon_state |         beacon_state | ||||||
|             .build_all_caches(&self.spec) |             .build_caches(&self.spec) | ||||||
|             .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; |             .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; | ||||||
| 
 | 
 | ||||||
|         let beacon_state_root = beacon_block.message().state_root(); |         let beacon_state_root = beacon_block.message().state_root(); | ||||||
| @ -382,6 +383,16 @@ where | |||||||
|         let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; |         let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; | ||||||
|         self = updated_builder; |         self = updated_builder; | ||||||
| 
 | 
 | ||||||
|  |         // Stage the database's metadata fields for atomic storage when `build` is called.
 | ||||||
|  |         // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent
 | ||||||
|  |         // historic states from being retained (unless `--reconstruct-historic-states` is set).
 | ||||||
|  |         let retain_historic_states = self.chain_config.reconstruct_historic_states; | ||||||
|  |         self.pending_io_batch.push( | ||||||
|  |             store | ||||||
|  |                 .init_anchor_info(genesis.beacon_block.message(), retain_historic_states) | ||||||
|  |                 .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?, | ||||||
|  |         ); | ||||||
|  | 
 | ||||||
|         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) |         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) | ||||||
|             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; |             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; | ||||||
|         let current_slot = None; |         let current_slot = None; | ||||||
| @ -408,46 +419,48 @@ where | |||||||
|         weak_subj_block: SignedBeaconBlock<TEthSpec>, |         weak_subj_block: SignedBeaconBlock<TEthSpec>, | ||||||
|         genesis_state: BeaconState<TEthSpec>, |         genesis_state: BeaconState<TEthSpec>, | ||||||
|     ) -> Result<Self, String> { |     ) -> Result<Self, String> { | ||||||
|         let store = self.store.clone().ok_or("genesis_state requires a store")?; |         let store = self | ||||||
|  |             .store | ||||||
|  |             .clone() | ||||||
|  |             .ok_or("weak_subjectivity_state requires a store")?; | ||||||
|  |         let log = self | ||||||
|  |             .log | ||||||
|  |             .as_ref() | ||||||
|  |             .ok_or("weak_subjectivity_state requires a log")?; | ||||||
| 
 | 
 | ||||||
|         let weak_subj_slot = weak_subj_state.slot(); |         // Ensure the state is advanced to an epoch boundary.
 | ||||||
|         let weak_subj_block_root = weak_subj_block.canonical_root(); |         let slots_per_epoch = TEthSpec::slots_per_epoch(); | ||||||
|         let weak_subj_state_root = weak_subj_block.state_root(); |         if weak_subj_state.slot() % slots_per_epoch != 0 { | ||||||
| 
 |             debug!( | ||||||
|         // Check that the given block lies on an epoch boundary. Due to the database only storing
 |                 log, | ||||||
|         // full states on epoch boundaries and at restore points it would be difficult to support
 |                 "Advancing checkpoint state to boundary"; | ||||||
|         // starting from a mid-epoch state.
 |                 "state_slot" => weak_subj_state.slot(), | ||||||
|         if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 { |                 "block_slot" => weak_subj_block.slot(), | ||||||
|             return Err(format!( |             ); | ||||||
|                 "Checkpoint block at slot {} is not aligned to epoch start. \ |             while weak_subj_state.slot() % slots_per_epoch != 0 { | ||||||
|                  Please supply an aligned checkpoint with block.slot % 32 == 0",
 |                 per_slot_processing(&mut weak_subj_state, None, &self.spec) | ||||||
|                 weak_subj_block.slot(), |                     .map_err(|e| format!("Error advancing state: {e:?}"))?; | ||||||
|             )); |             } | ||||||
|         } |  | ||||||
| 
 |  | ||||||
|         // Check that the block and state have consistent slots and state roots.
 |  | ||||||
|         if weak_subj_state.slot() != weak_subj_block.slot() { |  | ||||||
|             return Err(format!( |  | ||||||
|                 "Slot of snapshot block ({}) does not match snapshot state ({})", |  | ||||||
|                 weak_subj_block.slot(), |  | ||||||
|                 weak_subj_state.slot(), |  | ||||||
|             )); |  | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         // Prime all caches before storing the state in the database and computing the tree hash
 |         // Prime all caches before storing the state in the database and computing the tree hash
 | ||||||
|         // root.
 |         // root.
 | ||||||
|         weak_subj_state |         weak_subj_state | ||||||
|             .build_all_caches(&self.spec) |             .build_caches(&self.spec) | ||||||
|             .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; |             .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; | ||||||
| 
 |         let weak_subj_state_root = weak_subj_state | ||||||
|         let computed_state_root = weak_subj_state |  | ||||||
|             .update_tree_hash_cache() |             .update_tree_hash_cache() | ||||||
|             .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?; |             .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?; | ||||||
| 
 | 
 | ||||||
|         if weak_subj_state_root != computed_state_root { |         let weak_subj_slot = weak_subj_state.slot(); | ||||||
|  |         let weak_subj_block_root = weak_subj_block.canonical_root(); | ||||||
|  | 
 | ||||||
|  |         // Validate the state's `latest_block_header` against the checkpoint block.
 | ||||||
|  |         let state_latest_block_root = weak_subj_state.get_latest_block_root(weak_subj_state_root); | ||||||
|  |         if weak_subj_block_root != state_latest_block_root { | ||||||
|             return Err(format!( |             return Err(format!( | ||||||
|                 "Snapshot state root does not match block, expected: {:?}, got: {:?}", |                 "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}", | ||||||
|                 weak_subj_state_root, computed_state_root |                 weak_subj_block_root, state_latest_block_root | ||||||
|             )); |             )); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
| @ -464,10 +477,25 @@ where | |||||||
| 
 | 
 | ||||||
|         // Set the store's split point *before* storing genesis so that genesis is stored
 |         // Set the store's split point *before* storing genesis so that genesis is stored
 | ||||||
|         // immediately in the freezer DB.
 |         // immediately in the freezer DB.
 | ||||||
|         store.set_split(weak_subj_slot, weak_subj_state_root); |         store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root); | ||||||
|         let (_, updated_builder) = self.set_genesis_state(genesis_state)?; |         let (_, updated_builder) = self.set_genesis_state(genesis_state)?; | ||||||
|         self = updated_builder; |         self = updated_builder; | ||||||
| 
 | 
 | ||||||
|  |         // Fill in the linear block roots between the checkpoint block's slot and the aligned
 | ||||||
|  |         // state's slot. All slots less than the block's slot will be handled by block backfill,
 | ||||||
|  |         // while states greater or equal to the checkpoint state will be handled by `migrate_db`.
 | ||||||
|  |         let block_root_batch = store | ||||||
|  |             .store_frozen_block_root_at_skip_slots( | ||||||
|  |                 weak_subj_block.slot(), | ||||||
|  |                 weak_subj_state.slot(), | ||||||
|  |                 weak_subj_block_root, | ||||||
|  |             ) | ||||||
|  |             .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; | ||||||
|  |         store | ||||||
|  |             .cold_db | ||||||
|  |             .do_atomically(block_root_batch) | ||||||
|  |             .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; | ||||||
|  | 
 | ||||||
|         // Write the state and block non-atomically, it doesn't matter if they're forgotten
 |         // Write the state and block non-atomically, it doesn't matter if they're forgotten
 | ||||||
|         // about on a crash restart.
 |         // about on a crash restart.
 | ||||||
|         store |         store | ||||||
| @ -480,10 +508,11 @@ where | |||||||
|         // Stage the database's metadata fields for atomic storage when `build` is called.
 |         // Stage the database's metadata fields for atomic storage when `build` is called.
 | ||||||
|         // This prevents the database from restarting in an inconsistent state if the anchor
 |         // This prevents the database from restarting in an inconsistent state if the anchor
 | ||||||
|         // info or split point is written before the `PersistedBeaconChain`.
 |         // info or split point is written before the `PersistedBeaconChain`.
 | ||||||
|  |         let retain_historic_states = self.chain_config.reconstruct_historic_states; | ||||||
|         self.pending_io_batch.push(store.store_split_in_batch()); |         self.pending_io_batch.push(store.store_split_in_batch()); | ||||||
|         self.pending_io_batch.push( |         self.pending_io_batch.push( | ||||||
|             store |             store | ||||||
|                 .init_anchor_info(weak_subj_block.message()) |                 .init_anchor_info(weak_subj_block.message(), retain_historic_states) | ||||||
|                 .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, |                 .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, | ||||||
|         ); |         ); | ||||||
| 
 | 
 | ||||||
| @ -503,13 +532,12 @@ where | |||||||
|         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot) |         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot) | ||||||
|             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; |             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; | ||||||
| 
 | 
 | ||||||
|         let current_slot = Some(snapshot.beacon_block.slot()); |  | ||||||
|         let fork_choice = ForkChoice::from_anchor( |         let fork_choice = ForkChoice::from_anchor( | ||||||
|             fc_store, |             fc_store, | ||||||
|             snapshot.beacon_block_root, |             snapshot.beacon_block_root, | ||||||
|             &snapshot.beacon_block, |             &snapshot.beacon_block, | ||||||
|             &snapshot.beacon_state, |             &snapshot.beacon_state, | ||||||
|             current_slot, |             Some(weak_subj_slot), | ||||||
|             &self.spec, |             &self.spec, | ||||||
|         ) |         ) | ||||||
|         .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; |         .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; | ||||||
| @ -672,9 +700,8 @@ where | |||||||
|                 Err(e) => return Err(descriptive_db_error("head block", &e)), |                 Err(e) => return Err(descriptive_db_error("head block", &e)), | ||||||
|             }; |             }; | ||||||
| 
 | 
 | ||||||
|         let head_state_root = head_block.state_root(); |         let (_head_state_root, head_state) = store | ||||||
|         let head_state = store |             .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) | ||||||
|             .get_state(&head_state_root, Some(head_block.slot())) |  | ||||||
|             .map_err(|e| descriptive_db_error("head state", &e))? |             .map_err(|e| descriptive_db_error("head state", &e))? | ||||||
|             .ok_or("Head state not found in store")?; |             .ok_or("Head state not found in store")?; | ||||||
| 
 | 
 | ||||||
| @ -687,7 +714,8 @@ where | |||||||
|                 store.clone(), |                 store.clone(), | ||||||
|                 Some(current_slot), |                 Some(current_slot), | ||||||
|                 &self.spec, |                 &self.spec, | ||||||
|                 CountUnrealized::True, |                 self.chain_config.progressive_balances_mode, | ||||||
|  |                 &log, | ||||||
|             )?; |             )?; | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
| @ -701,7 +729,7 @@ where | |||||||
| 
 | 
 | ||||||
|         head_snapshot |         head_snapshot | ||||||
|             .beacon_state |             .beacon_state | ||||||
|             .build_all_caches(&self.spec) |             .build_caches(&self.spec) | ||||||
|             .map_err(|e| format!("Failed to build state caches: {:?}", e))?; |             .map_err(|e| format!("Failed to build state caches: {:?}", e))?; | ||||||
| 
 | 
 | ||||||
|         // Perform a check to ensure that the finalization points of the head and fork choice are
 |         // Perform a check to ensure that the finalization points of the head and fork choice are
 | ||||||
| @ -827,7 +855,6 @@ where | |||||||
|             observed_sync_aggregators: <_>::default(), |             observed_sync_aggregators: <_>::default(), | ||||||
|             // TODO: allow for persisting and loading the pool from disk.
 |             // TODO: allow for persisting and loading the pool from disk.
 | ||||||
|             observed_block_producers: <_>::default(), |             observed_block_producers: <_>::default(), | ||||||
|             // TODO: allow for persisting and loading the pool from disk.
 |  | ||||||
|             observed_voluntary_exits: <_>::default(), |             observed_voluntary_exits: <_>::default(), | ||||||
|             observed_proposer_slashings: <_>::default(), |             observed_proposer_slashings: <_>::default(), | ||||||
|             observed_attester_slashings: <_>::default(), |             observed_attester_slashings: <_>::default(), | ||||||
|  | |||||||
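The notable behavioural change in the builder hunks above is that a mid-epoch checkpoint state is no longer rejected; it is advanced slot by slot until it lands on an epoch boundary. A self-contained toy version of that loop, with `per_slot_processing` reduced to a bare slot increment (the real function also performs epoch processing when crossing a boundary):

```rust
const SLOTS_PER_EPOCH: u64 = 32;

#[derive(Debug)]
struct MockState {
    slot: u64,
}

// Stand-in for `per_slot_processing`: just bump the slot. The real function
// also runs epoch processing when the transition crosses an epoch boundary.
fn per_slot_processing(state: &mut MockState) -> Result<(), String> {
    state.slot = state.slot.checked_add(1).ok_or("slot overflow")?;
    Ok(())
}

fn advance_to_epoch_boundary(state: &mut MockState) -> Result<(), String> {
    while state.slot % SLOTS_PER_EPOCH != 0 {
        per_slot_processing(state)?;
    }
    Ok(())
}

fn main() {
    let mut state = MockState { slot: 1000 }; // a mid-epoch checkpoint state
    advance_to_epoch_boundary(&mut state).unwrap();
    assert_eq!(state.slot, 1024); // the next multiple of 32
    println!("advanced to slot {}", state.slot);
}
```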
@@ -47,7 +47,8 @@ use crate::{
 };
 use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead};
 use fork_choice::{
-    ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses,
+    ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock,
+    ResetPayloadStatuses,
 };
 use itertools::process_results;
 use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
@@ -298,10 +299,10 @@ impl<T: BeaconChainTypes> CanonicalHead<T> {
         let beacon_block = store
             .get_full_block(&beacon_block_root)?
             .ok_or(Error::MissingBeaconBlock(beacon_block_root))?;
-        let beacon_state_root = beacon_block.state_root();
-        let beacon_state = store
-            .get_state(&beacon_state_root, Some(beacon_block.slot()))?
-            .ok_or(Error::MissingBeaconState(beacon_state_root))?;
+        let current_slot = fork_choice.fc_store().get_current_slot();
+        let (_, beacon_state) = store
+            .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())?
+            .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?;

         let snapshot = BeaconSnapshot {
             beacon_block_root,
@@ -669,10 +670,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                         .get_full_block(&new_view.head_block_root)?
                         .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?;

-                    let beacon_state_root = beacon_block.state_root();
-                    let beacon_state: BeaconState<T::EthSpec> = self
-                        .get_state(&beacon_state_root, Some(beacon_block.slot()))?
-                        .ok_or(Error::MissingBeaconState(beacon_state_root))?;
+                    let (_, beacon_state) = self
+                        .store
+                        .get_advanced_hot_state(
+                            new_view.head_block_root,
+                            current_slot,
+                            beacon_block.state_root(),
+                        )?
+                        .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?;

                     Ok(BeaconSnapshot {
                         beacon_block: Arc::new(beacon_block),

@@ -1,7 +1,7 @@
 pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
-use serde_derive::{Deserialize, Serialize};
+use serde::{Deserialize, Serialize};
 use std::time::Duration;
-use types::{Checkpoint, Epoch};
+use types::{Checkpoint, Epoch, ProgressiveBalancesMode};

 pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20);
 pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2);
@@ -17,8 +17,7 @@ pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24;

 #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
 pub struct ChainConfig {
-    /// Maximum number of slots to skip when importing a consensus message (e.g., block,
-    /// attestation, etc).
+    /// Maximum number of slots to skip when importing an attestation.
     ///
     /// If `None`, there is no limit.
     pub import_max_skip_slots: Option<u64>,
@@ -80,8 +79,10 @@ pub struct ChainConfig {
     ///
     /// This is useful for block builders and testing.
     pub always_prepare_payload: bool,
-    /// Whether backfill sync processing should be rate-limited.
-    pub enable_backfill_rate_limiting: bool,
+    /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation.
+    pub progressive_balances_mode: ProgressiveBalancesMode,
+    /// Number of epochs between each migration of data from the hot database to the freezer.
+    pub epochs_per_migration: u64,
 }

 impl Default for ChainConfig {
@@ -111,7 +112,8 @@ impl Default for ChainConfig {
             shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE,
             genesis_backfill: false,
             always_prepare_payload: false,
-            enable_backfill_rate_limiting: true,
+            progressive_balances_mode: ProgressiveBalancesMode::Checked,
+            epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION,
         }
     }
 }
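For readers wiring up the two new `ChainConfig` fields, the snippet below is a reduced, self-contained imitation of the struct as it appears in this diff; the real struct has many more fields and `ProgressiveBalancesMode` has more variants than the single one visible here.

```rust
// Reduced imitation of the config shown in the diff; illustrative only.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ProgressiveBalancesMode {
    Checked,
}

#[derive(Debug)]
struct ChainConfig {
    progressive_balances_mode: ProgressiveBalancesMode,
    epochs_per_migration: u64,
}

impl Default for ChainConfig {
    fn default() -> Self {
        Self {
            progressive_balances_mode: ProgressiveBalancesMode::Checked,
            epochs_per_migration: 1, // mirrors DEFAULT_EPOCHS_PER_MIGRATION
        }
    }
}

fn main() {
    // Struct-update syntax: override one field, keep the other defaults.
    let config = ChainConfig {
        epochs_per_migration: 4,
        ..ChainConfig::default()
    };
    println!("{config:?}");
}
```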
@@ -24,7 +24,7 @@ use state_processing::{
     },
     signature_sets::Error as SignatureSetError,
     state_advance::Error as StateAdvanceError,
-    BlockProcessingError, BlockReplayError, SlotProcessingError,
+    BlockProcessingError, BlockReplayError, EpochProcessingError, SlotProcessingError,
 };
 use std::time::Duration;
 use task_executor::ShutdownReason;
@@ -60,6 +60,7 @@ pub enum BeaconChainError {
     MissingBeaconBlock(Hash256),
     MissingBeaconState(Hash256),
     SlotProcessingError(SlotProcessingError),
+    EpochProcessingError(EpochProcessingError),
     StateAdvanceError(StateAdvanceError),
     UnableToAdvanceState(String),
     NoStateForAttestation {
@@ -145,6 +146,8 @@ pub enum BeaconChainError {
     BlockVariantLacksExecutionPayload(Hash256),
     ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, Box<execution_layer::Error>),
     EngineGetCapabilititesFailed(Box<execution_layer::Error>),
+    ExecutionLayerGetBlockByNumberFailed(Box<execution_layer::Error>),
+    ExecutionLayerGetBlockByHashFailed(Box<execution_layer::Error>),
     BlockHashMissingFromExecutionLayer(ExecutionBlockHash),
     InconsistentPayloadReconstructed {
         slot: Slot,
@@ -213,9 +216,11 @@ pub enum BeaconChainError {
     BlsToExecutionConflictsWithPool,
     InconsistentFork(InconsistentFork),
     ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>),
+    UnableToPublish,
 }

 easy_from_to!(SlotProcessingError, BeaconChainError);
+easy_from_to!(EpochProcessingError, BeaconChainError);
 easy_from_to!(AttestationValidationError, BeaconChainError);
 easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError);
 easy_from_to!(ExitValidationError, BeaconChainError);
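`easy_from_to!` itself is not shown in this diff; judging by its call sites it plausibly expands to a plain `From` impl, roughly like the following stand-in (local types, not the real Lighthouse ones), which is what lets `?` convert an `EpochProcessingError` into a `BeaconChainError` automatically:

```rust
// Local stand-ins, not the real Lighthouse types.
#[derive(Debug)]
struct EpochProcessingError(String);

#[derive(Debug)]
enum BeaconChainError {
    EpochProcessingError(EpochProcessingError),
}

// Plausible expansion of `easy_from_to!(EpochProcessingError, BeaconChainError)`.
impl From<EpochProcessingError> for BeaconChainError {
    fn from(e: EpochProcessingError) -> Self {
        BeaconChainError::EpochProcessingError(e)
    }
}

fn epoch_step() -> Result<(), EpochProcessingError> {
    Err(EpochProcessingError("example failure".into()))
}

fn run() -> Result<(), BeaconChainError> {
    // `?` now converts the inner error via the generated `From` impl.
    epoch_step()?;
    Ok(())
}

fn main() {
    println!("{:?}", run());
}
```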
@@ -21,8 +21,11 @@ pub struct ServerSentEventHandler<T: EthSpec> {
 }

 impl<T: EthSpec> ServerSentEventHandler<T> {
-    pub fn new(log: Logger) -> Self {
-        Self::new_with_capacity(log, DEFAULT_CHANNEL_CAPACITY)
+    pub fn new(log: Logger, capacity_multiplier: usize) -> Self {
+        Self::new_with_capacity(
+            log,
+            capacity_multiplier.saturating_mul(DEFAULT_CHANNEL_CAPACITY),
+        )
     }

     pub fn new_with_capacity(log: Logger, capacity: usize) -> Self {
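The new `capacity_multiplier` parameter scales the default SSE channel size while guarding against overflow. A tiny sketch of the arithmetic, with the default capacity value assumed for illustration:

```rust
// Assumed default for illustration; the real constant lives alongside the
// handler above and may differ.
const DEFAULT_CHANNEL_CAPACITY: usize = 16;

fn channel_capacity(capacity_multiplier: usize) -> usize {
    capacity_multiplier.saturating_mul(DEFAULT_CHANNEL_CAPACITY)
}

fn main() {
    assert_eq!(channel_capacity(4), 64);
    assert_eq!(channel_capacity(usize::MAX), usize::MAX); // saturates, no panic
    println!("ok");
}
```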
@@ -1,5 +1,5 @@
 use crate::{BeaconForkChoiceStore, BeaconSnapshot};
-use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus};
+use fork_choice::{ForkChoice, PayloadVerificationStatus};
 use itertools::process_results;
 use slog::{info, warn, Logger};
 use state_processing::state_advance::complete_state_advance;
@@ -10,7 +10,10 @@ use state_processing::{
 use std::sync::Arc;
 use std::time::Duration;
 use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore};
-use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot};
+use types::{
+    BeaconState, ChainSpec, EthSpec, ForkName, Hash256, ProgressiveBalancesMode, SignedBeaconBlock,
+    Slot,
+};

 const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \
                                   consider deleting it by running with the --purge-db flag.";
@@ -100,7 +103,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
     store: Arc<HotColdDB<E, Hot, Cold>>,
     current_slot: Option<Slot>,
     spec: &ChainSpec,
-    count_unrealized_config: CountUnrealized,
+    progressive_balances_mode: ProgressiveBalancesMode,
+    log: &Logger,
 ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
     // Fetch finalized block.
     let finalized_checkpoint = head_state.finalized_checkpoint();
@@ -166,8 +170,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
         .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?;

     let mut state = finalized_snapshot.beacon_state;
-    let blocks_len = blocks.len();
-    for (i, block) in blocks.into_iter().enumerate() {
+    for block in blocks {
         complete_state_advance(&mut state, None, block.slot(), spec)
             .map_err(|e| format!("State advance failed: {:?}", e))?;

@@ -190,15 +193,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
         // This scenario is so rare that it seems OK to double-verify some blocks.
         let payload_verification_status = PayloadVerificationStatus::Optimistic;

-        // Because we are replaying a single chain of blocks, we only need to calculate unrealized
-        // justification for the last block in the chain.
-        let is_last_block = i + 1 == blocks_len;
-        let count_unrealized = if is_last_block {
-            count_unrealized_config
-        } else {
-            CountUnrealized::False
-        };
-
         fork_choice
             .on_block(
                 block.slot(),
@@ -208,8 +202,9 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
                 Duration::from_secs(0),
                 &state,
                 payload_verification_status,
+                progressive_balances_mode,
                 spec,
-                count_unrealized,
+                log,
             )
             .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
     }
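With the per-block `CountUnrealized` bookkeeping gone, the replay loop reduces to: advance the state through empty slots to each block's slot, then feed the block to fork choice. A toy model of that shape (all types are local stand-ins; the real loop also passes payload status, timing, progressive balances mode and a logger):

```rust
struct Block {
    slot: u64,
}

struct ForkChoice {
    head_slot: u64,
}

// Stand-in for `complete_state_advance`: jump the state to the target slot.
fn complete_state_advance(state_slot: &mut u64, target: u64) -> Result<(), String> {
    if target < *state_slot {
        return Err("target slot in the past".into());
    }
    *state_slot = target;
    Ok(())
}

// Stand-in for `fork_choice.on_block`.
fn on_block(fc: &mut ForkChoice, block: &Block) -> Result<(), String> {
    fc.head_slot = fc.head_slot.max(block.slot);
    Ok(())
}

fn replay(fc: &mut ForkChoice, mut state_slot: u64, blocks: &[Block]) -> Result<(), String> {
    for block in blocks {
        complete_state_advance(&mut state_slot, block.slot)?;
        on_block(fc, block)?;
    }
    Ok(())
}

fn main() {
    let mut fc = ForkChoice { head_slot: 0 };
    let blocks = vec![Block { slot: 1 }, Block { slot: 3 }, Block { slot: 4 }];
    replay(&mut fc, 0, &blocks).unwrap();
    assert_eq!(fc.head_slot, 4);
    println!("replayed to slot {}", fc.head_slot);
}
```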
@@ -52,9 +52,9 @@ pub mod validator_pubkey_cache;

 pub use self::beacon_chain::{
     AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
-    CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification,
-    StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
-    INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
+    ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig,
+    WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
+    INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
 };
 pub use self::beacon_snapshot::BeaconSnapshot;
 pub use self::chain_config::ChainConfig;
@@ -64,6 +64,7 @@ pub use attestation_verification::Error as AttestationError;
 pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
 pub use block_verification::{
     get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock,
+    IntoExecutionPendingBlock, IntoGossipVerifiedBlock,
 };
 pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
 pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
@@ -72,6 +73,7 @@ pub use execution_layer::EngineState;
 pub use execution_payload::NotifyExecutionLayer;
 pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters};
 pub use metrics::scrape_for_metrics;
+pub use migrate::MigratorConfig;
 pub use parking_lot;
 pub use slot_clock;
 pub use state_processing::per_block_processing::errors::{

@@ -1,6 +1,4 @@
-use crate::{
-    beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
-};
+use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
 use derivative::Derivative;
 use slot_clock::SlotClock;
 use std::time::Duration;
@@ -103,7 +101,8 @@ impl<T: BeaconChainTypes> VerifiedLightClientFinalityUpdate<T> {
         // verify that enough time has passed for the block to have been propagated
         match start_time {
             Some(time) => {
-                if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration
+                if seen_timestamp + chain.spec.maximum_gossip_clock_disparity()
+                    < time + one_third_slot_duration
                 {
                     return Err(Error::TooEarly);
                 }

@@ -1,6 +1,4 @@
-use crate::{
-    beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
-};
+use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
 use derivative::Derivative;
 use eth2::types::Hash256;
 use slot_clock::SlotClock;
@@ -103,7 +101,8 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
         // verify that enough time has passed for the block to have been propagated
         match start_time {
             Some(time) => {
-                if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration
+                if seen_timestamp + chain.spec.maximum_gossip_clock_disparity()
+                    < time + one_third_slot_duration
                 {
                     return Err(Error::TooEarly);
                 }
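Both light client verifiers above apply the same timing rule, now reading the disparity from the chain spec instead of a crate-level constant: an update is "too early" if, even after granting the maximum clock disparity, it was seen before one third of the slot has elapsed. A self-contained sketch using `std::time::Duration` (the 500 ms disparity is an assumed value for illustration):

```rust
use std::time::Duration;

// All timestamps are durations since genesis, mirroring the slot clock's view.
fn is_too_early(
    seen_timestamp: Duration,
    slot_start: Duration,
    slot_duration: Duration,
    maximum_gossip_clock_disparity: Duration,
) -> bool {
    let one_third_slot_duration = slot_duration / 3;
    seen_timestamp + maximum_gossip_clock_disparity < slot_start + one_third_slot_duration
}

fn main() {
    let slot_duration = Duration::from_secs(12);
    let disparity = Duration::from_millis(500); // assumed default
    let slot_start = Duration::from_secs(120);

    // Seen 1s into the slot: 1.5s (with disparity) is still before the 4s mark.
    assert!(is_too_early(
        slot_start + Duration::from_secs(1),
        slot_start,
        slot_duration,
        disparity,
    ));
    // Seen 5s into the slot: past one third of the slot, so accepted.
    assert!(!is_too_early(
        slot_start + Duration::from_secs(5),
        slot_start,
        slot_duration,
        disparity,
    ));
    println!("ok");
}
```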
@@ -1,8 +1,10 @@
 //! Provides tools for checking if a node is ready for the Bellatrix upgrade and following merge
 //! transition.

-use crate::{BeaconChain, BeaconChainTypes};
+use crate::{BeaconChain, BeaconChainError as Error, BeaconChainTypes};
+use execution_layer::BlockByNumberQuery;
 use serde::{Deserialize, Serialize, Serializer};
+use slog::debug;
 use std::fmt;
 use std::fmt::Write;
 use types::*;
@@ -86,9 +88,6 @@ pub enum MergeReadiness {
         #[serde(serialize_with = "serialize_uint256")]
         current_difficulty: Option<Uint256>,
     },
-    /// The transition configuration with the EL failed, there might be a problem with
-    /// connectivity, authentication or a difference in configuration.
-    ExchangeTransitionConfigurationFailed { error: String },
     /// The EL can be reached and has the correct configuration, however it's not yet synced.
     NotSynced,
     /// The user has not configured this node to use an execution endpoint.
@@ -109,12 +108,6 @@ impl fmt::Display for MergeReadiness {
                     params, current_difficulty
                 )
             }
-            MergeReadiness::ExchangeTransitionConfigurationFailed { error } => write!(
-                f,
-                "Could not confirm the transition configuration with the \
-                    execution endpoint: {:?}",
-                error
-            ),
             MergeReadiness::NotSynced => write!(
                 f,
                 "The execution endpoint is connected and configured, \
@@ -129,6 +122,25 @@ impl fmt::Display for MergeReadiness {
     }
 }

+pub enum GenesisExecutionPayloadStatus {
+    Correct(ExecutionBlockHash),
+    BlockHashMismatch {
+        got: ExecutionBlockHash,
+        expected: ExecutionBlockHash,
+    },
+    TransactionsRootMismatch {
+        got: Hash256,
+        expected: Hash256,
+    },
+    WithdrawalsRootMismatch {
+        got: Hash256,
+        expected: Hash256,
+    },
+    OtherMismatch,
+    Irrelevant,
+    AlreadyHappened,
+}
+
 impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Returns `true` if user has an EL configured, or if the Bellatrix fork has occurred or will
     /// occur within `MERGE_READINESS_PREPARATION_SECONDS`.
@@ -153,17 +165,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     }

     /// Attempts to connect to the EL and confirm that it is ready for the merge.
-    pub async fn check_merge_readiness(&self) -> MergeReadiness {
+    pub async fn check_merge_readiness(&self, current_slot: Slot) -> MergeReadiness {
         if let Some(el) = self.execution_layer.as_ref() {
-            if let Err(e) = el.exchange_transition_configuration(&self.spec).await {
-                // The EL was either unreachable, responded with an error or has a different
-                // configuration.
-                return MergeReadiness::ExchangeTransitionConfigurationFailed {
-                    error: format!("{:?}", e),
-                };
-            }
-
-            if !el.is_synced_for_notifier().await {
+            if !el.is_synced_for_notifier(current_slot).await {
                 // The EL is not synced.
                 return MergeReadiness::NotSynced;
             }
@@ -178,6 +182,91 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             MergeReadiness::NoExecutionEndpoint
         }
     }
+
+    /// Check that the execution payload embedded in the genesis state matches the EL's genesis
+    /// block.
+    pub async fn check_genesis_execution_payload_is_correct(
+        &self,
+    ) -> Result<GenesisExecutionPayloadStatus, Error> {
+        let head_snapshot = self.head_snapshot();
+        let genesis_state = &head_snapshot.beacon_state;
+
+        if genesis_state.slot() != 0 {
+            return Ok(GenesisExecutionPayloadStatus::AlreadyHappened);
+        }
+
+        let Ok(latest_execution_payload_header) = genesis_state.latest_execution_payload_header()
+        else {
+            return Ok(GenesisExecutionPayloadStatus::Irrelevant);
+        };
+        let fork = self.spec.fork_name_at_epoch(Epoch::new(0));
+
+        let execution_layer = self
+            .execution_layer
+            .as_ref()
+            .ok_or(Error::ExecutionLayerMissing)?;
+        let exec_block_hash = latest_execution_payload_header.block_hash();
+
+        // Use getBlockByNumber(0) to check that the block hash matches.
+        // At present, Geth does not respond to engine_getPayloadBodiesByRange before genesis.
+        let execution_block = execution_layer
+            .get_block_by_number(BlockByNumberQuery::Tag("0x0"))
+            .await
+            .map_err(|e| Error::ExecutionLayerGetBlockByNumberFailed(Box::new(e)))?
+            .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?;
+
+        if execution_block.block_hash != exec_block_hash {
+            return Ok(GenesisExecutionPayloadStatus::BlockHashMismatch {
+                got: execution_block.block_hash,
+                expected: exec_block_hash,
+            });
+        }
+
+        // Double-check the block by reconstructing it.
+        let execution_payload = execution_layer
+            .get_payload_by_hash_legacy(exec_block_hash, fork)
+            .await
+            .map_err(|e| Error::ExecutionLayerGetBlockByHashFailed(Box::new(e)))?
+            .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?;
+
+        // Verify payload integrity.
+        let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref());
+
+        let got_transactions_root = header_from_payload.transactions_root();
+        let expected_transactions_root = latest_execution_payload_header.transactions_root();
+        let got_withdrawals_root = header_from_payload.withdrawals_root().ok();
+        let expected_withdrawals_root = latest_execution_payload_header.withdrawals_root().ok();
+
+        if got_transactions_root != expected_transactions_root {
+            return Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch {
+                got: got_transactions_root,
+                expected: expected_transactions_root,
+            });
+        }
+
+        if let Some(&expected) = expected_withdrawals_root {
+            if let Some(&got) = got_withdrawals_root {
+                if got != expected {
+                    return Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch {
+                        got,
+                        expected,
+                    });
+                }
+            }
+        }
+
+        if header_from_payload.to_ref() != latest_execution_payload_header {
+            debug!(
+                self.log,
+                "Genesis execution payload reconstruction failure";
+                "consensus_node_header" => ?latest_execution_payload_header,
+                "execution_node_header" => ?header_from_payload
+            );
+            return Ok(GenesisExecutionPayloadStatus::OtherMismatch);
+        }
+
+        Ok(GenesisExecutionPayloadStatus::Correct(exec_block_hash))
+    }
 }

 /// Utility function to serialize a Uint256 as a decimal string.
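A hypothetical consumer of the new `GenesisExecutionPayloadStatus`, reduced to local stand-in types so it compiles on its own; the real caller and its warn/shutdown policy live elsewhere in the beacon node and are not shown in this diff:

```rust
// Local stand-ins: only a subset of the variants from the diff, so this
// compiles on its own. The real enum and caller live in `beacon_chain`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ExecutionBlockHash(u64);

#[derive(Debug)]
enum GenesisExecutionPayloadStatus {
    Correct(ExecutionBlockHash),
    BlockHashMismatch {
        got: ExecutionBlockHash,
        expected: ExecutionBlockHash,
    },
    OtherMismatch,
    Irrelevant,
    AlreadyHappened,
}

fn report(status: &GenesisExecutionPayloadStatus) {
    match status {
        GenesisExecutionPayloadStatus::Correct(hash) => {
            println!("genesis execution payload verified: {hash:?}")
        }
        GenesisExecutionPayloadStatus::BlockHashMismatch { got, expected } => {
            eprintln!("EL genesis hash mismatch: got {got:?}, expected {expected:?}")
        }
        GenesisExecutionPayloadStatus::OtherMismatch => {
            eprintln!("genesis payload differs from the EL's genesis block")
        }
        GenesisExecutionPayloadStatus::Irrelevant
        | GenesisExecutionPayloadStatus::AlreadyHappened => {
            // Pre-Bellatrix genesis, or the chain is already past slot 0.
        }
    }
}

fn main() {
    report(&GenesisExecutionPayloadStatus::Correct(ExecutionBlockHash(42)));
}
```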
@@ -998,6 +998,17 @@ lazy_static! {
         "light_client_optimistic_update_verification_success_total",
         "Number of light client optimistic updates verified for gossip"
     );
+    /*
+    * Aggregate subset metrics
+     */
+    pub static ref SYNC_CONTRIBUTION_SUBSETS: Result<IntCounter> = try_create_int_counter(
+        "beacon_sync_contribution_subsets_total",
+        "Count of new sync contributions that are subsets of already known aggregates"
+    );
+    pub static ref AGGREGATED_ATTESTATION_SUBSETS: Result<IntCounter> = try_create_int_counter(
+        "beacon_aggregated_attestation_subsets_total",
+        "Count of new aggregated attestations that are subsets of already known aggregates"
+    );
 }

 /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
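These counters are presumably incremented wherever a newly received aggregate turns out to be a subset of one already known. A minimal sketch of the same pattern using the `prometheus` crate directly (Lighthouse's `try_create_int_counter` wrapper and registry plumbing are omitted; add `prometheus` as a dependency to run this):

```rust
use prometheus::IntCounter;

fn main() {
    // Stand-alone counter; the real code registers its counters in a global
    // registry, which this sketch skips.
    let subsets = IntCounter::new(
        "beacon_aggregated_attestation_subsets_total",
        "Count of new aggregated attestations that are subsets of already known aggregates",
    )
    .expect("valid metric name and help text");

    let is_subset_of_known_aggregate = true; // stand-in for the real subset check
    if is_subset_of_known_aggregate {
        subsets.inc();
    }
    assert_eq!(subsets.get(), 1);
}
```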
| @ -25,10 +25,15 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200; | |||||||
| /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`.
 | /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`.
 | ||||||
| const COMPACTION_FINALITY_DISTANCE: u64 = 1024; | const COMPACTION_FINALITY_DISTANCE: u64 = 1024; | ||||||
| 
 | 
 | ||||||
|  | /// Default number of epochs to wait between finalization migrations.
 | ||||||
|  | pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1; | ||||||
|  | 
 | ||||||
| /// The background migrator runs a thread to perform pruning and migrate state from the hot
 | /// The background migrator runs a thread to perform pruning and migrate state from the hot
 | ||||||
| /// to the cold database.
 | /// to the cold database.
 | ||||||
| pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> { | pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> { | ||||||
|     db: Arc<HotColdDB<E, Hot, Cold>>, |     db: Arc<HotColdDB<E, Hot, Cold>>, | ||||||
|  |     /// Record of when the last migration ran, for enforcing `epochs_per_migration`.
 | ||||||
|  |     prev_migration: Arc<Mutex<PrevMigration>>, | ||||||
|     #[allow(clippy::type_complexity)] |     #[allow(clippy::type_complexity)] | ||||||
|     tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>, |     tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>, | ||||||
|     /// Genesis block root, for persisting the `PersistedBeaconChain`.
 |     /// Genesis block root, for persisting the `PersistedBeaconChain`.
 | ||||||
| @ -36,9 +41,22 @@ pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> | |||||||
|     log: Logger, |     log: Logger, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, Default, Clone, PartialEq, Eq)] | #[derive(Debug, Clone, PartialEq, Eq)] | ||||||
| pub struct MigratorConfig { | pub struct MigratorConfig { | ||||||
|     pub blocking: bool, |     pub blocking: bool, | ||||||
|  |     /// Run migrations at most once per `epochs_per_migration`.
 | ||||||
|  |     ///
 | ||||||
|  |     /// If set to 0 or 1, then run every finalization.
 | ||||||
|  |     pub epochs_per_migration: u64, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl Default for MigratorConfig { | ||||||
|  |     fn default() -> Self { | ||||||
|  |         Self { | ||||||
|  |             blocking: false, | ||||||
|  |             epochs_per_migration: DEFAULT_EPOCHS_PER_MIGRATION, | ||||||
|  |         } | ||||||
|  |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl MigratorConfig { | impl MigratorConfig { | ||||||
| @ -46,6 +64,19 @@ impl MigratorConfig { | |||||||
|         self.blocking = true; |         self.blocking = true; | ||||||
|         self |         self | ||||||
|     } |     } | ||||||
|  | 
 | ||||||
|  |     pub fn epochs_per_migration(mut self, epochs_per_migration: u64) -> Self { | ||||||
|  |         self.epochs_per_migration = epochs_per_migration; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// Record of when the last migration ran.
 | ||||||
|  | pub struct PrevMigration { | ||||||
|  |     /// The epoch at which the last finalization migration ran.
 | ||||||
|  |     epoch: Epoch, | ||||||
|  |     /// The number of epochs to wait between runs.
 | ||||||
|  |     epochs_per_migration: u64, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// Pruning can be successful, or in rare cases deferred to a later point.
 | /// Pruning can be successful, or in rare cases deferred to a later point.
 | ||||||
| @ -92,6 +123,7 @@ pub struct FinalizationNotification { | |||||||
|     finalized_state_root: BeaconStateHash, |     finalized_state_root: BeaconStateHash, | ||||||
|     finalized_checkpoint: Checkpoint, |     finalized_checkpoint: Checkpoint, | ||||||
|     head_tracker: Arc<HeadTracker>, |     head_tracker: Arc<HeadTracker>, | ||||||
|  |     prev_migration: Arc<Mutex<PrevMigration>>, | ||||||
|     genesis_block_root: Hash256, |     genesis_block_root: Hash256, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -103,6 +135,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | |||||||
|         genesis_block_root: Hash256, |         genesis_block_root: Hash256, | ||||||
|         log: Logger, |         log: Logger, | ||||||
|     ) -> Self { |     ) -> Self { | ||||||
|  |         // Estimate last migration run from DB split slot.
 | ||||||
|  |         let prev_migration = Arc::new(Mutex::new(PrevMigration { | ||||||
|  |             epoch: db.get_split_slot().epoch(E::slots_per_epoch()), | ||||||
|  |             epochs_per_migration: config.epochs_per_migration, | ||||||
|  |         })); | ||||||
|         let tx_thread = if config.blocking { |         let tx_thread = if config.blocking { | ||||||
|             None |             None | ||||||
|         } else { |         } else { | ||||||
| @ -111,6 +148,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | |||||||
|         Self { |         Self { | ||||||
|             db, |             db, | ||||||
|             tx_thread, |             tx_thread, | ||||||
|  |             prev_migration, | ||||||
|             genesis_block_root, |             genesis_block_root, | ||||||
|             log, |             log, | ||||||
|         } |         } | ||||||
| @ -131,6 +169,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | |||||||
|             finalized_state_root, |             finalized_state_root, | ||||||
|             finalized_checkpoint, |             finalized_checkpoint, | ||||||
|             head_tracker, |             head_tracker, | ||||||
|  |             prev_migration: self.prev_migration.clone(), | ||||||
|             genesis_block_root: self.genesis_block_root, |             genesis_block_root: self.genesis_block_root, | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
| @ -204,9 +243,30 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | |||||||
|         notif: FinalizationNotification, |         notif: FinalizationNotification, | ||||||
|         log: &Logger, |         log: &Logger, | ||||||
|     ) { |     ) { | ||||||
|  |         // Do not run too frequently.
 | ||||||
|  |         let epoch = notif.finalized_checkpoint.epoch; | ||||||
|  |         let mut prev_migration = notif.prev_migration.lock(); | ||||||
|  |         if epoch < prev_migration.epoch + prev_migration.epochs_per_migration { | ||||||
|  |             debug!( | ||||||
|  |                 log, | ||||||
|  |                 "Database consolidation deferred"; | ||||||
|  |                 "last_finalized_epoch" => prev_migration.epoch, | ||||||
|  |                 "new_finalized_epoch" => epoch, | ||||||
|  |                 "epochs_per_migration" => prev_migration.epochs_per_migration, | ||||||
|  |             ); | ||||||
|  |             return; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         // Update the previous migration epoch immediately to avoid holding the lock. If the
 | ||||||
|  |         // migration doesn't succeed then the next migration will be retried at the next scheduled
 | ||||||
|  |         // run.
 | ||||||
|  |         prev_migration.epoch = epoch; | ||||||
|  |         drop(prev_migration); | ||||||
|  | 
 | ||||||
|         debug!(log, "Database consolidation started"); |         debug!(log, "Database consolidation started"); | ||||||
| 
 | 
 | ||||||
|         let finalized_state_root = notif.finalized_state_root; |         let finalized_state_root = notif.finalized_state_root; | ||||||
|  |         let finalized_block_root = notif.finalized_checkpoint.root; | ||||||
| 
 | 
 | ||||||
|         let finalized_state = match db.get_state(&finalized_state_root.into(), None) { |         let finalized_state = match db.get_state(&finalized_state_root.into(), None) { | ||||||
|             Ok(Some(state)) => state, |             Ok(Some(state)) => state, | ||||||
| @ -260,7 +320,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho | |||||||
|             } |             } | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         match migrate_database(db.clone(), finalized_state_root.into(), &finalized_state) { |         match migrate_database( | ||||||
|  |             db.clone(), | ||||||
|  |             finalized_state_root.into(), | ||||||
|  |             finalized_block_root, | ||||||
|  |             &finalized_state, | ||||||
|  |         ) { | ||||||
|             Ok(()) => {} |             Ok(()) => {} | ||||||
|             Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { |             Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { | ||||||
|                 debug!( |                 debug!( | ||||||
|  | |||||||
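
The hunk above adds a simple rate limiter to the background migrator: a finalization migration only runs once at least `epochs_per_migration` epochs have passed since the previous run, and the run is recorded up-front so the lock is released before the slow work begins. A minimal sketch of the same pattern, assuming a plain `u64` epoch and `std` locking in place of the crate's `types::Epoch` and `parking_lot::Mutex`:

use std::sync::{Arc, Mutex};

type Epoch = u64; // stand-in for `types::Epoch`

struct PrevMigration {
    epoch: Epoch,
    epochs_per_migration: u64,
}

// Returns `true` if the caller should run a migration for `finalized_epoch`.
fn should_migrate(prev: &Arc<Mutex<PrevMigration>>, finalized_epoch: Epoch) -> bool {
    let mut prev = prev.lock().unwrap();
    // Defer until `epochs_per_migration` epochs have elapsed since the last run.
    if finalized_epoch < prev.epoch + prev.epochs_per_migration {
        return false;
    }
    // Record the run before migrating so the lock is held only briefly; a
    // failed migration is simply retried at the next scheduled interval.
    prev.epoch = finalized_epoch;
    true
}

fn main() {
    let prev = Arc::new(Mutex::new(PrevMigration { epoch: 10, epochs_per_migration: 4 }));
    assert!(!should_migrate(&prev, 12)); // only 2 epochs elapsed: defer
    assert!(should_migrate(&prev, 14)); // 4 epochs elapsed: run
}
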
| @ -1,7 +1,9 @@ | |||||||
| //! Provides an `ObservedAggregates` struct which allows us to reject aggregated attestations or | //! Provides an `ObservedAggregates` struct which allows us to reject aggregated attestations or | ||||||
| //! sync committee contributions if we've already seen them. | //! sync committee contributions if we've already seen them. | ||||||
| 
 | 
 | ||||||
| use std::collections::HashSet; | use crate::sync_committee_verification::SyncCommitteeData; | ||||||
|  | use ssz_types::{BitList, BitVector}; | ||||||
|  | use std::collections::HashMap; | ||||||
| use std::marker::PhantomData; | use std::marker::PhantomData; | ||||||
| use tree_hash::TreeHash; | use tree_hash::TreeHash; | ||||||
| use types::consts::altair::{ | use types::consts::altair::{ | ||||||
| @ -10,8 +12,16 @@ use types::consts::altair::{ | |||||||
| use types::slot_data::SlotData; | use types::slot_data::SlotData; | ||||||
| use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution}; | use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution}; | ||||||
| 
 | 
 | ||||||
| pub type ObservedSyncContributions<E> = ObservedAggregates<SyncCommitteeContribution<E>, E>; | pub type ObservedSyncContributions<E> = ObservedAggregates< | ||||||
| pub type ObservedAggregateAttestations<E> = ObservedAggregates<Attestation<E>, E>; |     SyncCommitteeContribution<E>, | ||||||
|  |     E, | ||||||
|  |     BitVector<<E as types::EthSpec>::SyncSubcommitteeSize>, | ||||||
|  | >; | ||||||
|  | pub type ObservedAggregateAttestations<E> = ObservedAggregates< | ||||||
|  |     Attestation<E>, | ||||||
|  |     E, | ||||||
|  |     BitList<<E as types::EthSpec>::MaxValidatorsPerCommittee>, | ||||||
|  | >; | ||||||
| 
 | 
 | ||||||
| /// A trait used to associate capacity constants with the type being stored in `ObservedAggregates`. | /// A trait used to associate capacity constants with the type being stored in `ObservedAggregates`. | ||||||
| pub trait Consts { | pub trait Consts { | ||||||
| @ -69,10 +79,81 @@ impl<T: EthSpec> Consts for SyncCommitteeContribution<T> { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /// A trait for types that implement a behaviour where one object of that type | ||||||
|  | /// can be a subset/superset of another. | ||||||
|  | /// This trait allows us to be generic over the aggregate item that we store in the cache that | ||||||
|  | /// we want to prevent duplicates/subsets for. | ||||||
|  | pub trait SubsetItem { | ||||||
|  |     /// The item that is stored for later comparison with new incoming aggregate items. | ||||||
|  |     type Item; | ||||||
|  | 
 | ||||||
|  |     /// Returns `true` if `self` is a non-strict subset of `other` and `false` otherwise. | ||||||
|  |     fn is_subset(&self, other: &Self::Item) -> bool; | ||||||
|  | 
 | ||||||
|  |     /// Returns `true` if `self` is a non-strict superset of `other` and `false` otherwise. | ||||||
|  |     fn is_superset(&self, other: &Self::Item) -> bool; | ||||||
|  | 
 | ||||||
|  |     /// Returns the item that gets stored in `ObservedAggregates` for later subset | ||||||
|  |     /// comparison with incoming aggregates. | ||||||
|  |     fn get_item(&self) -> Self::Item; | ||||||
|  | 
 | ||||||
|  |     /// Returns a unique value that keys the object to the item that is being stored | ||||||
|  |     /// in `ObservedAggregates`. | ||||||
|  |     fn root(&self) -> Hash256; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<T: EthSpec> SubsetItem for Attestation<T> { | ||||||
|  |     type Item = BitList<T::MaxValidatorsPerCommittee>; | ||||||
|  |     fn is_subset(&self, other: &Self::Item) -> bool { | ||||||
|  |         self.aggregation_bits.is_subset(other) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn is_superset(&self, other: &Self::Item) -> bool { | ||||||
|  |         other.is_subset(&self.aggregation_bits) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Returns the attestation aggregation bits. | ||||||
|  |     fn get_item(&self) -> Self::Item { | ||||||
|  |         self.aggregation_bits.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Returns the hash tree root of the attestation data. | ||||||
|  |     fn root(&self) -> Hash256 { | ||||||
|  |         self.data.tree_hash_root() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<T: EthSpec> SubsetItem for SyncCommitteeContribution<T> { | ||||||
|  |     type Item = BitVector<T::SyncSubcommitteeSize>; | ||||||
|  |     fn is_subset(&self, other: &Self::Item) -> bool { | ||||||
|  |         self.aggregation_bits.is_subset(other) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     fn is_superset(&self, other: &Self::Item) -> bool { | ||||||
|  |         other.is_subset(&self.aggregation_bits) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Returns the sync contribution aggregation bits. | ||||||
|  |     fn get_item(&self) -> Self::Item { | ||||||
|  |         self.aggregation_bits.clone() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Returns the hash tree root of the root, slot and subcommittee index | ||||||
|  |     /// of the sync contribution. | ||||||
|  |     fn root(&self) -> Hash256 { | ||||||
|  |         SyncCommitteeData { | ||||||
|  |             root: self.beacon_block_root, | ||||||
|  |             slot: self.slot, | ||||||
|  |             subcommittee_index: self.subcommittee_index, | ||||||
|  |         } | ||||||
|  |         .tree_hash_root() | ||||||
|  |     } | ||||||
|  | } | ||||||
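
Both `SubsetItem` impls above reduce to a plain bitfield comparison: `a` is a non-strict subset of `b` when every bit set in `a` is also set in `b`. A dependency-free sketch of that relation over `bool` slices (the real code calls `is_subset` on `ssz_types::BitList`/`BitVector`, which carries the same meaning):

// `a` is a non-strict subset of `b` when every set bit of `a` is set in `b`.
fn is_subset(a: &[bool], b: &[bool]) -> bool {
    a.len() == b.len() && a.iter().zip(b).all(|(x, y)| !*x || *y)
}

fn main() {
    let small = [true, false, false, true];
    let big = [true, true, false, true];
    assert!(is_subset(&small, &big)); // small is contained in big
    assert!(!is_subset(&big, &small)); // big is a strict superset, not a subset
    assert!(is_subset(&big, &big)); // non-strict: equality counts
}
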
|  | 
 | ||||||
| #[derive(Debug, PartialEq)] | #[derive(Debug, PartialEq)] | ||||||
| pub enum ObserveOutcome { | pub enum ObserveOutcome { | ||||||
|     /// This item was already known. |     /// This item is a non-strict subset of an already known item. | ||||||
|     AlreadyKnown, |     Subset, | ||||||
|     /// This was the first time this item was observed. |     /// This was the first time this item was observed. | ||||||
|     New, |     New, | ||||||
| } | } | ||||||
| @ -94,26 +175,28 @@ pub enum Error { | |||||||
|     }, |     }, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// A `HashSet` that contains entries related to some `Slot`. | /// A `HashMap` that contains entries related to some `Slot`. | ||||||
| struct SlotHashSet { | struct SlotHashSet<I> { | ||||||
|     set: HashSet<Hash256>, |     /// Contains a vector of maximally-sized aggregation bitfields/bitvectors | ||||||
|  |     /// such that no bitfield/bitvector is a subset of any other in the list. | ||||||
|  |     map: HashMap<Hash256, Vec<I>>, | ||||||
|     slot: Slot, |     slot: Slot, | ||||||
|     max_capacity: usize, |     max_capacity: usize, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl SlotHashSet { | impl<I> SlotHashSet<I> { | ||||||
|     pub fn new(slot: Slot, initial_capacity: usize, max_capacity: usize) -> Self { |     pub fn new(slot: Slot, initial_capacity: usize, max_capacity: usize) -> Self { | ||||||
|         Self { |         Self { | ||||||
|             slot, |             slot, | ||||||
|             set: HashSet::with_capacity(initial_capacity), |             map: HashMap::with_capacity(initial_capacity), | ||||||
|             max_capacity, |             max_capacity, | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Store the item in self so future observations recognise its existence. |     /// Store the item in self so future observations recognise its existence. | ||||||
|     pub fn observe_item<T: SlotData>( |     pub fn observe_item<S: SlotData + SubsetItem<Item = I>>( | ||||||
|         &mut self, |         &mut self, | ||||||
|         item: &T, |         item: &S, | ||||||
|         root: Hash256, |         root: Hash256, | ||||||
|     ) -> Result<ObserveOutcome, Error> { |     ) -> Result<ObserveOutcome, Error> { | ||||||
|         if item.get_slot() != self.slot { |         if item.get_slot() != self.slot { | ||||||
| @ -123,29 +206,45 @@ impl SlotHashSet { | |||||||
|             }); |             }); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         if self.set.contains(&root) { |         if let Some(aggregates) = self.map.get_mut(&root) { | ||||||
|             Ok(ObserveOutcome::AlreadyKnown) |             for existing in aggregates { | ||||||
|         } else { |                 // Check if `item` is a subset of any of the observed aggregates | ||||||
|             // Here we check to see if this slot has reached the maximum observation count. |                 if item.is_subset(existing) { | ||||||
|             // |                     return Ok(ObserveOutcome::Subset); | ||||||
|             // The resulting behaviour is that we are no longer able to successfully observe new |                 // Check if `item` is a superset of any of the observed aggregates | ||||||
|             // items, however we will continue to return `is_known` values. We could also |                 // If true, we replace the existing subset with the new item. This allows us | ||||||
|             // disable `is_known`, however then we would stop forwarding items across the |                 // to hold fewer items in the list. | ||||||
|             // gossip network and I think that this is a worse case than sending some invalid ones. |                 } else if item.is_superset(existing) { | ||||||
|             // The underlying libp2p network is responsible for removing duplicate messages, so |                     *existing = item.get_item(); | ||||||
|             // this doesn't risk a broadcast loop. |                     return Ok(ObserveOutcome::New); | ||||||
|             if self.set.len() >= self.max_capacity { |                 } | ||||||
|                 return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); |  | ||||||
|             } |             } | ||||||
| 
 |  | ||||||
|             self.set.insert(root); |  | ||||||
| 
 |  | ||||||
|             Ok(ObserveOutcome::New) |  | ||||||
|         } |         } | ||||||
|  | 
 | ||||||
|  |         // Here we check to see if this slot has reached the maximum observation count. | ||||||
|  |         // | ||||||
|  |         // The resulting behaviour is that we are no longer able to successfully observe new | ||||||
|  |         // items; however, we will continue to return `is_known_subset` values. We could also | ||||||
|  |         // disable `is_known_subset`; however, then we would stop forwarding items across the | ||||||
|  |         // gossip network and I think that this is a worse case than sending some invalid ones. | ||||||
|  |         // The underlying libp2p network is responsible for removing duplicate messages, so | ||||||
|  |         // this doesn't risk a broadcast loop. | ||||||
|  |         if self.map.len() >= self.max_capacity { | ||||||
|  |             return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         let item = item.get_item(); | ||||||
|  |         self.map.entry(root).or_default().push(item); | ||||||
|  |         Ok(ObserveOutcome::New) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Indicates if `item` has been observed before. |     /// Check if `item` is a non-strict subset of any of the already observed aggregates for | ||||||
|     pub fn is_known<T: SlotData>(&self, item: &T, root: Hash256) -> Result<bool, Error> { |     /// the given root and slot. | ||||||
|  |     pub fn is_known_subset<S: SlotData + SubsetItem<Item = I>>( | ||||||
|  |         &self, | ||||||
|  |         item: &S, | ||||||
|  |         root: Hash256, | ||||||
|  |     ) -> Result<bool, Error> { | ||||||
|         if item.get_slot() != self.slot { |         if item.get_slot() != self.slot { | ||||||
|             return Err(Error::IncorrectSlot { |             return Err(Error::IncorrectSlot { | ||||||
|                 expected: self.slot, |                 expected: self.slot, | ||||||
| @ -153,25 +252,28 @@ impl SlotHashSet { | |||||||
|             }); |             }); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         Ok(self.set.contains(&root)) |         Ok(self | ||||||
|  |             .map | ||||||
|  |             .get(&root) | ||||||
|  |             .map_or(false, |agg| agg.iter().any(|val| item.is_subset(val)))) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// The number of observed items in `self`. |     /// The number of observed items in `self`. | ||||||
|     pub fn len(&self) -> usize { |     pub fn len(&self) -> usize { | ||||||
|         self.set.len() |         self.map.len() | ||||||
|     } |     } | ||||||
| } | } | ||||||
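
Per root, `SlotHashSet` keeps only maximal aggregates: an incoming subset is rejected as carrying no new information, while an incoming superset overwrites the first entry it covers. A toy sketch of that insertion rule (hypothetical `observe` helper over `Vec<bool>`, not the crate's API):

fn is_subset(a: &[bool], b: &[bool]) -> bool {
    a.len() == b.len() && a.iter().zip(b).all(|(x, y)| !*x || *y)
}

#[derive(Debug, PartialEq)]
enum Outcome {
    Subset,
    New,
}

// Insert `item`, keeping only maximal bitfields, as `observe_item` above does.
fn observe(list: &mut Vec<Vec<bool>>, item: &[bool]) -> Outcome {
    for existing in list.iter_mut() {
        if is_subset(item, existing) {
            return Outcome::Subset; // nothing new to learn
        } else if is_subset(existing, item) {
            *existing = item.to_vec(); // the superset replaces the entry it covers
            return Outcome::New;
        }
    }
    list.push(item.to_vec());
    Outcome::New
}

fn main() {
    let mut list = Vec::new();
    assert_eq!(observe(&mut list, &[true, false, false]), Outcome::New);
    assert_eq!(observe(&mut list, &[true, true, false]), Outcome::New); // replaces the first entry
    assert_eq!(observe(&mut list, &[true, false, false]), Outcome::Subset);
    assert_eq!(list.len(), 1); // only the maximal bitfield remains
}
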
| 
 | 
 | ||||||
| /// Stores the roots of objects for some number of `Slots`, so we can determine if | /// Stores the roots of objects for some number of `Slots`, so we can determine if | ||||||
| /// these have previously been seen on the network. | /// these have previously been seen on the network. | ||||||
| pub struct ObservedAggregates<T: TreeHash + SlotData + Consts, E: EthSpec> { | pub struct ObservedAggregates<T: SlotData + Consts, E: EthSpec, I> { | ||||||
|     lowest_permissible_slot: Slot, |     lowest_permissible_slot: Slot, | ||||||
|     sets: Vec<SlotHashSet>, |     sets: Vec<SlotHashSet<I>>, | ||||||
|     _phantom_spec: PhantomData<E>, |     _phantom_spec: PhantomData<E>, | ||||||
|     _phantom_tree_hash: PhantomData<T>, |     _phantom_tree_hash: PhantomData<T>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: TreeHash + SlotData + Consts, E: EthSpec> Default for ObservedAggregates<T, E> { | impl<T: SlotData + Consts, E: EthSpec, I> Default for ObservedAggregates<T, E, I> { | ||||||
|     fn default() -> Self { |     fn default() -> Self { | ||||||
|         Self { |         Self { | ||||||
|             lowest_permissible_slot: Slot::new(0), |             lowest_permissible_slot: Slot::new(0), | ||||||
| @ -182,17 +284,17 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> Default for ObservedAggregates | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> { | impl<T: SlotData + Consts + SubsetItem<Item = I>, E: EthSpec, I> ObservedAggregates<T, E, I> { | ||||||
|     /// Store the root of `item` in `self`. |     /// Store `item` in `self` keyed at `root`. | ||||||
|     /// |     /// | ||||||
|     /// `root` must equal `item.tree_hash_root()`. |     /// `root` must equal `item.root::<SubsetItem>()`. | ||||||
|     pub fn observe_item( |     pub fn observe_item( | ||||||
|         &mut self, |         &mut self, | ||||||
|         item: &T, |         item: &T, | ||||||
|         root_opt: Option<Hash256>, |         root_opt: Option<Hash256>, | ||||||
|     ) -> Result<ObserveOutcome, Error> { |     ) -> Result<ObserveOutcome, Error> { | ||||||
|         let index = self.get_set_index(item.get_slot())?; |         let index = self.get_set_index(item.get_slot())?; | ||||||
|         let root = root_opt.unwrap_or_else(|| item.tree_hash_root()); |         let root = root_opt.unwrap_or_else(|| item.root()); | ||||||
| 
 | 
 | ||||||
|         self.sets |         self.sets | ||||||
|             .get_mut(index) |             .get_mut(index) | ||||||
| @ -200,17 +302,18 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> { | |||||||
|             .and_then(|set| set.observe_item(item, root)) |             .and_then(|set| set.observe_item(item, root)) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Check to see if the `root` of `item` is in self. |     /// Check if `item` is a non-strict subset of any of the already observed aggregates for | ||||||
|  |     /// the given root and slot. | ||||||
|     /// |     /// | ||||||
|     /// `root` must equal `a.tree_hash_root()`. |     /// `root` must equal `item.root::<SubsetItem>()`. | ||||||
|     #[allow(clippy::wrong_self_convention)] |     #[allow(clippy::wrong_self_convention)] | ||||||
|     pub fn is_known(&mut self, item: &T, root: Hash256) -> Result<bool, Error> { |     pub fn is_known_subset(&mut self, item: &T, root: Hash256) -> Result<bool, Error> { | ||||||
|         let index = self.get_set_index(item.get_slot())?; |         let index = self.get_set_index(item.get_slot())?; | ||||||
| 
 | 
 | ||||||
|         self.sets |         self.sets | ||||||
|             .get(index) |             .get(index) | ||||||
|             .ok_or(Error::InvalidSetIndex(index)) |             .ok_or(Error::InvalidSetIndex(index)) | ||||||
|             .and_then(|set| set.is_known(item, root)) |             .and_then(|set| set.is_known_subset(item, root)) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// The maximum number of slots that items are stored for. |     /// The maximum number of slots that items are stored for. | ||||||
| @ -296,7 +399,6 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> { | |||||||
| #[cfg(not(debug_assertions))] | #[cfg(not(debug_assertions))] | ||||||
| mod tests { | mod tests { | ||||||
|     use super::*; |     use super::*; | ||||||
|     use tree_hash::TreeHash; |  | ||||||
|     use types::{test_utils::test_random_instance, Hash256}; |     use types::{test_utils::test_random_instance, Hash256}; | ||||||
| 
 | 
 | ||||||
|     type E = types::MainnetEthSpec; |     type E = types::MainnetEthSpec; | ||||||
| @ -330,7 +432,7 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|                     for a in &items { |                     for a in &items { | ||||||
|                         assert_eq!( |                         assert_eq!( | ||||||
|                             store.is_known(a, a.tree_hash_root()), |                             store.is_known_subset(a, a.root()), | ||||||
|                             Ok(false), |                             Ok(false), | ||||||
|                             "should indicate an unknown attestation is unknown" |                             "should indicate an unknown attestation is unknown" | ||||||
|                         ); |                         ); | ||||||
| @ -343,13 +445,13 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|                     for a in &items { |                     for a in &items { | ||||||
|                         assert_eq!( |                         assert_eq!( | ||||||
|                             store.is_known(a, a.tree_hash_root()), |                             store.is_known_subset(a, a.root()), | ||||||
|                             Ok(true), |                             Ok(true), | ||||||
|                             "should indicate a known attestation is known" |                             "should indicate a known attestation is known" | ||||||
|                         ); |                         ); | ||||||
|                         assert_eq!( |                         assert_eq!( | ||||||
|                             store.observe_item(a, Some(a.tree_hash_root())), |                             store.observe_item(a, Some(a.root())), | ||||||
|                             Ok(ObserveOutcome::AlreadyKnown), |                             Ok(ObserveOutcome::Subset), | ||||||
|                             "should acknowledge an existing attestation" |                             "should acknowledge an existing attestation" | ||||||
|                         ); |                         ); | ||||||
|                     } |                     } | ||||||
|  | |||||||
| @ -841,7 +841,7 @@ mod tests { | |||||||
|                     let mut store = $type::default(); |                     let mut store = $type::default(); | ||||||
|                     let max_cap = store.max_capacity(); |                     let max_cap = store.max_capacity(); | ||||||
| 
 | 
 | ||||||
|                     let to_skip = vec![1_u64, 3, 4, 5]; |                     let to_skip = [1_u64, 3, 4, 5]; | ||||||
|                     let periods = (0..max_cap * 3) |                     let periods = (0..max_cap * 3) | ||||||
|                         .into_iter() |                         .into_iter() | ||||||
|                         .filter(|i| !to_skip.contains(i)) |                         .filter(|i| !to_skip.contains(i)) | ||||||
| @ -1012,7 +1012,7 @@ mod tests { | |||||||
|                     let mut store = $type::default(); |                     let mut store = $type::default(); | ||||||
|                     let max_cap = store.max_capacity(); |                     let max_cap = store.max_capacity(); | ||||||
| 
 | 
 | ||||||
|                     let to_skip = vec![1_u64, 3, 4, 5]; |                     let to_skip = [1_u64, 3, 4, 5]; | ||||||
|                     let periods = (0..max_cap * 3) |                     let periods = (0..max_cap * 3) | ||||||
|                         .into_iter() |                         .into_iter() | ||||||
|                         .filter(|i| !to_skip.contains(i)) |                         .filter(|i| !to_skip.contains(i)) | ||||||
| @ -1121,7 +1121,7 @@ mod tests { | |||||||
|                     let mut store = $type::default(); |                     let mut store = $type::default(); | ||||||
|                     let max_cap = store.max_capacity(); |                     let max_cap = store.max_capacity(); | ||||||
| 
 | 
 | ||||||
|                     let to_skip = vec![1_u64, 3, 4, 5]; |                     let to_skip = [1_u64, 3, 4, 5]; | ||||||
|                     let periods = (0..max_cap * 3) |                     let periods = (0..max_cap * 3) | ||||||
|                         .into_iter() |                         .into_iter() | ||||||
|                         .filter(|i| !to_skip.contains(i)) |                         .filter(|i| !to_skip.contains(i)) | ||||||
|  | |||||||
| @ -1,9 +1,10 @@ | |||||||
| //! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from | //! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from | ||||||
| //! validators that have already produced a block. | //! validators that have already produced a block. | ||||||
| 
 | 
 | ||||||
|  | use std::collections::hash_map::Entry; | ||||||
| use std::collections::{HashMap, HashSet}; | use std::collections::{HashMap, HashSet}; | ||||||
| use std::marker::PhantomData; | use std::marker::PhantomData; | ||||||
| use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned}; | use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned}; | ||||||
| 
 | 
 | ||||||
| #[derive(Debug, PartialEq)] | #[derive(Debug, PartialEq)] | ||||||
| pub enum Error { | pub enum Error { | ||||||
| @ -14,6 +15,12 @@ pub enum Error { | |||||||
|     ValidatorIndexTooHigh(u64), |     ValidatorIndexTooHigh(u64), | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | #[derive(Eq, Hash, PartialEq, Debug, Default)] | ||||||
|  | struct ProposalKey { | ||||||
|  |     slot: Slot, | ||||||
|  |     proposer: u64, | ||||||
|  | } | ||||||
|  | 
 | ||||||
| /// Maintains a cache of observed `(block.slot, block.proposer)`. | /// Maintains a cache of observed `(block.slot, block.proposer)`. | ||||||
| /// | /// | ||||||
| /// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you | /// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you | ||||||
| @ -27,7 +34,7 @@ pub enum Error { | |||||||
| /// known_distinct_shufflings` which is much smaller. | /// known_distinct_shufflings` which is much smaller. | ||||||
| pub struct ObservedBlockProducers<E: EthSpec> { | pub struct ObservedBlockProducers<E: EthSpec> { | ||||||
|     finalized_slot: Slot, |     finalized_slot: Slot, | ||||||
|     items: HashMap<Slot, HashSet<u64>>, |     items: HashMap<ProposalKey, HashSet<Hash256>>, | ||||||
|     _phantom: PhantomData<E>, |     _phantom: PhantomData<E>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -42,6 +49,24 @@ impl<E: EthSpec> Default for ObservedBlockProducers<E> { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | pub enum SeenBlock { | ||||||
|  |     Duplicate, | ||||||
|  |     Slashable, | ||||||
|  |     UniqueNonSlashable, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl SeenBlock { | ||||||
|  |     pub fn proposer_previously_observed(self) -> bool { | ||||||
|  |         match self { | ||||||
|  |             Self::Duplicate | Self::Slashable => true, | ||||||
|  |             Self::UniqueNonSlashable => false, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     pub fn is_slashable(&self) -> bool { | ||||||
|  |         matches!(self, Self::Slashable) | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
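
`SeenBlock` folds duplicate and equivocation detection into one result: the same `(slot, proposer, block_root)` seen again is a `Duplicate`, while a second distinct root from the same proposer at the same slot is `Slashable`. A minimal sketch of that classification over one key's root-set (hypothetical `classify` helper mirroring the insert-then-inspect order of `observe_proposal` below):

use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum SeenBlock {
    Duplicate,
    Slashable,
    UniqueNonSlashable,
}

fn classify(seen_roots: &mut HashSet<u64>, root: u64) -> SeenBlock {
    let newly_inserted = seen_roots.insert(root);
    if seen_roots.len() > 1 {
        SeenBlock::Slashable // two distinct roots from one proposer at one slot
    } else if !newly_inserted {
        SeenBlock::Duplicate // the same root observed again
    } else {
        SeenBlock::UniqueNonSlashable
    }
}

fn main() {
    let mut seen = HashSet::new();
    assert_eq!(classify(&mut seen, 0xaa), SeenBlock::UniqueNonSlashable);
    assert_eq!(classify(&mut seen, 0xaa), SeenBlock::Duplicate);
    assert_eq!(classify(&mut seen, 0xbb), SeenBlock::Slashable); // equivocation
}
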
| impl<E: EthSpec> ObservedBlockProducers<E> { | impl<E: EthSpec> ObservedBlockProducers<E> { | ||||||
|     /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will |     /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will | ||||||
|     /// update `self` so future calls to it indicate that this block is known. |     /// update `self` so future calls to it indicate that this block is known. | ||||||
| @ -52,16 +77,44 @@ impl<E: EthSpec> ObservedBlockProducers<E> { | |||||||
|     /// |     /// | ||||||
|     /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. |     /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. | ||||||
|     /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. |     /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. | ||||||
|     pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> { |     pub fn observe_proposal( | ||||||
|  |         &mut self, | ||||||
|  |         block_root: Hash256, | ||||||
|  |         block: BeaconBlockRef<'_, E>, | ||||||
|  |     ) -> Result<SeenBlock, Error> { | ||||||
|         self.sanitize_block(block)?; |         self.sanitize_block(block)?; | ||||||
| 
 | 
 | ||||||
|         let did_not_exist = self |         let key = ProposalKey { | ||||||
|             .items |             slot: block.slot(), | ||||||
|             .entry(block.slot()) |             proposer: block.proposer_index(), | ||||||
|             .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize())) |         }; | ||||||
|             .insert(block.proposer_index()); |  | ||||||
| 
 | 
 | ||||||
|         Ok(!did_not_exist) |         let entry = self.items.entry(key); | ||||||
|  | 
 | ||||||
|  |         let slashable_proposal = match entry { | ||||||
|  |             Entry::Occupied(mut occupied_entry) => { | ||||||
|  |                 let block_roots = occupied_entry.get_mut(); | ||||||
|  |                 let newly_inserted = block_roots.insert(block_root); | ||||||
|  | 
 | ||||||
|  |                 let is_equivocation = block_roots.len() > 1; | ||||||
|  | 
 | ||||||
|  |                 if is_equivocation { | ||||||
|  |                     SeenBlock::Slashable | ||||||
|  |                 } else if !newly_inserted { | ||||||
|  |                     SeenBlock::Duplicate | ||||||
|  |                 } else { | ||||||
|  |                     SeenBlock::UniqueNonSlashable | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |             Entry::Vacant(vacant_entry) => { | ||||||
|  |                 let block_roots = HashSet::from([block_root]); | ||||||
|  |                 vacant_entry.insert(block_roots); | ||||||
|  | 
 | ||||||
|  |                 SeenBlock::UniqueNonSlashable | ||||||
|  |             } | ||||||
|  |         }; | ||||||
|  | 
 | ||||||
|  |         Ok(slashable_proposal) | ||||||
|     } |     } | ||||||
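
The write path above uses the `HashMap` entry API so the `(slot, proposer)` key is hashed once whether or not it already exists. A generic sketch of the same occupied/vacant pattern (hypothetical `record` helper; the tuple key stands in for `ProposalKey`):

use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};

// Record `root` under `key`; returns `true` if the key had been seen before.
fn record(map: &mut HashMap<(u64, u64), HashSet<u64>>, key: (u64, u64), root: u64) -> bool {
    match map.entry(key) {
        Entry::Occupied(mut occupied) => {
            occupied.get_mut().insert(root);
            true
        }
        Entry::Vacant(vacant) => {
            vacant.insert(HashSet::from([root]));
            false
        }
    }
}

fn main() {
    let mut map = HashMap::new();
    assert!(!record(&mut map, (0, 42), 1)); // first proposal for (slot 0, proposer 42)
    assert!(record(&mut map, (0, 42), 2)); // key already present
}
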
| 
 | 
 | ||||||
|     /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not |     /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not | ||||||
| @ -72,15 +125,33 @@ impl<E: EthSpec> ObservedBlockProducers<E> { | |||||||
|     /// |     /// | ||||||
|     /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. |     /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. | ||||||
|     /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. |     /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. | ||||||
|     pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> { |     pub fn proposer_has_been_observed( | ||||||
|  |         &self, | ||||||
|  |         block: BeaconBlockRef<'_, E>, | ||||||
|  |         block_root: Hash256, | ||||||
|  |     ) -> Result<SeenBlock, Error> { | ||||||
|         self.sanitize_block(block)?; |         self.sanitize_block(block)?; | ||||||
| 
 | 
 | ||||||
|         let exists = self |         let key = ProposalKey { | ||||||
|             .items |             slot: block.slot(), | ||||||
|             .get(&block.slot()) |             proposer: block.proposer_index(), | ||||||
|             .map_or(false, |set| set.contains(&block.proposer_index())); |         }; | ||||||
| 
 | 
 | ||||||
|         Ok(exists) |         if let Some(block_roots) = self.items.get(&key) { | ||||||
|  |             let block_already_known = block_roots.contains(&block_root); | ||||||
|  |             let no_prev_known_blocks = | ||||||
|  |                 block_roots.difference(&HashSet::from([block_root])).count() == 0; | ||||||
|  | 
 | ||||||
|  |             if !no_prev_known_blocks { | ||||||
|  |                 Ok(SeenBlock::Slashable) | ||||||
|  |             } else if block_already_known { | ||||||
|  |                 Ok(SeenBlock::Duplicate) | ||||||
|  |             } else { | ||||||
|  |                 Ok(SeenBlock::UniqueNonSlashable) | ||||||
|  |             } | ||||||
|  |         } else { | ||||||
|  |             Ok(SeenBlock::UniqueNonSlashable) | ||||||
|  |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Returns `Ok(())` if the given `block` is sane. |     /// Returns `Ok(())` if the given `block` is sane. | ||||||
| @ -112,15 +183,15 @@ impl<E: EthSpec> ObservedBlockProducers<E> { | |||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         self.finalized_slot = finalized_slot; |         self.finalized_slot = finalized_slot; | ||||||
|         self.items.retain(|slot, _set| *slot > finalized_slot); |         self.items.retain(|key, _| key.slot > finalized_slot); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`. |     /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`. | ||||||
|     /// |     /// | ||||||
|     /// This is useful for doppelganger detection. |     /// This is useful for doppelganger detection. | ||||||
|     pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool { |     pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool { | ||||||
|         self.items.iter().any(|(slot, producers)| { |         self.items.iter().any(|(key, _)| { | ||||||
|             slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index) |             key.slot.epoch(E::slots_per_epoch()) == epoch && key.proposer == validator_index | ||||||
|         }) |         }) | ||||||
|     } |     } | ||||||
| } | } | ||||||
| @ -148,9 +219,12 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|         // Slot 0, proposer 0 |         // Slot 0, proposer 0 | ||||||
|         let block_a = get_block(0, 0); |         let block_a = get_block(0, 0); | ||||||
|  |         let block_root = block_a.canonical_root(); | ||||||
| 
 | 
 | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_a.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root, block_a.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Ok(false), |             Ok(false), | ||||||
|             "can observe proposer, indicates proposer unobserved" |             "can observe proposer, indicates proposer unobserved" | ||||||
|         ); |         ); | ||||||
| @ -164,7 +238,10 @@ mod tests { | |||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(0)) |                 .get(&ProposalKey { | ||||||
|  |                     slot: Slot::new(0), | ||||||
|  |                     proposer: 0 | ||||||
|  |                 }) | ||||||
|                 .expect("slot zero should be present") |                 .expect("slot zero should be present") | ||||||
|                 .len(), |                 .len(), | ||||||
|             1, |             1, | ||||||
| @ -182,7 +259,10 @@ mod tests { | |||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(0)) |                 .get(&ProposalKey { | ||||||
|  |                     slot: Slot::new(0), | ||||||
|  |                     proposer: 0 | ||||||
|  |                 }) | ||||||
|                 .expect("slot zero should be present") |                 .expect("slot zero should be present") | ||||||
|                 .len(), |                 .len(), | ||||||
|             1, |             1, | ||||||
| @ -207,9 +287,12 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|         // First slot of finalized epoch, proposer 0 |         // First slot of finalized epoch, proposer 0 | ||||||
|         let block_b = get_block(E::slots_per_epoch(), 0); |         let block_b = get_block(E::slots_per_epoch(), 0); | ||||||
|  |         let block_root_b = block_b.canonical_root(); | ||||||
| 
 | 
 | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_b.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root_b, block_b.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Err(Error::FinalizedBlock { |             Err(Error::FinalizedBlock { | ||||||
|                 slot: E::slots_per_epoch().into(), |                 slot: E::slots_per_epoch().into(), | ||||||
|                 finalized_slot: E::slots_per_epoch().into(), |                 finalized_slot: E::slots_per_epoch().into(), | ||||||
| @ -229,7 +312,9 @@ mod tests { | |||||||
|         let block_b = get_block(three_epochs, 0); |         let block_b = get_block(three_epochs, 0); | ||||||
| 
 | 
 | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_b.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root_b, block_b.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Ok(false), |             Ok(false), | ||||||
|             "can insert non-finalized block" |             "can insert non-finalized block" | ||||||
|         ); |         ); | ||||||
| @ -238,7 +323,10 @@ mod tests { | |||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(three_epochs)) |                 .get(&ProposalKey { | ||||||
|  |                     slot: Slot::new(three_epochs), | ||||||
|  |                     proposer: 0 | ||||||
|  |                 }) | ||||||
|                 .expect("the three epochs slot should be present") |                 .expect("the three epochs slot should be present") | ||||||
|                 .len(), |                 .len(), | ||||||
|             1, |             1, | ||||||
| @ -262,7 +350,10 @@ mod tests { | |||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(three_epochs)) |                 .get(&ProposalKey { | ||||||
|  |                     slot: Slot::new(three_epochs), | ||||||
|  |                     proposer: 0 | ||||||
|  |                 }) | ||||||
|                 .expect("the three epochs slot should be present") |                 .expect("the three epochs slot should be present") | ||||||
|                 .len(), |                 .len(), | ||||||
|             1, |             1, | ||||||
| @ -276,24 +367,33 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|         // Slot 0, proposer 0 |         // Slot 0, proposer 0 | ||||||
|         let block_a = get_block(0, 0); |         let block_a = get_block(0, 0); | ||||||
|  |         let block_root_a = block_a.canonical_root(); | ||||||
| 
 | 
 | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.proposer_has_been_observed(block_a.to_ref()), |             cache | ||||||
|  |                 .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) | ||||||
|  |                 .map(|x| x.proposer_previously_observed()), | ||||||
|             Ok(false), |             Ok(false), | ||||||
|             "no observation in empty cache" |             "no observation in empty cache" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_a.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root_a, block_a.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Ok(false), |             Ok(false), | ||||||
|             "can observe proposer, indicates proposer unobserved" |             "can observe proposer, indicates proposer unobserved" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.proposer_has_been_observed(block_a.to_ref()), |             cache | ||||||
|  |                 .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) | ||||||
|  |                 .map(|x| x.proposer_previously_observed()), | ||||||
|             Ok(true), |             Ok(true), | ||||||
|             "observed block is indicated as true" |             "observed block is indicated as true" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_a.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root_a, block_a.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Ok(true), |             Ok(true), | ||||||
|             "observing again indicates true" |             "observing again indicates true" | ||||||
|         ); |         ); | ||||||
| @ -303,7 +403,10 @@ mod tests { | |||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(0)) |                 .get(&ProposalKey { | ||||||
|  |                     slot: Slot::new(0), | ||||||
|  |                     proposer: 0 | ||||||
|  |                 }) | ||||||
|                 .expect("slot zero should be present") |                 .expect("slot zero should be present") | ||||||
|                 .len(), |                 .len(), | ||||||
|             1, |             1, | ||||||
| @ -312,24 +415,33 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|         // Slot 1, proposer 0 |         // Slot 1, proposer 0 | ||||||
|         let block_b = get_block(1, 0); |         let block_b = get_block(1, 0); | ||||||
|  |         let block_root_b = block_b.canonical_root(); | ||||||
| 
 | 
 | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.proposer_has_been_observed(block_b.to_ref()), |             cache | ||||||
|  |                 .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) | ||||||
|  |                 .map(|x| x.proposer_previously_observed()), | ||||||
|             Ok(false), |             Ok(false), | ||||||
|             "no observation for new slot" |             "no observation for new slot" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_b.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root_b, block_b.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Ok(false), |             Ok(false), | ||||||
|             "can observe proposer for new slot, indicates proposer unobserved" |             "can observe proposer for new slot, indicates proposer unobserved" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.proposer_has_been_observed(block_b.to_ref()), |             cache | ||||||
|  |                 .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) | ||||||
|  |                 .map(|x| x.proposer_previously_observed()), | ||||||
|             Ok(true), |             Ok(true), | ||||||
|             "observed block in slot 1 is indicated as true" |             "observed block in slot 1 is indicated as true" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_b.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root_b, block_b.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Ok(true), |             Ok(true), | ||||||
|             "observing slot 1 again indicates true" |             "observing slot 1 again indicates true" | ||||||
|         ); |         ); | ||||||
| @ -339,7 +451,10 @@ mod tests { | |||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(0)) |                 .get(&ProposalKey { | ||||||
|  |                     slot: Slot::new(0), | ||||||
|  |                     proposer: 0 | ||||||
|  |                 }) | ||||||
|                 .expect("slot zero should be present") |                 .expect("slot zero should be present") | ||||||
|                 .len(), |                 .len(), | ||||||
|             1, |             1, | ||||||
| @ -348,7 +463,10 @@ mod tests { | |||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(1)) |                 .get(&ProposalKey { | ||||||
|  |                     slot: Slot::new(1), | ||||||
|  |                     proposer: 0 | ||||||
|  |                 }) | ||||||
|                 .expect("slot zero should be present") |                 .expect("slot zero should be present") | ||||||
|                 .len(), |                 .len(), | ||||||
|             1, |             1, | ||||||
| @ -357,45 +475,54 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|         // Slot 0, proposer 1 |         // Slot 0, proposer 1 | ||||||
|         let block_c = get_block(0, 1); |         let block_c = get_block(0, 1); | ||||||
|  |         let block_root_c = block_c.canonical_root(); | ||||||
| 
 | 
 | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.proposer_has_been_observed(block_c.to_ref()), |             cache | ||||||
|  |                 .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) | ||||||
|  |                 .map(|x| x.proposer_previously_observed()), | ||||||
|             Ok(false), |             Ok(false), | ||||||
|             "no observation for new proposer" |             "no observation for new proposer" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_c.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root_c, block_c.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Ok(false), |             Ok(false), | ||||||
|             "can observe new proposer, indicates proposer unobserved" |             "can observe new proposer, indicates proposer unobserved" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.proposer_has_been_observed(block_c.to_ref()), |             cache | ||||||
|  |                 .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) | ||||||
|  |                 .map(|x| x.proposer_previously_observed()), | ||||||
|             Ok(true), |             Ok(true), | ||||||
|             "observed new proposer block is indicated as true" |             "observed new proposer block is indicated as true" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache.observe_proposer(block_c.to_ref()), |             cache | ||||||
|  |                 .observe_proposal(block_root_c, block_c.to_ref()) | ||||||
|  |                 .map(SeenBlock::proposer_previously_observed), | ||||||
|             Ok(true), |             Ok(true), | ||||||
|             "observing new proposer again indicates true" |             "observing new proposer again indicates true" | ||||||
|         ); |         ); | ||||||
| 
 | 
 | ||||||
|         assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); |         assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); | ||||||
|         assert_eq!(cache.items.len(), 2, "two slots should be present"); |         assert_eq!(cache.items.len(), 3, "three slots should be present"); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(0)) |                 .iter() | ||||||
|                 .expect("slot zero should be present") |                 .filter(|(k, _)| k.slot == cache.finalized_slot) | ||||||
|                 .len(), |                 .count(), | ||||||
|             2, |             2, | ||||||
|             "two proposers should be present in slot 0" |             "two proposers should be present in slot 0" | ||||||
|         ); |         ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache | ||||||
|                 .items |                 .items | ||||||
|                 .get(&Slot::new(1)) |                 .iter() | ||||||
|                 .expect("slot zero should be present") |                 .filter(|(k, _)| k.slot == Slot::new(1)) | ||||||
|                 .len(), |                 .count(), | ||||||
|             1, |             1, | ||||||
|             "only one proposer should be present in slot 1" |             "only one proposer should be present in slot 1" | ||||||
|         ); |         ); | ||||||
|  | |||||||
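
The read-only check in `proposer_has_been_observed` asks two questions of the stored root-set: is any other root present (slashable), and is this exact root present (duplicate). A compact sketch with the same outcomes, written without the temporary singleton set the diff builds for `difference` (hypothetical `check` helper):

use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum SeenBlock {
    Duplicate,
    Slashable,
    UniqueNonSlashable,
}

// `seen` is the stored root-set for one (slot, proposer) key, if any.
fn check(seen: Option<&HashSet<u64>>, root: u64) -> SeenBlock {
    match seen {
        Some(roots) if roots.iter().any(|r| *r != root) => SeenBlock::Slashable,
        Some(roots) if roots.contains(&root) => SeenBlock::Duplicate,
        _ => SeenBlock::UniqueNonSlashable,
    }
}

fn main() {
    let roots = HashSet::from([1u64]);
    assert_eq!(check(Some(&roots), 1), SeenBlock::Duplicate);
    assert_eq!(check(Some(&roots), 2), SeenBlock::Slashable);
    assert_eq!(check(None, 1), SeenBlock::UniqueNonSlashable);
}
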
| @ -28,15 +28,14 @@ | |||||||
| 
 | 
 | ||||||
| use crate::observed_attesters::SlotSubcommitteeIndex; | use crate::observed_attesters::SlotSubcommitteeIndex; | ||||||
| use crate::{ | use crate::{ | ||||||
|     beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, |     beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, | ||||||
|     metrics, |     observed_aggregates::ObserveOutcome, BeaconChain, BeaconChainError, BeaconChainTypes, | ||||||
|     observed_aggregates::ObserveOutcome, |  | ||||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, |  | ||||||
| }; | }; | ||||||
| use bls::{verify_signature_sets, PublicKeyBytes}; | use bls::{verify_signature_sets, PublicKeyBytes}; | ||||||
| use derivative::Derivative; | use derivative::Derivative; | ||||||
| use safe_arith::ArithError; | use safe_arith::ArithError; | ||||||
| use slot_clock::SlotClock; | use slot_clock::SlotClock; | ||||||
|  | use ssz_derive::{Decode, Encode}; | ||||||
| use state_processing::per_block_processing::errors::SyncCommitteeMessageValidationError; | use state_processing::per_block_processing::errors::SyncCommitteeMessageValidationError; | ||||||
| use state_processing::signature_sets::{ | use state_processing::signature_sets::{ | ||||||
|     signed_sync_aggregate_selection_proof_signature_set, signed_sync_aggregate_signature_set, |     signed_sync_aggregate_selection_proof_signature_set, signed_sync_aggregate_signature_set, | ||||||
| @ -47,9 +46,11 @@ use std::borrow::Cow; | |||||||
| use std::collections::HashMap; | use std::collections::HashMap; | ||||||
| use strum::AsRefStr; | use strum::AsRefStr; | ||||||
| use tree_hash::TreeHash; | use tree_hash::TreeHash; | ||||||
|  | use tree_hash_derive::TreeHash; | ||||||
| use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; | use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; | ||||||
| use types::slot_data::SlotData; | use types::slot_data::SlotData; | ||||||
| use types::sync_committee::Error as SyncCommitteeError; | use types::sync_committee::Error as SyncCommitteeError; | ||||||
|  | use types::ChainSpec; | ||||||
| use types::{ | use types::{ | ||||||
|     sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError, |     sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError, | ||||||
|     EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, |     EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, | ||||||
| @ -110,14 +111,14 @@ pub enum Error { | |||||||
|     /// |     /// | ||||||
|     /// The peer has sent an invalid message. |     /// The peer has sent an invalid message. | ||||||
|     AggregatorPubkeyUnknown(u64), |     AggregatorPubkeyUnknown(u64), | ||||||
|     /// The sync contribution has been seen before; either in a block, on the gossip network or from a |     /// The sync contribution, or a superset of this sync contribution's aggregation bits for the same data, | ||||||
|     /// local validator. |     /// has been seen before; either in a block, on the gossip network, or from a local validator. | ||||||
|     /// |     /// | ||||||
|     /// ## Peer scoring |     /// ## Peer scoring | ||||||
|     /// |     /// | ||||||
|     /// It's unclear if this sync contribution is valid; however, we have already observed it and do not |     /// It's unclear if this sync contribution is valid; however, we have already observed it and do not | ||||||
|     /// need to observe it again. |     /// need to observe it again. | ||||||
|     SyncContributionAlreadyKnown(Hash256), |     SyncContributionSupersetKnown(Hash256), | ||||||
|     /// There has already been an aggregation observed for this validator; we refuse to process a |     /// There has already been an aggregation observed for this validator; we refuse to process a | ||||||
|     /// second. |     /// second. | ||||||
|     /// |     /// | ||||||
| @ -268,6 +269,14 @@ pub struct VerifiedSyncContribution<T: BeaconChainTypes> { | |||||||
|     participant_pubkeys: Vec<PublicKeyBytes>, |     participant_pubkeys: Vec<PublicKeyBytes>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /// The sync contribution data. | ||||||
|  | #[derive(Encode, Decode, TreeHash)] | ||||||
|  | pub struct SyncCommitteeData { | ||||||
|  |     pub slot: Slot, | ||||||
|  |     pub root: Hash256, | ||||||
|  |     pub subcommittee_index: u64, | ||||||
|  | } | ||||||
|  | 
 | ||||||
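
With `SyncCommitteeData`, deduplication keys on the contribution's `(slot, beacon_block_root, subcommittee_index)` rather than the hash of the whole contribution, so contributions for the same data but different aggregation bits share one cache bucket. A dependency-free sketch of such a key (the real code tree-hashes the triple into a `Hash256`; the struct here is a stand-in and the field values are made up):

use std::collections::HashMap;

// Stand-in key: any stable hash over the same three fields gives the same
// deduplication scope as the tree hash root used by the verification code.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct SyncCommitteeData {
    slot: u64,
    root: [u8; 32],
    subcommittee_index: u64,
}

fn main() {
    let key = SyncCommitteeData { slot: 7, root: [0u8; 32], subcommittee_index: 3 };
    let mut cache: HashMap<SyncCommitteeData, Vec<u8>> = HashMap::new();
    // Contributions with the same (slot, root, subcommittee) but different
    // aggregation bits land in the same bucket, where subset checks apply.
    cache.entry(key).or_default().push(0b0011);
    cache.entry(key).or_default().push(0b1100);
    assert_eq!(cache.len(), 1);
}
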
| /// Wraps a `SyncCommitteeMessage` that has been verified for propagation on the gossip network. | /// Wraps a `SyncCommitteeMessage` that has been verified for propagation on the gossip network. | ||||||
| #[derive(Clone)] | #[derive(Clone)] | ||||||
| pub struct VerifiedSyncCommitteeMessage { | pub struct VerifiedSyncCommitteeMessage { | ||||||
| @ -287,7 +296,7 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> { | |||||||
|         let subcommittee_index = contribution.subcommittee_index as usize; |         let subcommittee_index = contribution.subcommittee_index as usize; | ||||||
| 
 | 
 | ||||||
|         // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. |         // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. | ||||||
|         verify_propagation_slot_range(&chain.slot_clock, contribution)?; |         verify_propagation_slot_range(&chain.slot_clock, contribution, &chain.spec)?; | ||||||
| 
 | 
 | ||||||
|         // Validate subcommittee index. |         // Validate subcommittee index. | ||||||
|         if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { |         if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { | ||||||
| @ -314,15 +323,22 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> { | |||||||
|             return Err(Error::AggregatorNotInCommittee { aggregator_index }); |             return Err(Error::AggregatorNotInCommittee { aggregator_index }); | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         // Ensure the valid sync contribution has not already been seen locally. |         // Ensure the valid sync contribution or its superset has not already been seen locally. | ||||||
|         let contribution_root = contribution.tree_hash_root(); |         let contribution_data_root = SyncCommitteeData { | ||||||
|  |             slot: contribution.slot, | ||||||
|  |             root: contribution.beacon_block_root, | ||||||
|  |             subcommittee_index: contribution.subcommittee_index, | ||||||
|  |         } | ||||||
|  |         .tree_hash_root(); | ||||||
|  | 
 | ||||||
|         if chain |         if chain | ||||||
|             .observed_sync_contributions |             .observed_sync_contributions | ||||||
|             .write() |             .write() | ||||||
|             .is_known(contribution, contribution_root) |             .is_known_subset(contribution, contribution_data_root) | ||||||
|             .map_err(|e| Error::BeaconChainError(e.into()))? |             .map_err(|e| Error::BeaconChainError(e.into()))? | ||||||
|         { |         { | ||||||
|             return Err(Error::SyncContributionAlreadyKnown(contribution_root)); |             metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); | ||||||
|  |             return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         // Ensure there has been no other observed aggregate for the given `aggregator_index`. |         // Ensure there has been no other observed aggregate for the given `aggregator_index`. | ||||||
| @ -376,13 +392,14 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> { | |||||||
|         // |         // | ||||||
|         // It's important to double check that the contribution is not already known, otherwise two |         // It's important to double check that the contribution is not already known, otherwise two | ||||||
|         // contributions processed at the same time could be published. |         // contributions processed at the same time could be published. | ||||||
|         if let ObserveOutcome::AlreadyKnown = chain |         if let ObserveOutcome::Subset = chain | ||||||
|             .observed_sync_contributions |             .observed_sync_contributions | ||||||
|             .write() |             .write() | ||||||
|             .observe_item(contribution, Some(contribution_root)) |             .observe_item(contribution, Some(contribution_data_root)) | ||||||
|             .map_err(|e| Error::BeaconChainError(e.into()))? |             .map_err(|e| Error::BeaconChainError(e.into()))? | ||||||
|         { |         { | ||||||
|             return Err(Error::SyncContributionAlreadyKnown(contribution_root)); |             metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); | ||||||
|  |             return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         // Observe the aggregator so we don't process another aggregate from them. |         // Observe the aggregator so we don't process another aggregate from them. | ||||||
| @ -442,7 +459,7 @@ impl VerifiedSyncCommitteeMessage { | |||||||
|         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). |         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). | ||||||
|         // |         // | ||||||
|         // We do not queue future sync committee messages for later processing. |         // We do not queue future sync committee messages for later processing. | ||||||
|         verify_propagation_slot_range(&chain.slot_clock, &sync_message)?; |         verify_propagation_slot_range(&chain.slot_clock, &sync_message, &chain.spec)?; | ||||||
|  |  | ||||||
|         // Ensure the `subnet_id` is valid for the given validator. |         // Ensure the `subnet_id` is valid for the given validator. | ||||||
|         let pubkey = chain |         let pubkey = chain | ||||||
| @ -558,11 +575,11 @@ impl VerifiedSyncCommitteeMessage { | |||||||
| pub fn verify_propagation_slot_range<S: SlotClock, U: SlotData>( | pub fn verify_propagation_slot_range<S: SlotClock, U: SlotData>( | ||||||
|     slot_clock: &S, |     slot_clock: &S, | ||||||
|     sync_contribution: &U, |     sync_contribution: &U, | ||||||
|  |     spec: &ChainSpec, | ||||||
| ) -> Result<(), Error> { | ) -> Result<(), Error> { | ||||||
|     let message_slot = sync_contribution.get_slot(); |     let message_slot = sync_contribution.get_slot(); | ||||||
|  |  | ||||||
|     let latest_permissible_slot = slot_clock |     let latest_permissible_slot = slot_clock | ||||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) |         .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) | ||||||
|         .ok_or(BeaconChainError::UnableToReadSlot)?; |         .ok_or(BeaconChainError::UnableToReadSlot)?; | ||||||
|     if message_slot > latest_permissible_slot { |     if message_slot > latest_permissible_slot { | ||||||
|         return Err(Error::FutureSlot { |         return Err(Error::FutureSlot { | ||||||
| @ -572,7 +589,7 @@ pub fn verify_propagation_slot_range<S: SlotClock, U: SlotData>( | |||||||
|     } |     } | ||||||
|  |  | ||||||
|     let earliest_permissible_slot = slot_clock |     let earliest_permissible_slot = slot_clock | ||||||
|         .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) |         .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) | ||||||
|         .ok_or(BeaconChainError::UnableToReadSlot)?; |         .ok_or(BeaconChainError::UnableToReadSlot)?; | ||||||
|  |  | ||||||
|     if message_slot < earliest_permissible_slot { |     if message_slot < earliest_permissible_slot { | ||||||
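The hunk above threads `&ChainSpec` into `verify_propagation_slot_range` so the clock-disparity tolerance comes from `spec.maximum_gossip_clock_disparity()` rather than the hard-coded `MAXIMUM_GOSSIP_CLOCK_DISPARITY` constant. A minimal model of the resulting window check, using a toy clock and plain `u64` slots rather than the real `SlotClock` trait:

```rust
use std::time::Duration;

/// Toy slot clock: fixed-length slots measured from a genesis offset.
struct ToyClock {
    genesis: Duration,
    slot_duration: Duration,
}

impl ToyClock {
    /// Slot in effect at time `t` (saturates to slot 0 before genesis).
    fn slot_at(&self, t: Duration) -> u64 {
        (t.saturating_sub(self.genesis).as_millis() / self.slot_duration.as_millis()) as u64
    }
}

/// A message is propagatable iff its slot is neither newer than the slot read
/// with the clock pushed `disparity` into the future, nor older than the slot
/// read with the clock pulled `disparity` into the past.
fn in_propagation_range(clock: &ToyClock, now: Duration, disparity: Duration, msg_slot: u64) -> bool {
    let latest_permissible = clock.slot_at(now + disparity);
    let earliest_permissible = clock.slot_at(now.saturating_sub(disparity));
    (earliest_permissible..=latest_permissible).contains(&msg_slot)
}
```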
|  | |||||||
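For the dedup change in this file: observed contributions are now keyed by the tree hash of their `SyncCommitteeData` (slot, beacon block root, subcommittee index), and an incoming aggregate is rejected when its participation bits are a subset of bits already seen under that key, not just on an exact duplicate. A standalone sketch of the idea, with illustrative types standing in for the real `ObservedAggregates`:

```rust
use std::collections::HashMap;

/// Stand-in for the `Hash256` data root computed from `SyncCommitteeData` above.
type DataRoot = u64;

/// Participation bitfields observed so far, grouped per contribution data root.
#[derive(Default)]
struct ObservedContributions {
    seen: HashMap<DataRoot, Vec<Vec<bool>>>,
}

impl ObservedContributions {
    /// True iff every participant in `bits` is already covered by a single
    /// previously observed aggregate, i.e. the new aggregate adds no signers.
    fn is_known_subset(&self, key: DataRoot, bits: &[bool]) -> bool {
        self.seen.get(&key).map_or(false, |prior| {
            prior
                .iter()
                .any(|prev| bits.iter().zip(prev).all(|(b, p)| !*b || *p))
        })
    }

    fn observe(&mut self, key: DataRoot, bits: Vec<bool>) {
        self.seen.entry(key).or_default().push(bits);
    }
}
```

Keying by the data root rather than the full aggregate root (which mixes in the bits) is what lets differently-aggregated copies of the same contribution collide on one entry, and is why the error is renamed to `SyncContributionSupersetKnown`.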
| @ -17,12 +17,11 @@ use bls::get_withdrawal_credentials; | |||||||
| use execution_layer::{ | use execution_layer::{ | ||||||
|     auth::JwtKey, |     auth::JwtKey, | ||||||
|     test_utils::{ |     test_utils::{ | ||||||
|         ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET, |         ExecutionBlockGenerator, MockBuilder, MockBuilderServer, MockExecutionLayer, | ||||||
|         DEFAULT_TERMINAL_BLOCK, |         DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, | ||||||
|     }, |     }, | ||||||
|     ExecutionLayer, |     ExecutionLayer, | ||||||
| }; | }; | ||||||
| use fork_choice::CountUnrealized; |  | ||||||
| use futures::channel::mpsc::Receiver; | use futures::channel::mpsc::Receiver; | ||||||
| pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; | pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; | ||||||
| use int_to_bytes::int_to_bytes32; | use int_to_bytes::int_to_bytes32; | ||||||
| @ -168,7 +167,6 @@ pub struct Builder<T: BeaconChainTypes> { | |||||||
|     store_mutator: Option<BoxedMutator<T::EthSpec, T::HotStore, T::ColdStore>>, |     store_mutator: Option<BoxedMutator<T::EthSpec, T::HotStore, T::ColdStore>>, | ||||||
|     execution_layer: Option<ExecutionLayer<T::EthSpec>>, |     execution_layer: Option<ExecutionLayer<T::EthSpec>>, | ||||||
|     mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, |     mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, | ||||||
|     mock_builder: Option<TestingBuilder<T::EthSpec>>, |  | ||||||
|     testing_slot_clock: Option<TestingSlotClock>, |     testing_slot_clock: Option<TestingSlotClock>, | ||||||
|     runtime: TestRuntime, |     runtime: TestRuntime, | ||||||
|     log: Logger, |     log: Logger, | ||||||
| @ -302,7 +300,6 @@ where | |||||||
|             store_mutator: None, |             store_mutator: None, | ||||||
|             execution_layer: None, |             execution_layer: None, | ||||||
|             mock_execution_layer: None, |             mock_execution_layer: None, | ||||||
|             mock_builder: None, |  | ||||||
|             testing_slot_clock: None, |             testing_slot_clock: None, | ||||||
|             runtime, |             runtime, | ||||||
|             log, |             log, | ||||||
| @ -434,7 +431,11 @@ where | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn mock_execution_layer(mut self) -> Self { |     pub fn mock_execution_layer(self) -> Self { | ||||||
|  |         self.mock_execution_layer_with_config(None) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn mock_execution_layer_with_config(mut self, builder_threshold: Option<u128>) -> Self { | ||||||
|         let spec = self.spec.clone().expect("cannot build without spec"); |         let spec = self.spec.clone().expect("cannot build without spec"); | ||||||
|         let shanghai_time = spec.capella_fork_epoch.map(|epoch| { |         let shanghai_time = spec.capella_fork_epoch.map(|epoch| { | ||||||
|             HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() |             HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() | ||||||
| @ -443,55 +444,15 @@ where | |||||||
|             self.runtime.task_executor.clone(), |             self.runtime.task_executor.clone(), | ||||||
|             DEFAULT_TERMINAL_BLOCK, |             DEFAULT_TERMINAL_BLOCK, | ||||||
|             shanghai_time, |             shanghai_time, | ||||||
|             None, |             builder_threshold, | ||||||
|             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), |             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), | ||||||
|             spec, |             spec, | ||||||
|             None, |  | ||||||
|         ); |         ); | ||||||
|         self.execution_layer = Some(mock.el.clone()); |         self.execution_layer = Some(mock.el.clone()); | ||||||
|         self.mock_execution_layer = Some(mock); |         self.mock_execution_layer = Some(mock); | ||||||
|         self |         self | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn mock_execution_layer_with_builder( |  | ||||||
|         mut self, |  | ||||||
|         beacon_url: SensitiveUrl, |  | ||||||
|         builder_threshold: Option<u128>, |  | ||||||
|     ) -> Self { |  | ||||||
|         // Get a random unused port |  | ||||||
|         let port = unused_port::unused_tcp4_port().unwrap(); |  | ||||||
|         let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); |  | ||||||
|  |  | ||||||
|         let spec = self.spec.clone().expect("cannot build without spec"); |  | ||||||
|         let shanghai_time = spec.capella_fork_epoch.map(|epoch| { |  | ||||||
|             HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() |  | ||||||
|         }); |  | ||||||
|         let mock_el = MockExecutionLayer::new( |  | ||||||
|             self.runtime.task_executor.clone(), |  | ||||||
|             DEFAULT_TERMINAL_BLOCK, |  | ||||||
|             shanghai_time, |  | ||||||
|             builder_threshold, |  | ||||||
|             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), |  | ||||||
|             spec.clone(), |  | ||||||
|             Some(builder_url.clone()), |  | ||||||
|         ) |  | ||||||
|         .move_to_terminal_block(); |  | ||||||
|  |  | ||||||
|         let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); |  | ||||||
|  |  | ||||||
|         self.mock_builder = Some(TestingBuilder::new( |  | ||||||
|             mock_el_url, |  | ||||||
|             builder_url, |  | ||||||
|             beacon_url, |  | ||||||
|             spec, |  | ||||||
|             self.runtime.task_executor.clone(), |  | ||||||
|         )); |  | ||||||
|         self.execution_layer = Some(mock_el.el.clone()); |  | ||||||
|         self.mock_execution_layer = Some(mock_el); |  | ||||||
|  |  | ||||||
|         self |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     /// Instruct the mock execution engine to always return a "valid" response to any payload it is |     /// Instruct the mock execution engine to always return a "valid" response to any payload it is | ||||||
|     /// asked to execute. |     /// asked to execute. | ||||||
|     pub fn mock_execution_layer_all_payloads_valid(self) -> Self { |     pub fn mock_execution_layer_all_payloads_valid(self) -> Self { | ||||||
| @ -517,18 +478,23 @@ where | |||||||
|         let validator_keypairs = self |         let validator_keypairs = self | ||||||
|             .validator_keypairs |             .validator_keypairs | ||||||
|             .expect("cannot build without validator keypairs"); |             .expect("cannot build without validator keypairs"); | ||||||
|  |         let chain_config = self.chain_config.unwrap_or_default(); | ||||||
|  |  | ||||||
|         let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) |         let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) | ||||||
|             .logger(log.clone()) |             .logger(log.clone()) | ||||||
|             .custom_spec(spec) |             .custom_spec(spec) | ||||||
|             .store(self.store.expect("cannot build without store")) |             .store(self.store.expect("cannot build without store")) | ||||||
|             .store_migrator_config(MigratorConfig::default().blocking()) |             .store_migrator_config( | ||||||
|  |                 MigratorConfig::default() | ||||||
|  |                     .blocking() | ||||||
|  |                     .epochs_per_migration(chain_config.epochs_per_migration), | ||||||
|  |             ) | ||||||
|             .task_executor(self.runtime.task_executor.clone()) |             .task_executor(self.runtime.task_executor.clone()) | ||||||
|             .execution_layer(self.execution_layer) |             .execution_layer(self.execution_layer) | ||||||
|             .dummy_eth1_backend() |             .dummy_eth1_backend() | ||||||
|             .expect("should build dummy backend") |             .expect("should build dummy backend") | ||||||
|             .shutdown_sender(shutdown_tx) |             .shutdown_sender(shutdown_tx) | ||||||
|             .chain_config(self.chain_config.unwrap_or_default()) |             .chain_config(chain_config) | ||||||
|             .event_handler(Some(ServerSentEventHandler::new_with_capacity( |             .event_handler(Some(ServerSentEventHandler::new_with_capacity( | ||||||
|                 log.clone(), |                 log.clone(), | ||||||
|                 5, |                 5, | ||||||
| @ -568,7 +534,7 @@ where | |||||||
|             shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), |             shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), | ||||||
|             runtime: self.runtime, |             runtime: self.runtime, | ||||||
|             mock_execution_layer: self.mock_execution_layer, |             mock_execution_layer: self.mock_execution_layer, | ||||||
|             mock_builder: self.mock_builder.map(Arc::new), |             mock_builder: None, | ||||||
|             rng: make_rng(), |             rng: make_rng(), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
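The migrator hunk above makes the harness honour `epochs_per_migration` from the `ChainConfig`. A hypothetical test configuration (assuming the usual harness imports are in scope; the value is illustrative, the field comes from the diff):

```rust
// Hypothetical: throttle the blocking background migration to at most once
// every 2 epochs; all other options keep their defaults.
let chain_config = ChainConfig {
    epochs_per_migration: 2,
    ..ChainConfig::default()
};
let harness = BeaconChainHarness::builder(MainnetEthSpec)
    .default_spec()
    .chain_config(chain_config)
    .deterministic_keypairs(8)
    .fresh_ephemeral_store()
    .build();
```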
| @ -593,7 +559,7 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> { | |||||||
|     pub runtime: TestRuntime, |     pub runtime: TestRuntime, | ||||||
|  |  | ||||||
|     pub mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, |     pub mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, | ||||||
|     pub mock_builder: Option<Arc<TestingBuilder<T::EthSpec>>>, |     pub mock_builder: Option<Arc<MockBuilder<T::EthSpec>>>, | ||||||
|  |  | ||||||
|     pub rng: Mutex<StdRng>, |     pub rng: Mutex<StdRng>, | ||||||
| } | } | ||||||
| @ -629,6 +595,49 @@ where | |||||||
|             .execution_block_generator() |             .execution_block_generator() | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |     pub fn set_mock_builder(&mut self, beacon_url: SensitiveUrl) -> MockBuilderServer { | ||||||
|  |         let mock_el = self | ||||||
|  |             .mock_execution_layer | ||||||
|  |             .as_ref() | ||||||
|  |             .expect("harness was not built with mock execution layer"); | ||||||
|  |  | ||||||
|  |         let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); | ||||||
|  |  | ||||||
|  |         // Create the builder, listening on a free port. | ||||||
|  |         let (mock_builder, mock_builder_server) = MockBuilder::new_for_testing( | ||||||
|  |             mock_el_url, | ||||||
|  |             beacon_url, | ||||||
|  |             self.spec.clone(), | ||||||
|  |             self.runtime.task_executor.clone(), | ||||||
|  |         ); | ||||||
|  |  | ||||||
|  |         // Set the builder URL in the execution layer now that its port is known. | ||||||
|  |         let builder_listen_addr = mock_builder_server.local_addr(); | ||||||
|  |         let port = builder_listen_addr.port(); | ||||||
|  |         mock_el | ||||||
|  |             .el | ||||||
|  |             .set_builder_url( | ||||||
|  |                 SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(), | ||||||
|  |                 None, | ||||||
|  |             ) | ||||||
|  |             .unwrap(); | ||||||
|  |  | ||||||
|  |         self.mock_builder = Some(Arc::new(mock_builder)); | ||||||
|  |  | ||||||
|  |         // Sanity check. | ||||||
|  |         let el_builder = self | ||||||
|  |             .chain | ||||||
|  |             .execution_layer | ||||||
|  |             .as_ref() | ||||||
|  |             .unwrap() | ||||||
|  |             .builder() | ||||||
|  |             .unwrap(); | ||||||
|  |         let mock_el_builder = mock_el.el.builder().unwrap(); | ||||||
|  |         assert!(Arc::ptr_eq(&el_builder, &mock_el_builder)); | ||||||
|  |  | ||||||
|  |         mock_builder_server | ||||||
|  |     } | ||||||
|  |  | ||||||
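A sketch of how a test would use the new helper; the beacon API URL is an assumed placeholder and the surrounding harness setup is elided:

```rust
// Attach a mock builder once the beacon node's HTTP address is known. The
// helper binds the builder to a free port, wires that port into the execution
// layer, and hands back the server for the test to drive.
let beacon_url = SensitiveUrl::parse("http://127.0.0.1:5052").unwrap(); // placeholder
let mock_builder_server = harness.set_mock_builder(beacon_url);
// `harness.mock_builder` is now `Some(..)`; spawn or poll `mock_builder_server`
// as the test requires.
```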
|     pub fn get_all_validators(&self) -> Vec<usize> { |     pub fn get_all_validators(&self) -> Vec<usize> { | ||||||
|         (0..self.validator_keypairs.len()).collect() |         (0..self.validator_keypairs.len()).collect() | ||||||
|     } |     } | ||||||
| @ -734,6 +743,15 @@ where | |||||||
|         state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() |         state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |     pub async fn make_blinded_block( | ||||||
|  |         &self, | ||||||
|  |         state: BeaconState<E>, | ||||||
|  |         slot: Slot, | ||||||
|  |     ) -> (SignedBlindedBeaconBlock<E>, BeaconState<E>) { | ||||||
|  |         let (unblinded, new_state) = self.make_block(state, slot).await; | ||||||
|  |         (unblinded.into(), new_state) | ||||||
|  |     } | ||||||
|  |  | ||||||
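`make_blinded_block` is a thin convenience over `make_block` plus the blinded conversion via `.into()`; in a test it reads roughly as:

```rust
// Produce a blinded block for the current slot; state and slot are fetched the
// same way as for the other harness helpers.
let state = harness.get_current_state();
let slot = harness.get_current_slot();
let (blinded_block, _post_state) = harness.make_blinded_block(state, slot).await;
```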
|     /// Returns a newly created block, signed by the proposer for the given slot. |     /// Returns a newly created block, signed by the proposer for the given slot. | ||||||
|     pub async fn make_block( |     pub async fn make_block( | ||||||
|         &self, |         &self, | ||||||
| @ -746,9 +764,7 @@ where | |||||||
|         complete_state_advance(&mut state, None, slot, &self.spec) |         complete_state_advance(&mut state, None, slot, &self.spec) | ||||||
|             .expect("should be able to advance state to slot"); |             .expect("should be able to advance state to slot"); | ||||||
|  |  | ||||||
|         state |         state.build_caches(&self.spec).expect("should build caches"); | ||||||
|             .build_all_caches(&self.spec) |  | ||||||
|             .expect("should build caches"); |  | ||||||
|  |  | ||||||
|         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); |         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); | ||||||
|  |  | ||||||
| @ -795,9 +811,7 @@ where | |||||||
|         complete_state_advance(&mut state, None, slot, &self.spec) |         complete_state_advance(&mut state, None, slot, &self.spec) | ||||||
|             .expect("should be able to advance state to slot"); |             .expect("should be able to advance state to slot"); | ||||||
|  |  | ||||||
|         state |         state.build_caches(&self.spec).expect("should build caches"); | ||||||
|             .build_all_caches(&self.spec) |  | ||||||
|             .expect("should build caches"); |  | ||||||
|  |  | ||||||
|         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); |         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); | ||||||
|  |  | ||||||
| @ -1515,6 +1529,36 @@ where | |||||||
|         .sign(sk, &fork, genesis_validators_root, &self.chain.spec) |         .sign(sk, &fork, genesis_validators_root, &self.chain.spec) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |     pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> { | ||||||
|  |         let proposer_slashing = self.make_proposer_slashing(validator_index); | ||||||
|  |         if let ObservationOutcome::New(verified_proposer_slashing) = self | ||||||
|  |             .chain | ||||||
|  |             .verify_proposer_slashing_for_gossip(proposer_slashing) | ||||||
|  |             .expect("should verify proposer slashing for gossip") | ||||||
|  |         { | ||||||
|  |             self.chain | ||||||
|  |                 .import_proposer_slashing(verified_proposer_slashing); | ||||||
|  |             Ok(()) | ||||||
|  |         } else { | ||||||
|  |             Err("should observe new proposer slashing".to_string()) | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn add_attester_slashing(&self, validator_indices: Vec<u64>) -> Result<(), String> { | ||||||
|  |         let attester_slashing = self.make_attester_slashing(validator_indices); | ||||||
|  |         if let ObservationOutcome::New(verified_attester_slashing) = self | ||||||
|  |             .chain | ||||||
|  |             .verify_attester_slashing_for_gossip(attester_slashing) | ||||||
|  |             .expect("should verify attester slashing for gossip") | ||||||
|  |         { | ||||||
|  |             self.chain | ||||||
|  |                 .import_attester_slashing(verified_attester_slashing); | ||||||
|  |             Ok(()) | ||||||
|  |         } else { | ||||||
|  |             Err("should observe new attester slashing".to_string()) | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
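Both helpers follow the same verify-for-gossip-then-import shape; a caller might exercise them as:

```rust
// Slash validator 0 as a proposer and validators 1 and 2 as attesters; each
// call fails if the slashing was not observed as new.
harness.add_proposer_slashing(0).unwrap();
harness.add_attester_slashing(vec![1, 2]).unwrap();
```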
|     pub fn add_bls_to_execution_change( |     pub fn add_bls_to_execution_change( | ||||||
|         &self, |         &self, | ||||||
|         validator_index: u64, |         validator_index: u64, | ||||||
| @ -1696,8 +1740,8 @@ where | |||||||
|             .process_block( |             .process_block( | ||||||
|                 block_root, |                 block_root, | ||||||
|                 Arc::new(block), |                 Arc::new(block), | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |                 NotifyExecutionLayer::Yes, | ||||||
|  |                 || Ok(()), | ||||||
|             ) |             ) | ||||||
|             .await? |             .await? | ||||||
|             .into(); |             .into(); | ||||||
| @ -1714,8 +1758,8 @@ where | |||||||
|             .process_block( |             .process_block( | ||||||
|                 block.canonical_root(), |                 block.canonical_root(), | ||||||
|                 Arc::new(block), |                 Arc::new(block), | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |                 NotifyExecutionLayer::Yes, | ||||||
|  |                 || Ok(()), | ||||||
|             ) |             ) | ||||||
|             .await? |             .await? | ||||||
|             .into(); |             .into(); | ||||||
|  | |||||||
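Throughout these tests, `process_block` drops the `CountUnrealized` argument and instead takes a closure that runs before the block is committed; returning an error from it aborts the import, and `|| Ok(())` is the no-op used everywhere here. A simplified model of that shape (an assumed signature with a toy error type, not the real `BeaconChain` API):

```rust
/// Toy model: verification, then the caller-supplied pre-import hook, then import.
/// An `Err` from the hook (e.g. a failed "publish before import" step) stops
/// the block from being imported.
fn process_block_model<F>(pre_import_hook: F) -> Result<(), String>
where
    F: FnOnce() -> Result<(), String>,
{
    // ... gossip/consensus verification would happen here ...
    pre_import_hook()?;
    // ... fork-choice import would happen here ...
    Ok(())
}

// Call sites in the diff pass the no-op hook:
// process_block_model(|| Ok(()))?;
```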
| @ -5,7 +5,7 @@ use std::time::Duration; | |||||||
| /// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a | /// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a | ||||||
| /// time-out (i.e., no indefinitely-blocking operations). | /// time-out (i.e., no indefinitely-blocking operations). | ||||||
| /// | /// | ||||||
| /// Timeouts can be optionally be disabled at runtime for all instances of this type by calling | /// Timeouts can be optionally disabled at runtime for all instances of this type by calling | ||||||
| /// `TimeoutRwLock::disable_timeouts()`. | /// `TimeoutRwLock::disable_timeouts()`. | ||||||
| pub struct TimeoutRwLock<T>(RwLock<T>); | pub struct TimeoutRwLock<T>(RwLock<T>); | ||||||
|  |  | ||||||
|  | |||||||
| @ -9,7 +9,7 @@ use beacon_chain::{ | |||||||
|     test_utils::{ |     test_utils::{ | ||||||
|         test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, |         test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, | ||||||
|     }, |     }, | ||||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, |     BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped, | ||||||
| }; | }; | ||||||
| use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; | use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; | ||||||
| use int_to_bytes::int_to_bytes32; | use int_to_bytes::int_to_bytes32; | ||||||
| @ -47,6 +47,10 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp | |||||||
|  |  | ||||||
|     let harness = BeaconChainHarness::builder(MainnetEthSpec) |     let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||||
|         .spec(spec) |         .spec(spec) | ||||||
|  |         .chain_config(ChainConfig { | ||||||
|  |             reconstruct_historic_states: true, | ||||||
|  |             ..ChainConfig::default() | ||||||
|  |         }) | ||||||
|         .keypairs(KEYPAIRS[0..validator_count].to_vec()) |         .keypairs(KEYPAIRS[0..validator_count].to_vec()) | ||||||
|         .fresh_ephemeral_store() |         .fresh_ephemeral_store() | ||||||
|         .mock_execution_layer() |         .mock_execution_layer() | ||||||
| @ -79,6 +83,10 @@ fn get_harness_capella_spec( | |||||||
|  |  | ||||||
|     let harness = BeaconChainHarness::builder(MainnetEthSpec) |     let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||||
|         .spec(spec.clone()) |         .spec(spec.clone()) | ||||||
|  |         .chain_config(ChainConfig { | ||||||
|  |             reconstruct_historic_states: true, | ||||||
|  |             ..ChainConfig::default() | ||||||
|  |         }) | ||||||
|         .keypairs(validator_keypairs) |         .keypairs(validator_keypairs) | ||||||
|         .withdrawal_keypairs( |         .withdrawal_keypairs( | ||||||
|             KEYPAIRS[0..validator_count] |             KEYPAIRS[0..validator_count] | ||||||
| @ -699,8 +707,8 @@ async fn aggregated_gossip_verification() { | |||||||
|             |tester, err| { |             |tester, err| { | ||||||
|                 assert!(matches!( |                 assert!(matches!( | ||||||
|                     err, |                     err, | ||||||
|                     AttnError::AttestationAlreadyKnown(hash) |                     AttnError::AttestationSupersetKnown(hash) | ||||||
|                     if hash == tester.valid_aggregate.message.aggregate.tree_hash_root() |                     if hash == tester.valid_aggregate.message.aggregate.data.tree_hash_root() | ||||||
|                 )) |                 )) | ||||||
|             }, |             }, | ||||||
|         ) |         ) | ||||||
|  | |||||||
| @ -3,8 +3,10 @@ | |||||||
| use beacon_chain::test_utils::{ | use beacon_chain::test_utils::{ | ||||||
|     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, |     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, | ||||||
| }; | }; | ||||||
| use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer}; | use beacon_chain::{ | ||||||
| use fork_choice::CountUnrealized; |     BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, | ||||||
|  |     NotifyExecutionLayer, | ||||||
|  | }; | ||||||
| use lazy_static::lazy_static; | use lazy_static::lazy_static; | ||||||
| use logging::test_logger; | use logging::test_logger; | ||||||
| use slasher::{Config as SlasherConfig, Slasher}; | use slasher::{Config as SlasherConfig, Slasher}; | ||||||
| @ -68,6 +70,10 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> { | |||||||
| fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> { | fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> { | ||||||
|     let harness = BeaconChainHarness::builder(MainnetEthSpec) |     let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||||
|         .default_spec() |         .default_spec() | ||||||
|  |         .chain_config(ChainConfig { | ||||||
|  |             reconstruct_historic_states: true, | ||||||
|  |             ..ChainConfig::default() | ||||||
|  |         }) | ||||||
|         .keypairs(KEYPAIRS[0..validator_count].to_vec()) |         .keypairs(KEYPAIRS[0..validator_count].to_vec()) | ||||||
|         .fresh_ephemeral_store() |         .fresh_ephemeral_store() | ||||||
|         .mock_execution_layer() |         .mock_execution_layer() | ||||||
| @ -148,18 +154,14 @@ async fn chain_segment_full_segment() { | |||||||
|     // Sneak in a little check to ensure we can process empty chain segments. |     // Sneak in a little check to ensure we can process empty chain segments. | ||||||
|     harness |     harness | ||||||
|         .chain |         .chain | ||||||
|         .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes) |         .process_chain_segment(vec![], NotifyExecutionLayer::Yes) | ||||||
|         .await |         .await | ||||||
|         .into_block_error() |         .into_block_error() | ||||||
|         .expect("should import empty chain segment"); |         .expect("should import empty chain segment"); | ||||||
|  |  | ||||||
|     harness |     harness | ||||||
|         .chain |         .chain | ||||||
|         .process_chain_segment( |         .process_chain_segment(blocks.clone(), NotifyExecutionLayer::Yes) | ||||||
|             blocks.clone(), |  | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |  | ||||||
|         ) |  | ||||||
|         .await |         .await | ||||||
|         .into_block_error() |         .into_block_error() | ||||||
|         .expect("should import chain segment"); |         .expect("should import chain segment"); | ||||||
| @ -188,11 +190,7 @@ async fn chain_segment_varying_chunk_size() { | |||||||
|         for chunk in blocks.chunks(*chunk_size) { |         for chunk in blocks.chunks(*chunk_size) { | ||||||
|             harness |             harness | ||||||
|                 .chain |                 .chain | ||||||
|                 .process_chain_segment( |                 .process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes) | ||||||
|                     chunk.to_vec(), |  | ||||||
|                     CountUnrealized::True, |  | ||||||
|                     NotifyExecutionLayer::Yes, |  | ||||||
|                 ) |  | ||||||
|                 .await |                 .await | ||||||
|                 .into_block_error() |                 .into_block_error() | ||||||
|                 .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); |                 .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); | ||||||
| @ -228,7 +226,7 @@ async fn chain_segment_non_linear_parent_roots() { | |||||||
|         matches!( |         matches!( | ||||||
|             harness |             harness | ||||||
|                 .chain |                 .chain | ||||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) |                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||||
|                 .await |                 .await | ||||||
|                 .into_block_error(), |                 .into_block_error(), | ||||||
|             Err(BlockError::NonLinearParentRoots) |             Err(BlockError::NonLinearParentRoots) | ||||||
| @ -248,7 +246,7 @@ async fn chain_segment_non_linear_parent_roots() { | |||||||
|         matches!( |         matches!( | ||||||
|             harness |             harness | ||||||
|                 .chain |                 .chain | ||||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) |                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||||
|                 .await |                 .await | ||||||
|                 .into_block_error(), |                 .into_block_error(), | ||||||
|             Err(BlockError::NonLinearParentRoots) |             Err(BlockError::NonLinearParentRoots) | ||||||
| @ -279,7 +277,7 @@ async fn chain_segment_non_linear_slots() { | |||||||
|         matches!( |         matches!( | ||||||
|             harness |             harness | ||||||
|                 .chain |                 .chain | ||||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) |                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||||
|                 .await |                 .await | ||||||
|                 .into_block_error(), |                 .into_block_error(), | ||||||
|             Err(BlockError::NonLinearSlots) |             Err(BlockError::NonLinearSlots) | ||||||
| @ -300,7 +298,7 @@ async fn chain_segment_non_linear_slots() { | |||||||
|         matches!( |         matches!( | ||||||
|             harness |             harness | ||||||
|                 .chain |                 .chain | ||||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) |                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||||
|                 .await |                 .await | ||||||
|                 .into_block_error(), |                 .into_block_error(), | ||||||
|             Err(BlockError::NonLinearSlots) |             Err(BlockError::NonLinearSlots) | ||||||
| @ -326,7 +324,7 @@ async fn assert_invalid_signature( | |||||||
|         matches!( |         matches!( | ||||||
|             harness |             harness | ||||||
|                 .chain |                 .chain | ||||||
|                 .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) |                 .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||||
|                 .await |                 .await | ||||||
|                 .into_block_error(), |                 .into_block_error(), | ||||||
|             Err(BlockError::InvalidSignature) |             Err(BlockError::InvalidSignature) | ||||||
| @ -348,11 +346,7 @@ async fn assert_invalid_signature( | |||||||
|     // imported prior to this test. |     // imported prior to this test. | ||||||
|     let _ = harness |     let _ = harness | ||||||
|         .chain |         .chain | ||||||
|         .process_chain_segment( |         .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes) | ||||||
|             ancestor_blocks, |  | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |  | ||||||
|         ) |  | ||||||
|         .await; |         .await; | ||||||
|     harness.chain.recompute_head_at_current_slot().await; |     harness.chain.recompute_head_at_current_slot().await; | ||||||
|  |  | ||||||
| @ -361,8 +355,8 @@ async fn assert_invalid_signature( | |||||||
|         .process_block( |         .process_block( | ||||||
|             snapshots[block_index].beacon_block.canonical_root(), |             snapshots[block_index].beacon_block.canonical_root(), | ||||||
|             snapshots[block_index].beacon_block.clone(), |             snapshots[block_index].beacon_block.clone(), | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |             NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|         ) |         ) | ||||||
|         .await; |         .await; | ||||||
|     assert!( |     assert!( | ||||||
| @ -414,11 +408,7 @@ async fn invalid_signature_gossip_block() { | |||||||
|             .collect(); |             .collect(); | ||||||
|         harness |         harness | ||||||
|             .chain |             .chain | ||||||
|             .process_chain_segment( |             .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes) | ||||||
|                 ancestor_blocks, |  | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |  | ||||||
|             ) |  | ||||||
|             .await |             .await | ||||||
|             .into_block_error() |             .into_block_error() | ||||||
|             .expect("should import all blocks prior to the one being tested"); |             .expect("should import all blocks prior to the one being tested"); | ||||||
| @ -430,8 +420,8 @@ async fn invalid_signature_gossip_block() { | |||||||
|                     .process_block( |                     .process_block( | ||||||
|                         signed_block.canonical_root(), |                         signed_block.canonical_root(), | ||||||
|                         Arc::new(signed_block), |                         Arc::new(signed_block), | ||||||
|                         CountUnrealized::True, |  | ||||||
|                         NotifyExecutionLayer::Yes, |                         NotifyExecutionLayer::Yes, | ||||||
|  |                         || Ok(()), | ||||||
|                     ) |                     ) | ||||||
|                     .await, |                     .await, | ||||||
|                 Err(BlockError::InvalidSignature) |                 Err(BlockError::InvalidSignature) | ||||||
| @ -465,7 +455,7 @@ async fn invalid_signature_block_proposal() { | |||||||
|             matches!( |             matches!( | ||||||
|                 harness |                 harness | ||||||
|                     .chain |                     .chain | ||||||
|                     .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) |                     .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||||
|                     .await |                     .await | ||||||
|                     .into_block_error(), |                     .into_block_error(), | ||||||
|                 Err(BlockError::InvalidSignature) |                 Err(BlockError::InvalidSignature) | ||||||
| @ -663,7 +653,7 @@ async fn invalid_signature_deposit() { | |||||||
|             !matches!( |             !matches!( | ||||||
|                 harness |                 harness | ||||||
|                     .chain |                     .chain | ||||||
|                     .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) |                     .process_chain_segment(blocks, NotifyExecutionLayer::Yes) | ||||||
|                     .await |                     .await | ||||||
|                     .into_block_error(), |                     .into_block_error(), | ||||||
|                 Err(BlockError::InvalidSignature) |                 Err(BlockError::InvalidSignature) | ||||||
| @ -743,8 +733,8 @@ async fn block_gossip_verification() { | |||||||
|             .process_block( |             .process_block( | ||||||
|                 gossip_verified.block_root, |                 gossip_verified.block_root, | ||||||
|                 gossip_verified, |                 gossip_verified, | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |                 NotifyExecutionLayer::Yes, | ||||||
|  |                 || Ok(()), | ||||||
|             ) |             ) | ||||||
|             .await |             .await | ||||||
|             .expect("should import valid gossip verified block"); |             .expect("should import valid gossip verified block"); | ||||||
| @ -941,11 +931,7 @@ async fn block_gossip_verification() { | |||||||
|     assert!( |     assert!( | ||||||
|         matches!( |         matches!( | ||||||
|             unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), |             unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), | ||||||
|             BlockError::RepeatProposal { |             BlockError::BlockIsAlreadyKnown, | ||||||
|                 proposer, |  | ||||||
|                 slot, |  | ||||||
|             } |  | ||||||
|             if proposer == other_proposer && slot == block.message().slot() |  | ||||||
|         ), |         ), | ||||||
|         "should register any valid signature against the proposer, even if the block failed later verification" |         "should register any valid signature against the proposer, even if the block failed later verification" | ||||||
|     ); |     ); | ||||||
| @ -974,11 +960,7 @@ async fn block_gossip_verification() { | |||||||
|                 .await |                 .await | ||||||
|                 .err() |                 .err() | ||||||
|                 .expect("should error when processing known block"), |                 .expect("should error when processing known block"), | ||||||
|             BlockError::RepeatProposal { |             BlockError::BlockIsAlreadyKnown | ||||||
|                 proposer, |  | ||||||
|                 slot, |  | ||||||
|             } |  | ||||||
|             if proposer == block.message().proposer_index() && slot == block.message().slot() |  | ||||||
|         ), |         ), | ||||||
|         "the second proposal by this validator should be rejected" |         "the second proposal by this validator should be rejected" | ||||||
|     ); |     ); | ||||||
| @ -1015,8 +997,8 @@ async fn verify_block_for_gossip_slashing_detection() { | |||||||
|         .process_block( |         .process_block( | ||||||
|             verified_block.block_root, |             verified_block.block_root, | ||||||
|             verified_block, |             verified_block, | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |             NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|         ) |         ) | ||||||
|         .await |         .await | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
| @ -1055,8 +1037,8 @@ async fn verify_block_for_gossip_doppelganger_detection() { | |||||||
|         .process_block( |         .process_block( | ||||||
|             verified_block.block_root, |             verified_block.block_root, | ||||||
|             verified_block, |             verified_block, | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |             NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|         ) |         ) | ||||||
|         .await |         .await | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
| @ -1203,8 +1185,8 @@ async fn add_base_block_to_altair_chain() { | |||||||
|             .process_block( |             .process_block( | ||||||
|                 base_block.canonical_root(), |                 base_block.canonical_root(), | ||||||
|                 Arc::new(base_block.clone()), |                 Arc::new(base_block.clone()), | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |                 NotifyExecutionLayer::Yes, | ||||||
|  |                 || Ok(()), | ||||||
|             ) |             ) | ||||||
|             .await |             .await | ||||||
|             .err() |             .err() | ||||||
| @ -1219,11 +1201,7 @@ async fn add_base_block_to_altair_chain() { | |||||||
|     assert!(matches!( |     assert!(matches!( | ||||||
|         harness |         harness | ||||||
|             .chain |             .chain | ||||||
|             .process_chain_segment( |             .process_chain_segment(vec![Arc::new(base_block)], NotifyExecutionLayer::Yes,) | ||||||
|                 vec![Arc::new(base_block)], |  | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |  | ||||||
|             ) |  | ||||||
|             .await, |             .await, | ||||||
|         ChainSegmentResult::Failed { |         ChainSegmentResult::Failed { | ||||||
|             imported_blocks: 0, |             imported_blocks: 0, | ||||||
| @ -1342,8 +1320,8 @@ async fn add_altair_block_to_base_chain() { | |||||||
|             .process_block( |             .process_block( | ||||||
|                 altair_block.canonical_root(), |                 altair_block.canonical_root(), | ||||||
|                 Arc::new(altair_block.clone()), |                 Arc::new(altair_block.clone()), | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |                 NotifyExecutionLayer::Yes, | ||||||
|  |                 || Ok(()), | ||||||
|             ) |             ) | ||||||
|             .await |             .await | ||||||
|             .err() |             .err() | ||||||
| @ -1358,11 +1336,7 @@ async fn add_altair_block_to_base_chain() { | |||||||
|     assert!(matches!( |     assert!(matches!( | ||||||
|         harness |         harness | ||||||
|             .chain |             .chain | ||||||
|             .process_chain_segment( |             .process_chain_segment(vec![Arc::new(altair_block)], NotifyExecutionLayer::Yes) | ||||||
|                 vec![Arc::new(altair_block)], |  | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes |  | ||||||
|             ) |  | ||||||
|             .await, |             .await, | ||||||
|         ChainSegmentResult::Failed { |         ChainSegmentResult::Failed { | ||||||
|             imported_blocks: 0, |             imported_blocks: 0, | ||||||
| @ -1373,3 +1347,100 @@ async fn add_altair_block_to_base_chain() { | |||||||
|         } |         } | ||||||
|     )); |     )); | ||||||
| } | } | ||||||
|  |  | ||||||
|  | #[tokio::test] | ||||||
|  | async fn import_duplicate_block_unrealized_justification() { | ||||||
|  |     let spec = MainnetEthSpec::default_spec(); | ||||||
|  |  | ||||||
|  |     let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||||
|  |         .spec(spec) | ||||||
|  |         .keypairs(KEYPAIRS[..].to_vec()) | ||||||
|  |         .fresh_ephemeral_store() | ||||||
|  |         .mock_execution_layer() | ||||||
|  |         .build(); | ||||||
|  |     let chain = &harness.chain; | ||||||
|  |  | ||||||
|  |     // Move out of the genesis slot. | ||||||
|  |     harness.advance_slot(); | ||||||
|  |  | ||||||
|  |     // Build the chain out to the first justification opportunity 2/3rds of the way through epoch 2. | ||||||
|  |     let num_slots = E::slots_per_epoch() as usize * 8 / 3; | ||||||
|  |     harness | ||||||
|  |         .extend_chain( | ||||||
|  |             num_slots, | ||||||
|  |             BlockStrategy::OnCanonicalHead, | ||||||
|  |             AttestationStrategy::AllValidators, | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  |  | ||||||
|  |     // Move into the next empty slot. | ||||||
|  |     harness.advance_slot(); | ||||||
|  |  | ||||||
|  |     // The store's justified checkpoint must still be at epoch 0, while unrealized justification | ||||||
|  |     // must be at epoch 1. | ||||||
|  |     let fc = chain.canonical_head.fork_choice_read_lock(); | ||||||
|  |     assert_eq!(fc.justified_checkpoint().epoch, 0); | ||||||
|  |     assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); | ||||||
|  |     drop(fc); | ||||||
|  |  | ||||||
|  |     // Produce a block to justify epoch 2. | ||||||
|  |     let state = harness.get_current_state(); | ||||||
|  |     let slot = harness.get_current_slot(); | ||||||
|  |     let (block, _) = harness.make_block(state.clone(), slot).await; | ||||||
|  |     let block = Arc::new(block); | ||||||
|  |     let block_root = block.canonical_root(); | ||||||
|  |  | ||||||
|  |     // Create two verified variants of the block, representing the same block being processed in | ||||||
|  |     // parallel. | ||||||
|  |     let notify_execution_layer = NotifyExecutionLayer::Yes; | ||||||
|  |     let verified_block1 = block | ||||||
|  |         .clone() | ||||||
|  |         .into_execution_pending_block(block_root, &chain, notify_execution_layer) | ||||||
|  |         .unwrap(); | ||||||
|  |     let verified_block2 = block | ||||||
|  |         .into_execution_pending_block(block_root, &chain, notify_execution_layer) | ||||||
|  |         .unwrap(); | ||||||
|  |  | ||||||
|  |     // Import the first block, simulating a block processed via a finalized chain segment. | ||||||
|  |     chain | ||||||
|  |         .clone() | ||||||
|  |         .import_execution_pending_block(verified_block1) | ||||||
|  |         .await | ||||||
|  |         .unwrap(); | ||||||
|  |  | ||||||
|  |     // Unrealized justification should NOT have updated. | ||||||
|  |     let fc = chain.canonical_head.fork_choice_read_lock(); | ||||||
|  |     assert_eq!(fc.justified_checkpoint().epoch, 0); | ||||||
|  |     let unrealized_justification = fc.unrealized_justified_checkpoint(); | ||||||
|  |     assert_eq!(unrealized_justification.epoch, 2); | ||||||
|  |  | ||||||
|  |     // The fork choice node for the block should have unrealized justification. | ||||||
|  |     let fc_block = fc.get_block(&block_root).unwrap(); | ||||||
|  |     assert_eq!( | ||||||
|  |         fc_block.unrealized_justified_checkpoint, | ||||||
|  |         Some(unrealized_justification) | ||||||
|  |     ); | ||||||
|  |     drop(fc); | ||||||
|  |  | ||||||
|  |     // Import the second verified block, simulating a block processed via RPC. | ||||||
|  |     chain | ||||||
|  |         .clone() | ||||||
|  |         .import_execution_pending_block(verified_block2) | ||||||
|  |         .await | ||||||
|  |         .unwrap(); | ||||||
|  |  | ||||||
|  |     // Unrealized justification should still be updated. | ||||||
|  |     let fc = chain.canonical_head.fork_choice_read_lock(); | ||||||
|  |     assert_eq!(fc.justified_checkpoint().epoch, 0); | ||||||
|  |     assert_eq!( | ||||||
|  |         fc.unrealized_justified_checkpoint(), | ||||||
|  |         unrealized_justification | ||||||
|  |     ); | ||||||
|  |  | ||||||
|  |     // The fork choice node for the block should still have the unrealized justified checkpoint. | ||||||
|  |     let fc_block = fc.get_block(&block_root).unwrap(); | ||||||
|  |     assert_eq!( | ||||||
|  |         fc_block.unrealized_justified_checkpoint, | ||||||
|  |         Some(unrealized_justification) | ||||||
|  |     ); | ||||||
|  | } | ||||||
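Reduced to a toy, the invariant this test pins down is that a duplicate import must not clobber per-block state recorded on the first import:

```rust
use std::collections::HashMap;

/// Toy fork-choice store: block root -> unrealized-justification epoch.
#[derive(Default)]
struct ToyForkChoice {
    unrealized: HashMap<u64, u64>,
}

impl ToyForkChoice {
    /// `entry().or_insert()` keeps the first recorded value on a duplicate
    /// import instead of overwriting it, mirroring the behaviour asserted above.
    fn on_block(&mut self, root: u64, unrealized_epoch: u64) {
        self.unrealized.entry(root).or_insert(unrealized_epoch);
    }
}
```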
|  | |||||||
| @ -133,13 +133,8 @@ async fn base_altair_merge_capella() { | |||||||
|     for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { |     for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { | ||||||
|         harness.extend_slots(1).await; |         harness.extend_slots(1).await; | ||||||
|         let block = &harness.chain.head_snapshot().beacon_block; |         let block = &harness.chain.head_snapshot().beacon_block; | ||||||
|         let full_payload: FullPayload<E> = block |         let full_payload: FullPayload<E> = | ||||||
|             .message() |             block.message().body().execution_payload().unwrap().into(); | ||||||
|             .body() |  | ||||||
|             .execution_payload() |  | ||||||
|             .unwrap() |  | ||||||
|             .clone() |  | ||||||
|             .into(); |  | ||||||
|         // pre-capella shouldn't have withdrawals |         // pre-capella shouldn't have withdrawals | ||||||
|         assert!(full_payload.withdrawals_root().is_err()); |         assert!(full_payload.withdrawals_root().is_err()); | ||||||
|         execution_payloads.push(full_payload); |         execution_payloads.push(full_payload); | ||||||
| @ -151,13 +146,8 @@ async fn base_altair_merge_capella() { | |||||||
|     for _ in 0..16 { |     for _ in 0..16 { | ||||||
|         harness.extend_slots(1).await; |         harness.extend_slots(1).await; | ||||||
|         let block = &harness.chain.head_snapshot().beacon_block; |         let block = &harness.chain.head_snapshot().beacon_block; | ||||||
|         let full_payload: FullPayload<E> = block |         let full_payload: FullPayload<E> = | ||||||
|             .message() |             block.message().body().execution_payload().unwrap().into(); | ||||||
|             .body() |  | ||||||
|             .execution_payload() |  | ||||||
|             .unwrap() |  | ||||||
|             .clone() |  | ||||||
|             .into(); |  | ||||||
|         // post-capella should have withdrawals |         // post-capella should have withdrawals | ||||||
|         assert!(full_payload.withdrawals_root().is_ok()); |         assert!(full_payload.withdrawals_root().is_ok()); | ||||||
|         execution_payloads.push(full_payload); |         execution_payloads.push(full_payload); | ||||||
|  | |||||||
| @ -7,7 +7,7 @@ use beacon_chain::otb_verification_service::{ | |||||||
| use beacon_chain::{ | use beacon_chain::{ | ||||||
|     canonical_head::{CachedHead, CanonicalHead}, |     canonical_head::{CachedHead, CanonicalHead}, | ||||||
|     test_utils::{BeaconChainHarness, EphemeralHarnessType}, |     test_utils::{BeaconChainHarness, EphemeralHarnessType}, | ||||||
|     BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, |     BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, | ||||||
|     OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, |     OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, | ||||||
|     INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, |     INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, | ||||||
|     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, |     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, | ||||||
| @ -17,9 +17,7 @@ use execution_layer::{ | |||||||
|     test_utils::ExecutionBlockGenerator, |     test_utils::ExecutionBlockGenerator, | ||||||
|     ExecutionLayer, ForkchoiceState, PayloadAttributes, |     ExecutionLayer, ForkchoiceState, PayloadAttributes, | ||||||
| }; | }; | ||||||
| use fork_choice::{ | use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; | ||||||
|     CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, |  | ||||||
| }; |  | ||||||
| use logging::test_logger; | use logging::test_logger; | ||||||
| use proto_array::{Error as ProtoArrayError, ExecutionStatus}; | use proto_array::{Error as ProtoArrayError, ExecutionStatus}; | ||||||
| use slot_clock::SlotClock; | use slot_clock::SlotClock; | ||||||
| @ -61,6 +59,10 @@ impl InvalidPayloadRig { | |||||||
| 
 | 
 | ||||||
|         let harness = BeaconChainHarness::builder(MainnetEthSpec) |         let harness = BeaconChainHarness::builder(MainnetEthSpec) | ||||||
|             .spec(spec) |             .spec(spec) | ||||||
|  |             .chain_config(ChainConfig { | ||||||
|  |                 reconstruct_historic_states: true, | ||||||
|  |                 ..ChainConfig::default() | ||||||
|  |             }) | ||||||
|             .logger(test_logger()) |             .logger(test_logger()) | ||||||
|             .deterministic_keypairs(VALIDATOR_COUNT) |             .deterministic_keypairs(VALIDATOR_COUNT) | ||||||
|             .mock_execution_layer() |             .mock_execution_layer() | ||||||
| @ -698,8 +700,8 @@ async fn invalidates_all_descendants() { | |||||||
|         .process_block( |         .process_block( | ||||||
|             fork_block.canonical_root(), |             fork_block.canonical_root(), | ||||||
|             Arc::new(fork_block), |             Arc::new(fork_block), | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |             NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|         ) |         ) | ||||||
|         .await |         .await | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
| @ -795,8 +797,8 @@ async fn switches_heads() { | |||||||
|         .process_block( |         .process_block( | ||||||
|             fork_block.canonical_root(), |             fork_block.canonical_root(), | ||||||
|             Arc::new(fork_block), |             Arc::new(fork_block), | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |             NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|         ) |         ) | ||||||
|         .await |         .await | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
| @ -1050,7 +1052,9 @@ async fn invalid_parent() { | |||||||
|  |  | ||||||
|     // Ensure the block built atop an invalid payload is invalid for import. |     // Ensure the block built atop an invalid payload is invalid for import. | ||||||
|     assert!(matches!( |     assert!(matches!( | ||||||
|         rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await, |         rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|  |         ).await, | ||||||
|         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) |         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) | ||||||
|         if invalid_root == parent_root |         if invalid_root == parent_root | ||||||
|     )); |     )); | ||||||
| @ -1064,8 +1068,9 @@ async fn invalid_parent() { | |||||||
|             Duration::from_secs(0), |             Duration::from_secs(0), | ||||||
|             &state, |             &state, | ||||||
|             PayloadVerificationStatus::Optimistic, |             PayloadVerificationStatus::Optimistic, | ||||||
|  |             rig.harness.chain.config.progressive_balances_mode, | ||||||
|             &rig.harness.chain.spec, |             &rig.harness.chain.spec, | ||||||
|             CountUnrealized::True, |             rig.harness.logger() | ||||||
|         ), |         ), | ||||||
|         Err(ForkChoiceError::ProtoArrayStringError(message)) |         Err(ForkChoiceError::ProtoArrayStringError(message)) | ||||||
|         if message.contains(&format!( |         if message.contains(&format!( | ||||||
| @ -1339,8 +1344,8 @@ async fn build_optimistic_chain( | |||||||
|             .process_block( |             .process_block( | ||||||
|                 block.canonical_root(), |                 block.canonical_root(), | ||||||
|                 block, |                 block, | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |                 NotifyExecutionLayer::Yes, | ||||||
|  |                 || Ok(()), | ||||||
|             ) |             ) | ||||||
|             .await |             .await | ||||||
|             .unwrap(); |             .unwrap(); | ||||||
| @ -1900,8 +1905,8 @@ async fn recover_from_invalid_head_by_importing_blocks() { | |||||||
|         .process_block( |         .process_block( | ||||||
|             fork_block.canonical_root(), |             fork_block.canonical_root(), | ||||||
|             fork_block.clone(), |             fork_block.clone(), | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |             NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|         ) |         ) | ||||||
|         .await |         .await | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
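A pattern worth calling out across the hunks above: every `process_block` call loses its `CountUnrealized::True` argument and gains a trailing closure after `NotifyExecutionLayer`. A minimal sketch of the new call shape, assuming (not confirmed by this diff) that the closure is a pre-import hook returning `Result<(), BlockError>` and that `|| Ok(())` is simply its no-op instantiation:

    // Sketch of the updated call shape seen throughout these tests.
    // The closure parameter's role and error type are assumptions here;
    // `|| Ok(())` is the no-op form the tests pass.
    let result = chain
        .process_block(
            block.canonical_root(),    // block root computed by the caller
            Arc::new(block),           // the block to import
            NotifyExecutionLayer::Yes, // still forwarded to the execution layer
            || Ok(()),                 // new trailing closure (no-op in tests)
        )
        .await;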
|  | |||||||
| @ -9,19 +9,22 @@ use beacon_chain::{ | |||||||
|     test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, |     test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, | ||||||
|     types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, |     types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, | ||||||
| }; | }; | ||||||
|  | use eth2::lighthouse::attestation_rewards::TotalAttestationRewards; | ||||||
|  | use eth2::lighthouse::StandardAttestationRewards; | ||||||
|  | use eth2::types::ValidatorId; | ||||||
| use lazy_static::lazy_static; | use lazy_static::lazy_static; | ||||||
|  | use types::beacon_state::Error as BeaconStateError; | ||||||
|  | use types::{BeaconState, ChainSpec}; | ||||||
| 
 | 
 | ||||||
| pub const VALIDATOR_COUNT: usize = 64; | pub const VALIDATOR_COUNT: usize = 64; | ||||||
| 
 | 
 | ||||||
|  | type E = MinimalEthSpec; | ||||||
|  | 
 | ||||||
| lazy_static! { | lazy_static! { | ||||||
|     static ref KEYPAIRS: Vec<Keypair> = generate_deterministic_keypairs(VALIDATOR_COUNT); |     static ref KEYPAIRS: Vec<Keypair> = generate_deterministic_keypairs(VALIDATOR_COUNT); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| fn get_harness<E: EthSpec>() -> BeaconChainHarness<EphemeralHarnessType<E>> { | fn get_harness(spec: ChainSpec) -> BeaconChainHarness<EphemeralHarnessType<E>> { | ||||||
|     let mut spec = E::default_spec(); |  | ||||||
| 
 |  | ||||||
|     spec.altair_fork_epoch = Some(Epoch::new(0)); // We use altair for all tests |  | ||||||
| 
 |  | ||||||
|     let harness = BeaconChainHarness::builder(E::default()) |     let harness = BeaconChainHarness::builder(E::default()) | ||||||
|         .spec(spec) |         .spec(spec) | ||||||
|         .keypairs(KEYPAIRS.to_vec()) |         .keypairs(KEYPAIRS.to_vec()) | ||||||
| @ -35,8 +38,11 @@ fn get_harness<E: EthSpec>() -> BeaconChainHarness<EphemeralHarnessType<E>> { | |||||||
| 
 | 
 | ||||||
| #[tokio::test] | #[tokio::test] | ||||||
| async fn test_sync_committee_rewards() { | async fn test_sync_committee_rewards() { | ||||||
|     let num_block_produced = MinimalEthSpec::slots_per_epoch(); |     let mut spec = E::default_spec(); | ||||||
|     let harness = get_harness::<MinimalEthSpec>(); |     spec.altair_fork_epoch = Some(Epoch::new(0)); | ||||||
|  | 
 | ||||||
|  |     let harness = get_harness(spec); | ||||||
|  |     let num_block_produced = E::slots_per_epoch(); | ||||||
| 
 | 
 | ||||||
|     let latest_block_root = harness |     let latest_block_root = harness | ||||||
|         .extend_chain( |         .extend_chain( | ||||||
| @ -119,3 +125,175 @@ async fn test_sync_committee_rewards() { | |||||||
|         mismatches.join(",") |         mismatches.join(",") | ||||||
|     ); |     ); | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | #[tokio::test] | ||||||
|  | async fn test_verify_attestation_rewards_base() { | ||||||
|  |     let harness = get_harness(E::default_spec()); | ||||||
|  | 
 | ||||||
|  |     // epoch 0 (N), only two thirds of validators vote. | ||||||
|  |     let two_thirds = (VALIDATOR_COUNT / 3) * 2; | ||||||
|  |     let two_thirds_validators: Vec<usize> = (0..two_thirds).collect(); | ||||||
|  |     harness | ||||||
|  |         .extend_chain( | ||||||
|  |             E::slots_per_epoch() as usize, | ||||||
|  |             BlockStrategy::OnCanonicalHead, | ||||||
|  |             AttestationStrategy::SomeValidators(two_thirds_validators), | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  | 
 | ||||||
|  |     let initial_balances: Vec<u64> = harness.get_current_state().balances().clone().into(); | ||||||
|  | 
 | ||||||
|  |     // extend slots to beginning of epoch N + 2 | ||||||
|  |     harness.extend_slots(E::slots_per_epoch() as usize).await; | ||||||
|  | 
 | ||||||
|  |     // compute reward deltas for all validators in epoch N | ||||||
|  |     let StandardAttestationRewards { | ||||||
|  |         ideal_rewards, | ||||||
|  |         total_rewards, | ||||||
|  |     } = harness | ||||||
|  |         .chain | ||||||
|  |         .compute_attestation_rewards(Epoch::new(0), vec![]) | ||||||
|  |         .unwrap(); | ||||||
|  | 
 | ||||||
|  |     // assert no inactivity penalty for both ideal rewards and individual validators | ||||||
|  |     assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); | ||||||
|  |     assert!(total_rewards.iter().all(|reward| reward.inactivity == 0)); | ||||||
|  | 
 | ||||||
|  |     // apply attestation rewards to initial balances | ||||||
|  |     let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); | ||||||
|  | 
 | ||||||
|  |     // verify expected balances against actual balances | ||||||
|  |     let balances: Vec<u64> = harness.get_current_state().balances().clone().into(); | ||||||
|  |     assert_eq!(expected_balances, balances); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[tokio::test] | ||||||
|  | async fn test_verify_attestation_rewards_base_inactivity_leak() { | ||||||
|  |     let spec = E::default_spec(); | ||||||
|  |     let harness = get_harness(spec.clone()); | ||||||
|  | 
 | ||||||
|  |     let half = VALIDATOR_COUNT / 2; | ||||||
|  |     let half_validators: Vec<usize> = (0..half).collect(); | ||||||
|  |     // target epoch is the epoch where the chain enters inactivity leak | ||||||
|  |     let target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; | ||||||
|  | 
 | ||||||
|  |     // advance until beginning of epoch N + 1 and get balances | ||||||
|  |     harness | ||||||
|  |         .extend_chain( | ||||||
|  |             (E::slots_per_epoch() * (target_epoch + 1)) as usize, | ||||||
|  |             BlockStrategy::OnCanonicalHead, | ||||||
|  |             AttestationStrategy::SomeValidators(half_validators.clone()), | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  |     let initial_balances: Vec<u64> = harness.get_current_state().balances().clone().into(); | ||||||
|  | 
 | ||||||
|  |     // extend slots to beginning of epoch N + 2 | ||||||
|  |     harness.advance_slot(); | ||||||
|  |     harness | ||||||
|  |         .extend_chain( | ||||||
|  |             E::slots_per_epoch() as usize, | ||||||
|  |             BlockStrategy::OnCanonicalHead, | ||||||
|  |             AttestationStrategy::SomeValidators(half_validators), | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  |     let _slot = harness.get_current_slot(); | ||||||
|  | 
 | ||||||
|  |     // compute reward deltas for all validators in epoch N | ||||||
|  |     let StandardAttestationRewards { | ||||||
|  |         ideal_rewards, | ||||||
|  |         total_rewards, | ||||||
|  |     } = harness | ||||||
|  |         .chain | ||||||
|  |         .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) | ||||||
|  |         .unwrap(); | ||||||
|  | 
 | ||||||
|  |     // assert inactivity penalty for both ideal rewards and individual validators | ||||||
|  |     assert!(ideal_rewards.iter().all(|reward| reward.inactivity < 0)); | ||||||
|  |     assert!(total_rewards.iter().all(|reward| reward.inactivity < 0)); | ||||||
|  | 
 | ||||||
|  |     // apply attestation rewards to initial balances | ||||||
|  |     let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); | ||||||
|  | 
 | ||||||
|  |     // verify expected balances against actual balances | ||||||
|  |     let balances: Vec<u64> = harness.get_current_state().balances().clone().into(); | ||||||
|  |     assert_eq!(expected_balances, balances); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[tokio::test] | ||||||
|  | async fn test_verify_attestation_rewards_base_subset_only() { | ||||||
|  |     let harness = get_harness(E::default_spec()); | ||||||
|  | 
 | ||||||
|  |     // epoch 0 (N), only two thirds of validators vote. | ||||||
|  |     let two_thirds = (VALIDATOR_COUNT / 3) * 2; | ||||||
|  |     let two_thirds_validators: Vec<usize> = (0..two_thirds).collect(); | ||||||
|  |     harness | ||||||
|  |         .extend_chain( | ||||||
|  |             E::slots_per_epoch() as usize, | ||||||
|  |             BlockStrategy::OnCanonicalHead, | ||||||
|  |             AttestationStrategy::SomeValidators(two_thirds_validators), | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  | 
 | ||||||
|  |     // a small subset of validators to compute attestation rewards for | ||||||
|  |     let validators_subset = [0, VALIDATOR_COUNT / 2, VALIDATOR_COUNT - 1]; | ||||||
|  | 
 | ||||||
|  |     // capture balances before transitioning to N + 2 | ||||||
|  |     let initial_balances = get_validator_balances(harness.get_current_state(), &validators_subset); | ||||||
|  | 
 | ||||||
|  |     // extend slots to beginning of epoch N + 2 | ||||||
|  |     harness.extend_slots(E::slots_per_epoch() as usize).await; | ||||||
|  | 
 | ||||||
|  |     let validators_subset_ids: Vec<ValidatorId> = validators_subset | ||||||
|  |         .into_iter() | ||||||
|  |         .map(|idx| ValidatorId::Index(idx as u64)) | ||||||
|  |         .collect(); | ||||||
|  | 
 | ||||||
|  |     // compute reward deltas for the subset of validators in epoch N | ||||||
|  |     let StandardAttestationRewards { | ||||||
|  |         ideal_rewards: _, | ||||||
|  |         total_rewards, | ||||||
|  |     } = harness | ||||||
|  |         .chain | ||||||
|  |         .compute_attestation_rewards(Epoch::new(0), validators_subset_ids) | ||||||
|  |         .unwrap(); | ||||||
|  | 
 | ||||||
|  |     // apply attestation rewards to initial balances | ||||||
|  |     let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); | ||||||
|  | 
 | ||||||
|  |     // verify expected balances against actual balances | ||||||
|  |     let balances = get_validator_balances(harness.get_current_state(), &validators_subset); | ||||||
|  |     assert_eq!(expected_balances, balances); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// Apply a vec of `TotalAttestationRewards` to initial balances, and return the expected balances. | ||||||
|  | fn apply_attestation_rewards( | ||||||
|  |     initial_balances: &[u64], | ||||||
|  |     attestation_rewards: Vec<TotalAttestationRewards>, | ||||||
|  | ) -> Vec<u64> { | ||||||
|  |     initial_balances | ||||||
|  |         .iter() | ||||||
|  |         .zip(attestation_rewards) | ||||||
|  |         .map(|(&initial_balance, rewards)| { | ||||||
|  |             let expected_balance = initial_balance as i64 | ||||||
|  |                 + rewards.head | ||||||
|  |                 + rewards.source | ||||||
|  |                 + rewards.target | ||||||
|  |                 + rewards.inclusion_delay.map(|q| q.value).unwrap_or(0) as i64 | ||||||
|  |                 + rewards.inactivity; | ||||||
|  |             expected_balance as u64 | ||||||
|  |         }) | ||||||
|  |         .collect::<Vec<u64>>() | ||||||
|  | } | ||||||
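For intuition, a hypothetical worked example of the arithmetic in `apply_attestation_rewards` (all numbers invented for illustration): the components are summed as `i64` because penalties can be negative, then cast back to `u64`.

    // 32 ETH in Gwei, plus invented head/source/target components and no
    // inclusion-delay or inactivity part. Mirrors the summation above.
    let initial_balance: u64 = 32_000_000_000;
    let (head, source, target, inactivity): (i64, i64, i64, i64) = (14_000, 28_000, 26_000, 0);
    let expected = (initial_balance as i64 + head + source + target + inactivity) as u64;
    assert_eq!(expected, 32_000_068_000);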
|  | 
 | ||||||
|  | fn get_validator_balances(state: BeaconState<E>, validators: &[usize]) -> Vec<u64> { | ||||||
|  |     validators | ||||||
|  |         .iter() | ||||||
|  |         .flat_map(|&id| { | ||||||
|  |             state | ||||||
|  |                 .balances() | ||||||
|  |                 .get(id) | ||||||
|  |                 .cloned() | ||||||
|  |                 .ok_or(BeaconStateError::BalancesOutOfBounds(id)) | ||||||
|  |         }) | ||||||
|  |         .collect() | ||||||
|  | } | ||||||
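Note the design choice in `get_validator_balances`: iterating with `flat_map` over a `Result` silently drops an out-of-range index instead of propagating `BalancesOutOfBounds`. A small self-contained demonstration of that behaviour (standalone sketch, not taken from the diff):

    // Err(..) behaves as an empty iterator under flat_map, so a bad index
    // yields no element rather than failing the whole collection.
    let balances = vec![1u64, 2, 3];
    let got: Vec<u64> = [0usize, 99, 2]
        .iter()
        .flat_map(|&i| balances.get(i).cloned().ok_or("out of bounds"))
        .collect();
    assert_eq!(got, vec![1, 3]);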
|  | |||||||
| @ -9,15 +9,15 @@ use beacon_chain::test_utils::{ | |||||||
| use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; | use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; | ||||||
| use beacon_chain::{ | use beacon_chain::{ | ||||||
|     historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, |     historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, | ||||||
|     BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, |     BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, | ||||||
|     ServerSentEventHandler, WhenSlotSkipped, |     NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, | ||||||
| }; | }; | ||||||
| use fork_choice::CountUnrealized; |  | ||||||
| use lazy_static::lazy_static; | use lazy_static::lazy_static; | ||||||
| use logging::test_logger; | use logging::test_logger; | ||||||
| use maplit::hashset; | use maplit::hashset; | ||||||
| use rand::Rng; | use rand::Rng; | ||||||
| use state_processing::BlockReplayer; | use slot_clock::{SlotClock, TestingSlotClock}; | ||||||
|  | use state_processing::{state_advance::complete_state_advance, BlockReplayer}; | ||||||
| use std::collections::HashMap; | use std::collections::HashMap; | ||||||
| use std::collections::HashSet; | use std::collections::HashSet; | ||||||
| use std::convert::TryInto; | use std::convert::TryInto; | ||||||
| @ -66,6 +66,19 @@ fn get_store_with_spec( | |||||||
| fn get_harness( | fn get_harness( | ||||||
|     store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>, |     store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>, | ||||||
|     validator_count: usize, |     validator_count: usize, | ||||||
|  | ) -> TestHarness { | ||||||
|  |     // Most tests expect to retain historic states, so we use this as the default. | ||||||
|  |     let chain_config = ChainConfig { | ||||||
|  |         reconstruct_historic_states: true, | ||||||
|  |         ..ChainConfig::default() | ||||||
|  |     }; | ||||||
|  |     get_harness_generic(store, validator_count, chain_config) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn get_harness_generic( | ||||||
|  |     store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>, | ||||||
|  |     validator_count: usize, | ||||||
|  |     chain_config: ChainConfig, | ||||||
| ) -> TestHarness { | ) -> TestHarness { | ||||||
|     let harness = BeaconChainHarness::builder(MinimalEthSpec) |     let harness = BeaconChainHarness::builder(MinimalEthSpec) | ||||||
|         .default_spec() |         .default_spec() | ||||||
| @ -73,6 +86,7 @@ fn get_harness( | |||||||
|         .logger(store.logger().clone()) |         .logger(store.logger().clone()) | ||||||
|         .fresh_disk_store(store) |         .fresh_disk_store(store) | ||||||
|         .mock_execution_layer() |         .mock_execution_layer() | ||||||
|  |         .chain_config(chain_config) | ||||||
|         .build(); |         .build(); | ||||||
|     harness.advance_slot(); |     harness.advance_slot(); | ||||||
|     harness |     harness | ||||||
| @ -407,7 +421,7 @@ async fn forwards_iter_block_and_state_roots_until() { | |||||||
| 
 | 
 | ||||||
|     // The last restore point slot is the point at which the hybrid forwards iterator behaviour |     // The last restore point slot is the point at which the hybrid forwards iterator behaviour | ||||||
|     // changes. |     // changes. | ||||||
|     let last_restore_point_slot = store.get_latest_restore_point_slot(); |     let last_restore_point_slot = store.get_latest_restore_point_slot().unwrap(); | ||||||
|     assert!(last_restore_point_slot > 0); |     assert!(last_restore_point_slot > 0); | ||||||
| 
 | 
 | ||||||
|     let chain = &harness.chain; |     let chain = &harness.chain; | ||||||
| @ -461,13 +475,15 @@ async fn block_replay_with_inaccurate_state_roots() { | |||||||
|         .await; |         .await; | ||||||
| 
 | 
 | ||||||
|     // Slot must not be 0 mod 32 or else no blocks will be replayed. |     // Slot must not be 0 mod 32 or else no blocks will be replayed. | ||||||
|     let (mut head_state, head_root) = harness.get_current_state_and_root(); |     let (mut head_state, head_state_root) = harness.get_current_state_and_root(); | ||||||
|  |     let head_block_root = harness.head_block_root(); | ||||||
|     assert_ne!(head_state.slot() % 32, 0); |     assert_ne!(head_state.slot() % 32, 0); | ||||||
| 
 | 
 | ||||||
|     let mut fast_head_state = store |     let (_, mut fast_head_state) = store | ||||||
|         .get_inconsistent_state_for_attestation_verification_only( |         .get_inconsistent_state_for_attestation_verification_only( | ||||||
|             &head_root, |             &head_block_root, | ||||||
|             Some(head_state.slot()), |             head_state.slot(), | ||||||
|  |             head_state_root, | ||||||
|         ) |         ) | ||||||
|         .unwrap() |         .unwrap() | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
| @ -566,14 +582,7 @@ async fn block_replayer_hooks() { | |||||||
| async fn delete_blocks_and_states() { | async fn delete_blocks_and_states() { | ||||||
|     let db_path = tempdir().unwrap(); |     let db_path = tempdir().unwrap(); | ||||||
|     let store = get_store(&db_path); |     let store = get_store(&db_path); | ||||||
|     let validators_keypairs = |     let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); | ||||||
|         types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); |  | ||||||
|     let harness = BeaconChainHarness::builder(MinimalEthSpec) |  | ||||||
|         .default_spec() |  | ||||||
|         .keypairs(validators_keypairs) |  | ||||||
|         .fresh_disk_store(store.clone()) |  | ||||||
|         .mock_execution_layer() |  | ||||||
|         .build(); |  | ||||||
| 
 | 
 | ||||||
|     let unforked_blocks: u64 = 4 * E::slots_per_epoch(); |     let unforked_blocks: u64 = 4 * E::slots_per_epoch(); | ||||||
| 
 | 
 | ||||||
| @ -1016,18 +1025,14 @@ fn check_shuffling_compatible( | |||||||
| // Ensure blocks from abandoned forks are pruned from the Hot DB | // Ensure blocks from abandoned forks are pruned from the Hot DB | ||||||
| #[tokio::test] | #[tokio::test] | ||||||
| async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { | async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { | ||||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; |     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; |     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; |     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); |  | ||||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); |     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); |     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) |     let db_path = tempdir().unwrap(); | ||||||
|         .default_spec() |     let store = get_store(&db_path); | ||||||
|         .keypairs(validators_keypairs) |     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||||
|         .fresh_ephemeral_store() |  | ||||||
|         .mock_execution_layer() |  | ||||||
|         .build(); |  | ||||||
|     let slots_per_epoch = rig.slots_per_epoch(); |     let slots_per_epoch = rig.slots_per_epoch(); | ||||||
|     let (mut state, state_root) = rig.get_current_state_and_root(); |     let (mut state, state_root) = rig.get_current_state_and_root(); | ||||||
| 
 | 
 | ||||||
| @ -1126,18 +1131,14 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { | |||||||
| 
 | 
 | ||||||
| #[tokio::test] | #[tokio::test] | ||||||
| async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { | async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { | ||||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; |     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; |     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; |     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); |  | ||||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); |     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); |     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) |     let db_path = tempdir().unwrap(); | ||||||
|         .default_spec() |     let store = get_store(&db_path); | ||||||
|         .keypairs(validators_keypairs) |     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||||
|         .fresh_ephemeral_store() |  | ||||||
|         .mock_execution_layer() |  | ||||||
|         .build(); |  | ||||||
|     let slots_per_epoch = rig.slots_per_epoch(); |     let slots_per_epoch = rig.slots_per_epoch(); | ||||||
|     let (state, state_root) = rig.get_current_state_and_root(); |     let (state, state_root) = rig.get_current_state_and_root(); | ||||||
| 
 | 
 | ||||||
| @ -1261,15 +1262,11 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { | |||||||
|     const HONEST_VALIDATOR_COUNT: usize = 32; |     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; |     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; |     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); |  | ||||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); |     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); |     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) |     let db_path = tempdir().unwrap(); | ||||||
|         .default_spec() |     let store = get_store(&db_path); | ||||||
|         .keypairs(validators_keypairs) |     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||||
|         .fresh_ephemeral_store() |  | ||||||
|         .mock_execution_layer() |  | ||||||
|         .build(); |  | ||||||
|     let slots_per_epoch = rig.slots_per_epoch(); |     let slots_per_epoch = rig.slots_per_epoch(); | ||||||
|     let (mut state, state_root) = rig.get_current_state_and_root(); |     let (mut state, state_root) = rig.get_current_state_and_root(); | ||||||
| 
 | 
 | ||||||
| @ -1353,18 +1350,14 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { | |||||||
| 
 | 
 | ||||||
| #[tokio::test] | #[tokio::test] | ||||||
| async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { | async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { | ||||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; |     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; |     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; |     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); |  | ||||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); |     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); |     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) |     let db_path = tempdir().unwrap(); | ||||||
|         .default_spec() |     let store = get_store(&db_path); | ||||||
|         .keypairs(validators_keypairs) |     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||||
|         .fresh_ephemeral_store() |  | ||||||
|         .mock_execution_layer() |  | ||||||
|         .build(); |  | ||||||
|     let (state, state_root) = rig.get_current_state_and_root(); |     let (state, state_root) = rig.get_current_state_and_root(); | ||||||
| 
 | 
 | ||||||
|     // Fill up 0th epoch with canonical chain blocks |     // Fill up 0th epoch with canonical chain blocks | ||||||
| @ -1498,18 +1491,14 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { | |||||||
| // This is to check that states outside of normal block processing are pruned correctly. | // This is to check that states outside of normal block processing are pruned correctly. | ||||||
| #[tokio::test] | #[tokio::test] | ||||||
| async fn prunes_skipped_slots_states() { | async fn prunes_skipped_slots_states() { | ||||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; |     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; |     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; |     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); |  | ||||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); |     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); |     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) |     let db_path = tempdir().unwrap(); | ||||||
|         .default_spec() |     let store = get_store(&db_path); | ||||||
|         .keypairs(validators_keypairs) |     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||||
|         .fresh_ephemeral_store() |  | ||||||
|         .mock_execution_layer() |  | ||||||
|         .build(); |  | ||||||
|     let (state, state_root) = rig.get_current_state_and_root(); |     let (state, state_root) = rig.get_current_state_and_root(); | ||||||
| 
 | 
 | ||||||
|     let canonical_slots_zeroth_epoch: Vec<Slot> = |     let canonical_slots_zeroth_epoch: Vec<Slot> = | ||||||
| @ -1627,18 +1616,14 @@ async fn prunes_skipped_slots_states() { | |||||||
| // This is to check that states outside of normal block processing are pruned correctly. | // This is to check that states outside of normal block processing are pruned correctly. | ||||||
| #[tokio::test] | #[tokio::test] | ||||||
| async fn finalizes_non_epoch_start_slot() { | async fn finalizes_non_epoch_start_slot() { | ||||||
|     const HONEST_VALIDATOR_COUNT: usize = 32 + 0; |     const HONEST_VALIDATOR_COUNT: usize = 32; | ||||||
|     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; |     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; | ||||||
|     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; |     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; | ||||||
|     let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); |  | ||||||
|     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); |     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); | ||||||
|     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); |     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); | ||||||
|     let rig = BeaconChainHarness::builder(MinimalEthSpec) |     let db_path = tempdir().unwrap(); | ||||||
|         .default_spec() |     let store = get_store(&db_path); | ||||||
|         .keypairs(validators_keypairs) |     let rig = get_harness(store.clone(), VALIDATOR_COUNT); | ||||||
|         .fresh_ephemeral_store() |  | ||||||
|         .mock_execution_layer() |  | ||||||
|         .build(); |  | ||||||
|     let (state, state_root) = rig.get_current_state_and_root(); |     let (state, state_root) = rig.get_current_state_and_root(); | ||||||
| 
 | 
 | ||||||
|     let canonical_slots_zeroth_epoch: Vec<Slot> = |     let canonical_slots_zeroth_epoch: Vec<Slot> = | ||||||
| @ -2054,39 +2039,82 @@ async fn garbage_collect_temp_states_from_failed_block() { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| #[tokio::test] | #[tokio::test] | ||||||
| async fn weak_subjectivity_sync() { | async fn weak_subjectivity_sync_easy() { | ||||||
|  |     let num_initial_slots = E::slots_per_epoch() * 11; | ||||||
|  |     let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); | ||||||
|  |     let slots = (1..num_initial_slots).map(Slot::new).collect(); | ||||||
|  |     weak_subjectivity_sync_test(slots, checkpoint_slot).await | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[tokio::test] | ||||||
|  | async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() { | ||||||
|  |     let num_initial_slots = E::slots_per_epoch() * 11; | ||||||
|  |     let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); | ||||||
|  |     let slots = (1..num_initial_slots) | ||||||
|  |         .map(Slot::new) | ||||||
|  |         .filter(|&slot| { | ||||||
|  |             // Skip 3 slots leading up to the checkpoint slot. | ||||||
|  |             slot <= checkpoint_slot - 3 || slot > checkpoint_slot | ||||||
|  |         }) | ||||||
|  |         .collect(); | ||||||
|  |     weak_subjectivity_sync_test(slots, checkpoint_slot).await | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[tokio::test] | ||||||
|  | async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { | ||||||
|  |     let num_initial_slots = E::slots_per_epoch() * 11; | ||||||
|  |     let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3); | ||||||
|  |     let slots = (1..num_initial_slots) | ||||||
|  |         .map(Slot::new) | ||||||
|  |         .filter(|&slot| { | ||||||
|  |             // Skip 3 slots after the checkpoint slot. | ||||||
|  |             slot <= checkpoint_slot || slot > checkpoint_slot + 3 | ||||||
|  |         }) | ||||||
|  |         .collect(); | ||||||
|  |     weak_subjectivity_sync_test(slots, checkpoint_slot).await | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) { | ||||||
|     // Build an initial chain on one harness, representing a synced node with full history. |     // Build an initial chain on one harness, representing a synced node with full history. | ||||||
|     let num_initial_blocks = E::slots_per_epoch() * 11; |  | ||||||
|     let num_final_blocks = E::slots_per_epoch() * 2; |     let num_final_blocks = E::slots_per_epoch() * 2; | ||||||
| 
 | 
 | ||||||
|     let temp1 = tempdir().unwrap(); |     let temp1 = tempdir().unwrap(); | ||||||
|     let full_store = get_store(&temp1); |     let full_store = get_store(&temp1); | ||||||
|     let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); |     let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); | ||||||
| 
 | 
 | ||||||
|  |     let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>(); | ||||||
|  | 
 | ||||||
|  |     let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); | ||||||
|     harness |     harness | ||||||
|         .extend_chain( |         .add_attested_blocks_at_slots( | ||||||
|             num_initial_blocks as usize, |             genesis_state.clone(), | ||||||
|             BlockStrategy::OnCanonicalHead, |             genesis_state_root, | ||||||
|             AttestationStrategy::AllValidators, |             &slots, | ||||||
|  |             &all_validators, | ||||||
|         ) |         ) | ||||||
|         .await; |         .await; | ||||||
| 
 | 
 | ||||||
|     let genesis_state = full_store |     let wss_block_root = harness | ||||||
|         .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) |         .chain | ||||||
|  |         .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) | ||||||
|         .unwrap() |         .unwrap() | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
|     let wss_checkpoint = harness.finalized_checkpoint(); |     let wss_state_root = harness | ||||||
|  |         .chain | ||||||
|  |         .state_root_at_slot(checkpoint_slot) | ||||||
|  |         .unwrap() | ||||||
|  |         .unwrap(); | ||||||
|  | 
 | ||||||
|     let wss_block = harness |     let wss_block = harness | ||||||
|         .chain |         .chain | ||||||
|         .store |         .store | ||||||
|         .get_full_block(&wss_checkpoint.root) |         .get_full_block(&wss_block_root) | ||||||
|         .unwrap() |         .unwrap() | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
|     let wss_state = full_store |     let wss_state = full_store | ||||||
|         .get_state(&wss_block.state_root(), None) |         .get_state(&wss_state_root, Some(checkpoint_slot)) | ||||||
|         .unwrap() |         .unwrap() | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
|     let wss_slot = wss_block.slot(); |  | ||||||
| 
 | 
 | ||||||
|     // Add more blocks that advance finalization further. |     // Add more blocks that advance finalization further. | ||||||
|     harness.advance_slot(); |     harness.advance_slot(); | ||||||
| @ -2105,20 +2133,26 @@ async fn weak_subjectivity_sync() { | |||||||
|     let spec = test_spec::<E>(); |     let spec = test_spec::<E>(); | ||||||
|     let seconds_per_slot = spec.seconds_per_slot; |     let seconds_per_slot = spec.seconds_per_slot; | ||||||
| 
 | 
 | ||||||
|     // Initialise a new beacon chain from the finalized checkpoint |     // Initialise a new beacon chain from the finalized checkpoint. | ||||||
|  |     // The slot clock must be set to a time ahead of the checkpoint state. | ||||||
|  |     let slot_clock = TestingSlotClock::new( | ||||||
|  |         Slot::new(0), | ||||||
|  |         Duration::from_secs(harness.chain.genesis_time), | ||||||
|  |         Duration::from_secs(seconds_per_slot), | ||||||
|  |     ); | ||||||
|  |     slot_clock.set_slot(harness.get_current_slot().as_u64()); | ||||||
|     let beacon_chain = Arc::new( |     let beacon_chain = Arc::new( | ||||||
|         BeaconChainBuilder::new(MinimalEthSpec) |         BeaconChainBuilder::new(MinimalEthSpec) | ||||||
|             .store(store.clone()) |             .store(store.clone()) | ||||||
|             .custom_spec(test_spec::<E>()) |             .custom_spec(test_spec::<E>()) | ||||||
|             .task_executor(harness.chain.task_executor.clone()) |             .task_executor(harness.chain.task_executor.clone()) | ||||||
|  |             .logger(log.clone()) | ||||||
|             .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) |             .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) | ||||||
|             .unwrap() |             .unwrap() | ||||||
|             .logger(log.clone()) |  | ||||||
|             .store_migrator_config(MigratorConfig::default().blocking()) |             .store_migrator_config(MigratorConfig::default().blocking()) | ||||||
|             .dummy_eth1_backend() |             .dummy_eth1_backend() | ||||||
|             .expect("should build dummy backend") |             .expect("should build dummy backend") | ||||||
|             .testing_slot_clock(Duration::from_secs(seconds_per_slot)) |             .slot_clock(slot_clock) | ||||||
|             .expect("should configure testing slot clock") |  | ||||||
|             .shutdown_sender(shutdown_tx) |             .shutdown_sender(shutdown_tx) | ||||||
|             .chain_config(ChainConfig::default()) |             .chain_config(ChainConfig::default()) | ||||||
|             .event_handler(Some(ServerSentEventHandler::new_with_capacity( |             .event_handler(Some(ServerSentEventHandler::new_with_capacity( | ||||||
| @ -2132,9 +2166,9 @@ async fn weak_subjectivity_sync() { | |||||||
| 
 | 
 | ||||||
|     // Apply blocks forward to reach head. |     // Apply blocks forward to reach head. | ||||||
|     let chain_dump = harness.chain.chain_dump().unwrap(); |     let chain_dump = harness.chain.chain_dump().unwrap(); | ||||||
|     let new_blocks = &chain_dump[wss_slot.as_usize() + 1..]; |     let new_blocks = chain_dump | ||||||
|  |         .iter() | ||||||
|     assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1); |         .filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot); | ||||||
| 
 | 
 | ||||||
|     for snapshot in new_blocks { |     for snapshot in new_blocks { | ||||||
|         let full_block = harness |         let full_block = harness | ||||||
| @ -2151,8 +2185,8 @@ async fn weak_subjectivity_sync() { | |||||||
|             .process_block( |             .process_block( | ||||||
|                 full_block.canonical_root(), |                 full_block.canonical_root(), | ||||||
|                 Arc::new(full_block), |                 Arc::new(full_block), | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |                 NotifyExecutionLayer::Yes, | ||||||
|  |                 || Ok(()), | ||||||
|             ) |             ) | ||||||
|             .await |             .await | ||||||
|             .unwrap(); |             .unwrap(); | ||||||
| @ -2220,13 +2254,17 @@ async fn weak_subjectivity_sync() { | |||||||
|     assert_eq!(forwards, expected); |     assert_eq!(forwards, expected); | ||||||
| 
 | 
 | ||||||
|     // All blocks can be loaded. |     // All blocks can be loaded. | ||||||
|  |     let mut prev_block_root = Hash256::zero(); | ||||||
|     for (block_root, slot) in beacon_chain |     for (block_root, slot) in beacon_chain | ||||||
|         .forwards_iter_block_roots(Slot::new(0)) |         .forwards_iter_block_roots(Slot::new(0)) | ||||||
|         .unwrap() |         .unwrap() | ||||||
|         .map(Result::unwrap) |         .map(Result::unwrap) | ||||||
|     { |     { | ||||||
|         let block = store.get_blinded_block(&block_root).unwrap().unwrap(); |         let block = store.get_blinded_block(&block_root).unwrap().unwrap(); | ||||||
|         assert_eq!(block.slot(), slot); |         if block_root != prev_block_root { | ||||||
|  |             assert_eq!(block.slot(), slot); | ||||||
|  |         } | ||||||
|  |         prev_block_root = block_root; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // All states from the oldest state slot can be loaded. |     // All states from the oldest state slot can be loaded. | ||||||
| @ -2241,14 +2279,141 @@ async fn weak_subjectivity_sync() { | |||||||
|         assert_eq!(state.canonical_root(), state_root); |         assert_eq!(state.canonical_root(), state_root); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // Anchor slot is still set to the starting slot. |     // Anchor slot is still set to the slot of the checkpoint block. | ||||||
|     assert_eq!(store.get_anchor_slot(), Some(wss_slot)); |     assert_eq!(store.get_anchor_slot(), Some(wss_block.slot())); | ||||||
| 
 | 
 | ||||||
|     // Reconstruct states. |     // Reconstruct states. | ||||||
|     store.clone().reconstruct_historic_states().unwrap(); |     store.clone().reconstruct_historic_states().unwrap(); | ||||||
|     assert_eq!(store.get_anchor_slot(), None); |     assert_eq!(store.get_anchor_slot(), None); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /// Test that blocks and attestations that refer to states around an unaligned split state are | ||||||
|  | /// processed correctly. | ||||||
|  | #[tokio::test] | ||||||
|  | async fn process_blocks_and_attestations_for_unaligned_checkpoint() { | ||||||
|  |     let temp = tempdir().unwrap(); | ||||||
|  |     let store = get_store(&temp); | ||||||
|  |     let chain_config = ChainConfig { | ||||||
|  |         reconstruct_historic_states: false, | ||||||
|  |         ..ChainConfig::default() | ||||||
|  |     }; | ||||||
|  |     let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config); | ||||||
|  | 
 | ||||||
|  |     let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>(); | ||||||
|  | 
 | ||||||
|  |     let split_slot = Slot::new(E::slots_per_epoch() * 4); | ||||||
|  |     let pre_skips = 1; | ||||||
|  |     let post_skips = 1; | ||||||
|  | 
 | ||||||
|  |     // Build the chain up to the intended split slot, with `pre_skips` skipped slots before the split. | ||||||
|  |     let slots = (1..=split_slot.as_u64() - pre_skips) | ||||||
|  |         .map(Slot::new) | ||||||
|  |         .collect::<Vec<_>>(); | ||||||
|  | 
 | ||||||
|  |     let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); | ||||||
|  |     harness | ||||||
|  |         .add_attested_blocks_at_slots( | ||||||
|  |             genesis_state.clone(), | ||||||
|  |             genesis_state_root, | ||||||
|  |             &slots, | ||||||
|  |             &all_validators, | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  | 
 | ||||||
|  |     // Before the split slot becomes finalized, create two forking blocks that build on the split | ||||||
|  |     // block: | ||||||
|  |     // | ||||||
|  |     // - one that is invalid because it conflicts with finalization (slot <= finalized_slot) | ||||||
|  |     // - one that is valid because its slot is not finalized (slot > finalized_slot) | ||||||
|  |     let (unadvanced_split_state, unadvanced_split_state_root) = | ||||||
|  |         harness.get_current_state_and_root(); | ||||||
|  | 
 | ||||||
|  |     let (invalid_fork_block, _) = harness | ||||||
|  |         .make_block(unadvanced_split_state.clone(), split_slot) | ||||||
|  |         .await; | ||||||
|  |     let (valid_fork_block, _) = harness | ||||||
|  |         .make_block(unadvanced_split_state.clone(), split_slot + 1) | ||||||
|  |         .await; | ||||||
|  | 
 | ||||||
|  |     // Advance the chain so that the intended split slot is finalized. | ||||||
|  |     // Do not attest in the epoch boundary slot, to make attestation production later easier (no | ||||||
|  |     // equivocations). | ||||||
|  |     let finalizing_slot = split_slot + 2 * E::slots_per_epoch(); | ||||||
|  |     for _ in 0..pre_skips + post_skips { | ||||||
|  |         harness.advance_slot(); | ||||||
|  |     } | ||||||
|  |     harness.extend_to_slot(finalizing_slot - 1).await; | ||||||
|  |     harness | ||||||
|  |         .add_block_at_slot(finalizing_slot, harness.get_current_state()) | ||||||
|  |         .await | ||||||
|  |         .unwrap(); | ||||||
|  | 
 | ||||||
|  |     // Check that the split slot is as intended. | ||||||
|  |     let split = store.get_split_info(); | ||||||
|  |     assert_eq!(split.slot, split_slot); | ||||||
|  |     assert_eq!(split.block_root, valid_fork_block.parent_root()); | ||||||
|  |     assert_ne!(split.state_root, unadvanced_split_state_root); | ||||||
|  | 
 | ||||||
|  |     // Applying the invalid block should fail. | ||||||
|  |     let err = harness | ||||||
|  |         .chain | ||||||
|  |         .process_block( | ||||||
|  |             invalid_fork_block.canonical_root(), | ||||||
|  |             Arc::new(invalid_fork_block.clone()), | ||||||
|  |             NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|  |         ) | ||||||
|  |         .await | ||||||
|  |         .unwrap_err(); | ||||||
|  |     assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); | ||||||
|  | 
 | ||||||
|  |     // Applying the valid block should succeed, but it should not become head. | ||||||
|  |     harness | ||||||
|  |         .chain | ||||||
|  |         .process_block( | ||||||
|  |             valid_fork_block.canonical_root(), | ||||||
|  |             Arc::new(valid_fork_block.clone()), | ||||||
|  |             NotifyExecutionLayer::Yes, | ||||||
|  |             || Ok(()), | ||||||
|  |         ) | ||||||
|  |         .await | ||||||
|  |         .unwrap(); | ||||||
|  |     harness.chain.recompute_head_at_current_slot().await; | ||||||
|  |     assert_ne!(harness.head_block_root(), valid_fork_block.canonical_root()); | ||||||
|  | 
 | ||||||
|  |     // Attestations to the split block in the next 2 epochs should be processed successfully. | ||||||
|  |     let attestation_start_slot = harness.get_current_slot(); | ||||||
|  |     let attestation_end_slot = attestation_start_slot + 2 * E::slots_per_epoch(); | ||||||
|  |     let (split_state_root, mut advanced_split_state) = harness | ||||||
|  |         .chain | ||||||
|  |         .store | ||||||
|  |         .get_advanced_hot_state(split.block_root, split.slot, split.state_root) | ||||||
|  |         .unwrap() | ||||||
|  |         .unwrap(); | ||||||
|  |     complete_state_advance( | ||||||
|  |         &mut advanced_split_state, | ||||||
|  |         Some(split_state_root), | ||||||
|  |         attestation_start_slot, | ||||||
|  |         &harness.chain.spec, | ||||||
|  |     ) | ||||||
|  |     .unwrap(); | ||||||
|  |     advanced_split_state | ||||||
|  |         .build_caches(&harness.chain.spec) | ||||||
|  |         .unwrap(); | ||||||
|  |     let advanced_split_state_root = advanced_split_state.update_tree_hash_cache().unwrap(); | ||||||
|  |     for slot in (attestation_start_slot.as_u64()..attestation_end_slot.as_u64()).map(Slot::new) { | ||||||
|  |         let attestations = harness.make_attestations( | ||||||
|  |             &all_validators, | ||||||
|  |             &advanced_split_state, | ||||||
|  |             advanced_split_state_root, | ||||||
|  |             split.block_root.into(), | ||||||
|  |             slot, | ||||||
|  |         ); | ||||||
|  |         harness.advance_slot(); | ||||||
|  |         harness.process_attestations(attestations); | ||||||
|  |     } | ||||||
|  | } | ||||||
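The attestation loop above leans on a general rule: attestations must be produced from a state whose slot has reached the attestation slot, which is why the split state is advanced and its caches rebuilt before the loop. Distilled, using the names from the diff (illustrative only, not a verbatim API contract):

    // Advance through empty slots to the target slot, then rebuild the
    // committee caches and the tree hash cache so the state can serve
    // attestation production.
    complete_state_advance(&mut state, Some(state_root), target_slot, &spec)?;
    state.build_caches(&spec)?;
    let state_root = state.update_tree_hash_cache()?;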
|  | 
 | ||||||
| #[tokio::test] | #[tokio::test] | ||||||
| async fn finalizes_after_resuming_from_db() { | async fn finalizes_after_resuming_from_db() { | ||||||
|     let validator_count = 16; |     let validator_count = 16; | ||||||
| @ -2307,6 +2472,7 @@ async fn finalizes_after_resuming_from_db() { | |||||||
|         .default_spec() |         .default_spec() | ||||||
|         .keypairs(KEYPAIRS[0..validator_count].to_vec()) |         .keypairs(KEYPAIRS[0..validator_count].to_vec()) | ||||||
|         .resumed_disk_store(store) |         .resumed_disk_store(store) | ||||||
|  |         .testing_slot_clock(original_chain.slot_clock.clone()) | ||||||
|         .mock_execution_layer() |         .mock_execution_layer() | ||||||
|         .build(); |         .build(); | ||||||
| 
 | 
 | ||||||
| @ -2560,6 +2726,9 @@ async fn schema_downgrade_to_min_version() { | |||||||
|         SchemaVersion(11) |         SchemaVersion(11) | ||||||
|     }; |     }; | ||||||
| 
 | 
 | ||||||
|  |     // Save the slot clock so that the new harness doesn't revert in time. | ||||||
|  |     let slot_clock = harness.chain.slot_clock.clone(); | ||||||
|  | 
 | ||||||
|     // Close the database to ensure everything is written to disk. |     // Close the database to ensure everything is written to disk. | ||||||
|     drop(store); |     drop(store); | ||||||
|     drop(harness); |     drop(harness); | ||||||
| @ -2590,11 +2759,21 @@ async fn schema_downgrade_to_min_version() { | |||||||
|     ) |     ) | ||||||
|     .expect("schema upgrade from minimum version should work"); |     .expect("schema upgrade from minimum version should work"); | ||||||
| 
 | 
 | ||||||
|     // Rescreate the harness. |     // Recreate the harness. | ||||||
|  |     /* | ||||||
|  |     let slot_clock = TestingSlotClock::new( | ||||||
|  |         Slot::new(0), | ||||||
|  |         Duration::from_secs(harness.chain.genesis_time), | ||||||
|  |         Duration::from_secs(spec.seconds_per_slot), | ||||||
|  |     ); | ||||||
|  |     slot_clock.set_slot(harness.get_current_slot().as_u64()); | ||||||
|  |     */ | ||||||
|  | 
 | ||||||
|     let harness = BeaconChainHarness::builder(MinimalEthSpec) |     let harness = BeaconChainHarness::builder(MinimalEthSpec) | ||||||
|         .default_spec() |         .default_spec() | ||||||
|         .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) |         .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) | ||||||
|         .logger(store.logger().clone()) |         .logger(store.logger().clone()) | ||||||
|  |         .testing_slot_clock(slot_clock) | ||||||
|         .resumed_disk_store(store.clone()) |         .resumed_disk_store(store.clone()) | ||||||
|         .mock_execution_layer() |         .mock_execution_layer() | ||||||
|         .build(); |         .build(); | ||||||
|  | |||||||
| @ -1,6 +1,6 @@ | |||||||
| #![cfg(not(debug_assertions))] | #![cfg(not(debug_assertions))] | ||||||
| 
 | 
 | ||||||
| use beacon_chain::sync_committee_verification::Error as SyncCommitteeError; | use beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData}; | ||||||
| use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; | use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; | ||||||
| use int_to_bytes::int_to_bytes32; | use int_to_bytes::int_to_bytes32; | ||||||
| use lazy_static::lazy_static; | use lazy_static::lazy_static; | ||||||
| @ -444,11 +444,17 @@ async fn aggregated_gossip_verification() { | |||||||
|      * subcommittee index contribution.subcommittee_index. |      * subcommittee index contribution.subcommittee_index. | ||||||
|      */ |      */ | ||||||
| 
 | 
 | ||||||
|  |     let contribution = &valid_aggregate.message.contribution; | ||||||
|  |     let sync_committee_data = SyncCommitteeData { | ||||||
|  |         slot: contribution.slot, | ||||||
|  |         root: contribution.beacon_block_root, | ||||||
|  |         subcommittee_index: contribution.subcommittee_index, | ||||||
|  |     }; | ||||||
|     assert_invalid!( |     assert_invalid!( | ||||||
|         "aggregate that has already been seen", |         "aggregate that has already been seen", | ||||||
|         valid_aggregate.clone(), |         valid_aggregate.clone(), | ||||||
|         SyncCommitteeError::SyncContributionAlreadyKnown(hash) |         SyncCommitteeError::SyncContributionSupersetKnown(hash) | ||||||
|         if hash == valid_aggregate.message.contribution.tree_hash_root() |         if hash == sync_committee_data.tree_hash_root() | ||||||
|     ); |     ); | ||||||
| 
 | 
 | ||||||
|     /* |     /* | ||||||
|  | |||||||
| @ -6,9 +6,8 @@ use beacon_chain::{ | |||||||
|         AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, |         AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, | ||||||
|         OP_POOL_DB_KEY, |         OP_POOL_DB_KEY, | ||||||
|     }, |     }, | ||||||
|     BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, |     BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, | ||||||
| }; | }; | ||||||
| use fork_choice::CountUnrealized; |  | ||||||
| use lazy_static::lazy_static; | use lazy_static::lazy_static; | ||||||
| use operation_pool::PersistedOperationPool; | use operation_pool::PersistedOperationPool; | ||||||
| use state_processing::{ | use state_processing::{ | ||||||
| @ -29,6 +28,10 @@ lazy_static! { | |||||||
| fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> { | fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> { | ||||||
|     let harness = BeaconChainHarness::builder(MinimalEthSpec) |     let harness = BeaconChainHarness::builder(MinimalEthSpec) | ||||||
|         .default_spec() |         .default_spec() | ||||||
|  |         .chain_config(ChainConfig { | ||||||
|  |             reconstruct_historic_states: true, | ||||||
|  |             ..ChainConfig::default() | ||||||
|  |         }) | ||||||
|         .keypairs(KEYPAIRS[0..validator_count].to_vec()) |         .keypairs(KEYPAIRS[0..validator_count].to_vec()) | ||||||
|         .fresh_ephemeral_store() |         .fresh_ephemeral_store() | ||||||
|         .mock_execution_layer() |         .mock_execution_layer() | ||||||
| @ -687,8 +690,8 @@ async fn run_skip_slot_test(skip_slots: u64) { | |||||||
|             .process_block( |             .process_block( | ||||||
|                 harness_a.chain.head_snapshot().beacon_block_root, |                 harness_a.chain.head_snapshot().beacon_block_root, | ||||||
|                 harness_a.chain.head_snapshot().beacon_block.clone(), |                 harness_a.chain.head_snapshot().beacon_block.clone(), | ||||||
|                 CountUnrealized::True, |  | ||||||
|                 NotifyExecutionLayer::Yes, |                 NotifyExecutionLayer::Yes, | ||||||
|  |                 || Ok(()) | ||||||
|             ) |             ) | ||||||
|             .await |             .await | ||||||
|             .unwrap(), |             .unwrap(), | ||||||
|  | |||||||
							
								
								
									
beacon_node/beacon_processor/Cargo.toml (new file, 26 lines)
							| @ -0,0 +1,26 @@ | |||||||
|  | [package] | ||||||
|  | name = "beacon_processor" | ||||||
|  | version = "0.1.0" | ||||||
|  | edition = { workspace = true } | ||||||
|  | 
 | ||||||
|  | [dependencies] | ||||||
|  | slog = { workspace = true } | ||||||
|  | itertools = { workspace = true } | ||||||
|  | logging = { workspace = true } | ||||||
|  | tokio = { workspace = true } | ||||||
|  | tokio-util = { workspace = true } | ||||||
|  | futures = { workspace = true } | ||||||
|  | fnv = { workspace = true } | ||||||
|  | strum = { workspace = true } | ||||||
|  | task_executor = { workspace = true } | ||||||
|  | slot_clock = { workspace = true } | ||||||
|  | lighthouse_network = { workspace = true } | ||||||
|  | hex = { workspace = true } | ||||||
|  | derivative = { workspace = true } | ||||||
|  | types = { workspace = true } | ||||||
|  | ethereum_ssz = { workspace = true } | ||||||
|  | lazy_static = { workspace = true } | ||||||
|  | lighthouse_metrics = { workspace = true } | ||||||
|  | parking_lot = { workspace = true } | ||||||
|  | num_cpus = { workspace = true } | ||||||
|  | serde = { workspace = true } | ||||||
										
											
(File diff suppressed because it is too large)
150 beacon_node/beacon_processor/src/metrics.rs (Normal file)
									
								
							| @ -0,0 +1,150 @@ | |||||||
|  | pub use lighthouse_metrics::*; | ||||||
|  | 
 | ||||||
|  | lazy_static::lazy_static! { | ||||||
|  | 
 | ||||||
|  |     /* | ||||||
|  |      * Gossip processor | ||||||
|  |      */ | ||||||
|  |     pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result<IntCounterVec> = try_create_int_counter_vec( | ||||||
|  |         "beacon_processor_work_events_rx_count", | ||||||
|  |         "Count of work events received (but not necessarily processed)", | ||||||
|  |         &["type"] | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result<IntCounterVec> = try_create_int_counter_vec( | ||||||
|  |         "beacon_processor_work_events_ignored_count", | ||||||
|  |         "Count of work events purposefully ignored", | ||||||
|  |         &["type"] | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result<IntCounterVec> = try_create_int_counter_vec( | ||||||
|  |         "beacon_processor_work_events_started_count", | ||||||
|  |         "Count of work events which have been started by a worker", | ||||||
|  |         &["type"] | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_WORKER_TIME: Result<HistogramVec> = try_create_histogram_vec( | ||||||
|  |         "beacon_processor_worker_time", | ||||||
|  |         "Time taken for a worker to fully process some parcel of work.", | ||||||
|  |         &["type"] | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result<IntCounter> = try_create_int_counter( | ||||||
|  |         "beacon_processor_workers_spawned_total", | ||||||
|  |         "The number of workers ever spawned by the gossip processing pool." | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_workers_active_total", | ||||||
|  |         "Count of active workers in the gossip processing pool." | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result<IntCounter> = try_create_int_counter( | ||||||
|  |         "beacon_processor_idle_events_total", | ||||||
|  |         "Count of idle events processed by the gossip processor manager." | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result<Histogram> = try_create_histogram( | ||||||
|  |         "beacon_processor_event_handling_seconds", | ||||||
|  |         "Time spent handling a new message and allocating it to a queue or worker." | ||||||
|  |     ); | ||||||
|  |     // Gossip blocks.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_gossip_block_queue_total", | ||||||
|  |         "Count of blocks from gossip waiting to be verified." | ||||||
|  |     ); | ||||||
|  |     // Gossip Exits.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_exit_queue_total", | ||||||
|  |         "Count of exits from gossip waiting to be verified." | ||||||
|  |     ); | ||||||
|  |     // Gossip proposer slashings.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_proposer_slashing_queue_total", | ||||||
|  |         "Count of proposer slashings from gossip waiting to be verified." | ||||||
|  |     ); | ||||||
|  |     // Gossip attester slashings.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_attester_slashing_queue_total", | ||||||
|  |         "Count of attester slashings from gossip waiting to be verified." | ||||||
|  |     ); | ||||||
|  |     // Gossip BLS to execution changes.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_bls_to_execution_change_queue_total", | ||||||
|  |         "Count of address changes from gossip waiting to be verified." | ||||||
|  |     ); | ||||||
|  |     // Rpc blocks.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_rpc_block_queue_total", | ||||||
|  |         "Count of blocks from the rpc waiting to be verified." | ||||||
|  |     ); | ||||||
|  |     // Chain segments.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_chain_segment_queue_total", | ||||||
|  |         "Count of chain segments from the rpc waiting to be verified." | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_backfill_chain_segment_queue_total", | ||||||
|  |         "Count of backfill chain segments from the rpc waiting to be verified." | ||||||
|  |     ); | ||||||
|  |     // Unaggregated attestations.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_unaggregated_attestation_queue_total", | ||||||
|  |         "Count of unagg. attestations waiting to be processed." | ||||||
|  |     ); | ||||||
|  |     // Aggregated attestations.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_aggregated_attestation_queue_total", | ||||||
|  |         "Count of agg. attestations waiting to be processed." | ||||||
|  |     ); | ||||||
|  |     // Sync committee messages.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_sync_message_queue_total", | ||||||
|  |         "Count of sync committee messages waiting to be processed." | ||||||
|  |     ); | ||||||
|  |     // Sync contribution.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_sync_contribution_queue_total", | ||||||
|  |         "Count of sync committee contributions waiting to be processed." | ||||||
|  |     ); | ||||||
|  |     // HTTP API requests.
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_api_request_p0_queue_total", | ||||||
|  |         "Count of P0 HTTP requesets waiting to be processed." | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( | ||||||
|  |         "beacon_processor_api_request_p1_queue_total", | ||||||
|  |         "Count of P1 HTTP requesets waiting to be processed." | ||||||
|  |     ); | ||||||
|  | 
 | ||||||
|  |     /* | ||||||
|  |      * Attestation reprocessing queue metrics. | ||||||
|  |      */ | ||||||
|  |     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result<IntGaugeVec> = | ||||||
|  |         try_create_int_gauge_vec( | ||||||
|  |         "beacon_processor_reprocessing_queue_total", | ||||||
|  |         "Count of items in a reprocessing queue.", | ||||||
|  |         &["type"] | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter( | ||||||
|  |         "beacon_processor_reprocessing_queue_expired_attestations", | ||||||
|  |         "Number of queued attestations which have expired before a matching block has been found." | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter( | ||||||
|  |         "beacon_processor_reprocessing_queue_matched_attestations", | ||||||
|  |         "Number of queued attestations where as matching block has been imported." | ||||||
|  |     ); | ||||||
|  | 
 | ||||||
|  |     /* | ||||||
|  |      * Light client update reprocessing queue metrics. | ||||||
|  |      */ | ||||||
|  |     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter( | ||||||
|  |         "beacon_processor_reprocessing_queue_expired_optimistic_updates", | ||||||
|  |         "Number of queued light client optimistic updates which have expired before a matching block has been found." | ||||||
|  |     ); | ||||||
|  |     pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter( | ||||||
|  |         "beacon_processor_reprocessing_queue_matched_optimistic_updates", | ||||||
|  |         "Number of queued light client optimistic updates where as matching block has been imported." | ||||||
|  |     ); | ||||||
|  | 
 | ||||||
|  |     /// Errors and Debugging Stats
 | ||||||
|  |     pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result<IntCounterVec> = | ||||||
|  |         try_create_int_counter_vec( | ||||||
|  |             "beacon_processor_send_error_per_work_type", | ||||||
|  |             "Total number of beacon processor send error per work type", | ||||||
|  |             &["type"] | ||||||
|  |         ); | ||||||
|  | } | ||||||
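Each metric above is created once inside `lazy_static!` and then driven from the worker loops in the (suppressed) `lib.rs`. A hedged sketch of plausible call sites, assuming the usual `lighthouse_metrics` helpers (`inc_counter_vec`, `set_gauge`, `start_timer_vec`); the exact wiring is not shown in this diff:

```rust
// Hedged sketch, not the real lib.rs: assumes the lighthouse_metrics helper
// signatures used elsewhere in the codebase.
use crate::metrics;

fn on_work_event(work_type: &str, gossip_block_queue_len: usize) {
    // Count every received event, labelled by work type.
    metrics::inc_counter_vec(&metrics::BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT, &[work_type]);
    // Publish the current gossip block queue depth.
    metrics::set_gauge(
        &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL,
        gossip_block_queue_len as i64,
    );
}

fn run_worker(work_type: &str) {
    // The timer guard records into the histogram when it is dropped.
    let _timer = metrics::start_timer_vec(&metrics::BEACON_PROCESSOR_WORKER_TIME, &[work_type]);
    // ... process the parcel of work ...
}
```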
| @ -10,23 +10,18 @@ | |||||||
| //!
 | //!
 | ||||||
| //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown
 | //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown
 | ||||||
| //! block will be re-queued until their block is imported, or until they expire.
 | //! block will be re-queued until their block is imported, or until they expire.
 | ||||||
| use super::MAX_SCHEDULED_WORK_QUEUE_LEN; |  | ||||||
| use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; |  | ||||||
| use crate::metrics; | use crate::metrics; | ||||||
| use crate::sync::manager::BlockProcessType; | use crate::{AsyncFn, BlockingFn, Work, WorkEvent}; | ||||||
| use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; |  | ||||||
| use fnv::FnvHashMap; | use fnv::FnvHashMap; | ||||||
| use futures::task::Poll; | use futures::task::Poll; | ||||||
| use futures::{Stream, StreamExt}; | use futures::{Stream, StreamExt}; | ||||||
| use itertools::Itertools; | use itertools::Itertools; | ||||||
| use lighthouse_network::{MessageId, PeerId}; |  | ||||||
| use logging::TimeLatch; | use logging::TimeLatch; | ||||||
| use slog::{crit, debug, error, trace, warn, Logger}; | use slog::{crit, debug, error, trace, warn, Logger}; | ||||||
| use slot_clock::SlotClock; | use slot_clock::SlotClock; | ||||||
| use std::collections::{HashMap, HashSet}; | use std::collections::{HashMap, HashSet}; | ||||||
| use std::future::Future; | use std::future::Future; | ||||||
| use std::pin::Pin; | use std::pin::Pin; | ||||||
| use std::sync::Arc; |  | ||||||
| use std::task::Context; | use std::task::Context; | ||||||
| use std::time::Duration; | use std::time::Duration; | ||||||
| use strum::AsRefStr; | use strum::AsRefStr; | ||||||
| @ -34,10 +29,7 @@ use task_executor::TaskExecutor; | |||||||
| use tokio::sync::mpsc::{self, Receiver, Sender}; | use tokio::sync::mpsc::{self, Receiver, Sender}; | ||||||
| use tokio::time::error::Error as TimeError; | use tokio::time::error::Error as TimeError; | ||||||
| use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; | use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; | ||||||
| use types::{ | use types::{EthSpec, Hash256, Slot}; | ||||||
|     Attestation, EthSpec, Hash256, LightClientOptimisticUpdate, SignedAggregateAndProof, |  | ||||||
|     SignedBeaconBlock, SubnetId, |  | ||||||
| }; |  | ||||||
| 
 | 
 | ||||||
| const TASK_NAME: &str = "beacon_processor_reprocess_queue"; | const TASK_NAME: &str = "beacon_processor_reprocess_queue"; | ||||||
| const GOSSIP_BLOCKS: &str = "gossip_blocks"; | const GOSSIP_BLOCKS: &str = "gossip_blocks"; | ||||||
| @ -47,7 +39,7 @@ const LIGHT_CLIENT_UPDATES: &str = "lc_updates"; | |||||||
| 
 | 
 | ||||||
| /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts.
 | /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts.
 | ||||||
| /// This is to account for any slight drift in the system clock.
 | /// This is to account for any slight drift in the system clock.
 | ||||||
| const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); | pub const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); | ||||||
| 
 | 
 | ||||||
| /// For how long to queue aggregated and unaggregated attestations for re-processing.
 | /// For how long to queue aggregated and unaggregated attestations for re-processing.
 | ||||||
| pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); | pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); | ||||||
| @ -84,12 +76,12 @@ pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [ | |||||||
| 
 | 
 | ||||||
| /// Messages that the scheduler can receive.
 | /// Messages that the scheduler can receive.
 | ||||||
| #[derive(AsRefStr)] | #[derive(AsRefStr)] | ||||||
| pub enum ReprocessQueueMessage<T: BeaconChainTypes> { | pub enum ReprocessQueueMessage { | ||||||
|     /// A block that has been received early and we should queue for later processing.
 |     /// A block that has been received early and we should queue for later processing.
 | ||||||
|     EarlyBlock(QueuedGossipBlock<T>), |     EarlyBlock(QueuedGossipBlock), | ||||||
|     /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same
 |     /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same
 | ||||||
|     /// hash until the gossip block is imported.
 |     /// hash until the gossip block is imported.
 | ||||||
|     RpcBlock(QueuedRpcBlock<T::EthSpec>), |     RpcBlock(QueuedRpcBlock), | ||||||
|     /// A block that was successfully processed. We use this to handle attestations and light client updates
 |     /// A block that was successfully processed. We use this to handle attestations and light client updates
 | ||||||
|     /// for unknown blocks.
 |     /// for unknown blocks.
 | ||||||
|     BlockImported { |     BlockImported { | ||||||
| @ -97,139 +89,127 @@ pub enum ReprocessQueueMessage<T: BeaconChainTypes> { | |||||||
|         parent_root: Hash256, |         parent_root: Hash256, | ||||||
|     }, |     }, | ||||||
|     /// An unaggregated attestation that references an unknown block.
 |     /// An unaggregated attestation that references an unknown block.
 | ||||||
|     UnknownBlockUnaggregate(QueuedUnaggregate<T::EthSpec>), |     UnknownBlockUnaggregate(QueuedUnaggregate), | ||||||
|     /// An aggregated attestation that references an unknown block.
 |     /// An aggregated attestation that references an unknown block.
 | ||||||
|     UnknownBlockAggregate(QueuedAggregate<T::EthSpec>), |     UnknownBlockAggregate(QueuedAggregate), | ||||||
|     /// A light client optimistic update that references a parent root that has not been seen as a parent.
 |     /// A light client optimistic update that references a parent root that has not been seen as a parent.
 | ||||||
|     UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate<T::EthSpec>), |     UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), | ||||||
|     /// A new backfill batch that needs to be scheduled for processing.
 |     /// A new backfill batch that needs to be scheduled for processing.
 | ||||||
|     BackfillSync(QueuedBackfillBatch<T::EthSpec>), |     BackfillSync(QueuedBackfillBatch), | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// Events sent by the scheduler once they are ready for re-processing.
 | /// Events sent by the scheduler once they are ready for re-processing.
 | ||||||
| pub enum ReadyWork<T: BeaconChainTypes> { | pub enum ReadyWork { | ||||||
|     Block(QueuedGossipBlock<T>), |     Block(QueuedGossipBlock), | ||||||
|     RpcBlock(QueuedRpcBlock<T::EthSpec>), |     RpcBlock(QueuedRpcBlock), | ||||||
|     Unaggregate(QueuedUnaggregate<T::EthSpec>), |     IgnoredRpcBlock(IgnoredRpcBlock), | ||||||
|     Aggregate(QueuedAggregate<T::EthSpec>), |     Unaggregate(QueuedUnaggregate), | ||||||
|     LightClientUpdate(QueuedLightClientUpdate<T::EthSpec>), |     Aggregate(QueuedAggregate), | ||||||
|     BackfillSync(QueuedBackfillBatch<T::EthSpec>), |     LightClientUpdate(QueuedLightClientUpdate), | ||||||
|  |     BackfillSync(QueuedBackfillBatch), | ||||||
| } | } | ||||||
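The pattern running through these enums is type erasure: every generic payload (`T: BeaconChainTypes`, `T::EthSpec`) disappears, and each queued item instead carries a ready-to-run closure. The `AsyncFn` and `BlockingFn` aliases imported above are defined in the suppressed `lib.rs`; a plausible shape, stated as an assumption:

```rust
// Assumed definitions; the exact trait bounds live in the suppressed lib.rs.
use std::future::Future;
use std::pin::Pin;

/// An async work item, boxed and pinned so queues need not know its type.
pub type AsyncFn = Pin<Box<dyn Future<Output = ()> + Send + Sync>>;
/// A blocking work item, run once to completion on a worker thread.
pub type BlockingFn = Box<dyn FnOnce() + Send + Sync>;
```

This is what lets `QueuedRpcBlock` below pre-package both outcomes, a `process_fn` future for the import path and a cheap `ignore_fn` for when the queue is full, without the scheduler ever handling a `SignedBeaconBlock`.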
| 
 | 
 | ||||||
| /// An Attestation for which the corresponding block was not seen while processing, queued for
 | /// An Attestation for which the corresponding block was not seen while processing, queued for
 | ||||||
| /// later.
 | /// later.
 | ||||||
| pub struct QueuedUnaggregate<T: EthSpec> { | pub struct QueuedUnaggregate { | ||||||
|     pub peer_id: PeerId, |     pub beacon_block_root: Hash256, | ||||||
|     pub message_id: MessageId, |     pub process_fn: BlockingFn, | ||||||
|     pub attestation: Box<Attestation<T>>, |  | ||||||
|     pub subnet_id: SubnetId, |  | ||||||
|     pub should_import: bool, |  | ||||||
|     pub seen_timestamp: Duration, |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// An aggregated attestation for which the corresponding block was not seen while processing, queued for
 | /// An aggregated attestation for which the corresponding block was not seen while processing, queued for
 | ||||||
| /// later.
 | /// later.
 | ||||||
| pub struct QueuedAggregate<T: EthSpec> { | pub struct QueuedAggregate { | ||||||
|     pub peer_id: PeerId, |     pub beacon_block_root: Hash256, | ||||||
|     pub message_id: MessageId, |     pub process_fn: BlockingFn, | ||||||
|     pub attestation: Box<SignedAggregateAndProof<T>>, |  | ||||||
|     pub seen_timestamp: Duration, |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// A light client update for which the corresponding parent block was not seen while processing,
 | /// A light client update for which the corresponding parent block was not seen while processing,
 | ||||||
| /// queued for later.
 | /// queued for later.
 | ||||||
| pub struct QueuedLightClientUpdate<T: EthSpec> { | pub struct QueuedLightClientUpdate { | ||||||
|     pub peer_id: PeerId, |  | ||||||
|     pub message_id: MessageId, |  | ||||||
|     pub light_client_optimistic_update: Box<LightClientOptimisticUpdate<T>>, |  | ||||||
|     pub parent_root: Hash256, |     pub parent_root: Hash256, | ||||||
|     pub seen_timestamp: Duration, |     pub process_fn: BlockingFn, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// A block that arrived early and has been queued for later import.
 | /// A block that arrived early and has been queued for later import.
 | ||||||
| pub struct QueuedGossipBlock<T: BeaconChainTypes> { | pub struct QueuedGossipBlock { | ||||||
|     pub peer_id: PeerId, |     pub beacon_block_slot: Slot, | ||||||
|     pub block: Box<GossipVerifiedBlock<T>>, |     pub beacon_block_root: Hash256, | ||||||
|     pub seen_timestamp: Duration, |     pub process_fn: AsyncFn, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// A block that arrived for processing when the same block was being imported over gossip.
 | /// A block that arrived for processing when the same block was being imported over gossip.
 | ||||||
| /// It is queued for later import.
 | /// It is queued for later import.
 | ||||||
| pub struct QueuedRpcBlock<T: EthSpec> { | pub struct QueuedRpcBlock { | ||||||
|     pub block_root: Hash256, |     pub beacon_block_root: Hash256, | ||||||
|     pub block: Arc<SignedBeaconBlock<T>>, |     /// Processes/imports the block.
 | ||||||
|     pub process_type: BlockProcessType, |     pub process_fn: AsyncFn, | ||||||
|     pub seen_timestamp: Duration, |     /// Ignores the block.
 | ||||||
|     /// Indicates if the beacon chain should process this block or not.
 |     pub ignore_fn: BlockingFn, | ||||||
|     /// We use this to ignore block processing when rpc block queues are full.
 | } | ||||||
|     pub should_process: bool, | 
 | ||||||
|  | /// A block that arrived for processing when the same block was being imported over gossip.
 | ||||||
|  | /// It is queued for later import.
 | ||||||
|  | pub struct IgnoredRpcBlock { | ||||||
|  |     pub process_fn: BlockingFn, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// A backfill batch work that has been queued for processing later.
 | /// A backfill batch work that has been queued for processing later.
 | ||||||
| #[derive(Clone)] | pub struct QueuedBackfillBatch(pub AsyncFn); | ||||||
| pub struct QueuedBackfillBatch<E: EthSpec> { |  | ||||||
|     pub process_id: ChainSegmentProcessId, |  | ||||||
|     pub blocks: Vec<Arc<SignedBeaconBlock<E>>>, |  | ||||||
| } |  | ||||||
| 
 | 
 | ||||||
| impl<T: BeaconChainTypes> TryFrom<WorkEvent<T>> for QueuedBackfillBatch<T::EthSpec> { | impl<T: EthSpec> TryFrom<WorkEvent<T>> for QueuedBackfillBatch { | ||||||
|     type Error = WorkEvent<T>; |     type Error = WorkEvent<T>; | ||||||
| 
 | 
 | ||||||
|     fn try_from(event: WorkEvent<T>) -> Result<Self, WorkEvent<T>> { |     fn try_from(event: WorkEvent<T>) -> Result<Self, WorkEvent<T>> { | ||||||
|         match event { |         match event { | ||||||
|             WorkEvent { |             WorkEvent { | ||||||
|                 work: |                 work: Work::ChainSegmentBackfill(process_fn), | ||||||
|                     Work::ChainSegment { |  | ||||||
|                         process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_), |  | ||||||
|                         blocks, |  | ||||||
|                     }, |  | ||||||
|                 .. |                 .. | ||||||
|             } => Ok(QueuedBackfillBatch { process_id, blocks }), |             } => Ok(QueuedBackfillBatch(process_fn)), | ||||||
|             _ => Err(event), |             _ => Err(event), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: BeaconChainTypes> From<QueuedBackfillBatch<T::EthSpec>> for WorkEvent<T> { | impl<T: EthSpec> From<QueuedBackfillBatch> for WorkEvent<T> { | ||||||
|     fn from(queued_backfill_batch: QueuedBackfillBatch<T::EthSpec>) -> WorkEvent<T> { |     fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent<T> { | ||||||
|         WorkEvent::chain_segment( |         WorkEvent { | ||||||
|             queued_backfill_batch.process_id, |             drop_during_sync: false, | ||||||
|             queued_backfill_batch.blocks, |             work: Work::ChainSegmentBackfill(queued_backfill_batch.0), | ||||||
|         ) |         } | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// Unifies the different messages processed by the block delay queue.
 | /// Unifies the different messages processed by the block delay queue.
 | ||||||
| enum InboundEvent<T: BeaconChainTypes> { | enum InboundEvent { | ||||||
|     /// A gossip block that was queued for later processing and is ready for import.
 |     /// A gossip block that was queued for later processing and is ready for import.
 | ||||||
|     ReadyGossipBlock(QueuedGossipBlock<T>), |     ReadyGossipBlock(QueuedGossipBlock), | ||||||
|     /// A rpc block that was queued because the same gossip block was being imported
 |     /// A rpc block that was queued because the same gossip block was being imported
 | ||||||
|     /// will now be retried for import.
 |     /// will now be retried for import.
 | ||||||
|     ReadyRpcBlock(QueuedRpcBlock<T::EthSpec>), |     ReadyRpcBlock(QueuedRpcBlock), | ||||||
|     /// An aggregated or unaggregated attestation is ready for re-processing.
 |     /// An aggregated or unaggregated attestation is ready for re-processing.
 | ||||||
|     ReadyAttestation(QueuedAttestationId), |     ReadyAttestation(QueuedAttestationId), | ||||||
|     /// A light client update that is ready for re-processing.
 |     /// A light client update that is ready for re-processing.
 | ||||||
|     ReadyLightClientUpdate(QueuedLightClientUpdateId), |     ReadyLightClientUpdate(QueuedLightClientUpdateId), | ||||||
|     /// A backfill batch that was queued is ready for processing.
 |     /// A backfill batch that was queued is ready for processing.
 | ||||||
|     ReadyBackfillSync(QueuedBackfillBatch<T::EthSpec>), |     ReadyBackfillSync(QueuedBackfillBatch), | ||||||
|     /// A `DelayQueue` returned an error.
 |     /// A `DelayQueue` returned an error.
 | ||||||
|     DelayQueueError(TimeError, &'static str), |     DelayQueueError(TimeError, &'static str), | ||||||
|     /// A message sent to the `ReprocessQueue`
 |     /// A message sent to the `ReprocessQueue`
 | ||||||
|     Msg(ReprocessQueueMessage<T>), |     Msg(ReprocessQueueMessage), | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// Manages scheduling works that need to be later re-processed.
 | /// Manages scheduling works that need to be later re-processed.
 | ||||||
| struct ReprocessQueue<T: BeaconChainTypes> { | struct ReprocessQueue<S> { | ||||||
|     /// Receiver of messages relevant to schedule works for reprocessing.
 |     /// Receiver of messages relevant to schedule works for reprocessing.
 | ||||||
|     work_reprocessing_rx: Receiver<ReprocessQueueMessage<T>>, |     work_reprocessing_rx: Receiver<ReprocessQueueMessage>, | ||||||
|     /// Sender of works once they become ready
 |     /// Sender of works once they become ready
 | ||||||
|     ready_work_tx: Sender<ReadyWork<T>>, |     ready_work_tx: Sender<ReadyWork>, | ||||||
| 
 | 
 | ||||||
|     /* Queues */ |     /* Queues */ | ||||||
|     /// Queue to manage scheduled early blocks.
 |     /// Queue to manage scheduled early blocks.
 | ||||||
|     gossip_block_delay_queue: DelayQueue<QueuedGossipBlock<T>>, |     gossip_block_delay_queue: DelayQueue<QueuedGossipBlock>, | ||||||
|     /// Queue to manage scheduled early blocks.
 |     /// Queue to manage scheduled early blocks.
 | ||||||
|     rpc_block_delay_queue: DelayQueue<QueuedRpcBlock<T::EthSpec>>, |     rpc_block_delay_queue: DelayQueue<QueuedRpcBlock>, | ||||||
|     /// Queue to manage scheduled attestations.
 |     /// Queue to manage scheduled attestations.
 | ||||||
|     attestations_delay_queue: DelayQueue<QueuedAttestationId>, |     attestations_delay_queue: DelayQueue<QueuedAttestationId>, | ||||||
|     /// Queue to manage scheduled light client updates.
 |     /// Queue to manage scheduled light client updates.
 | ||||||
| @ -239,17 +219,17 @@ struct ReprocessQueue<T: BeaconChainTypes> { | |||||||
|     /// Queued blocks.
 |     /// Queued blocks.
 | ||||||
|     queued_gossip_block_roots: HashSet<Hash256>, |     queued_gossip_block_roots: HashSet<Hash256>, | ||||||
|     /// Queued aggregated attestations.
 |     /// Queued aggregated attestations.
 | ||||||
|     queued_aggregates: FnvHashMap<usize, (QueuedAggregate<T::EthSpec>, DelayKey)>, |     queued_aggregates: FnvHashMap<usize, (QueuedAggregate, DelayKey)>, | ||||||
|     /// Queued attestations.
 |     /// Queued attestations.
 | ||||||
|     queued_unaggregates: FnvHashMap<usize, (QueuedUnaggregate<T::EthSpec>, DelayKey)>, |     queued_unaggregates: FnvHashMap<usize, (QueuedUnaggregate, DelayKey)>, | ||||||
|     /// Attestations (aggregated and unaggregated) per root.
 |     /// Attestations (aggregated and unaggregated) per root.
 | ||||||
|     awaiting_attestations_per_root: HashMap<Hash256, Vec<QueuedAttestationId>>, |     awaiting_attestations_per_root: HashMap<Hash256, Vec<QueuedAttestationId>>, | ||||||
|     /// Queued Light Client Updates.
 |     /// Queued Light Client Updates.
 | ||||||
|     queued_lc_updates: FnvHashMap<usize, (QueuedLightClientUpdate<T::EthSpec>, DelayKey)>, |     queued_lc_updates: FnvHashMap<usize, (QueuedLightClientUpdate, DelayKey)>, | ||||||
|     /// Light Client Updates per parent_root.
 |     /// Light Client Updates per parent_root.
 | ||||||
|     awaiting_lc_updates_per_parent_root: HashMap<Hash256, Vec<QueuedLightClientUpdateId>>, |     awaiting_lc_updates_per_parent_root: HashMap<Hash256, Vec<QueuedLightClientUpdateId>>, | ||||||
|     /// Queued backfill batches
 |     /// Queued backfill batches
 | ||||||
|     queued_backfill_batches: Vec<QueuedBackfillBatch<T::EthSpec>>, |     queued_backfill_batches: Vec<QueuedBackfillBatch>, | ||||||
| 
 | 
 | ||||||
|     /* Aux */ |     /* Aux */ | ||||||
|     /// Next attestation id, used for both aggregated and unaggregated attestations
 |     /// Next attestation id, used for both aggregated and unaggregated attestations
 | ||||||
| @ -260,7 +240,7 @@ struct ReprocessQueue<T: BeaconChainTypes> { | |||||||
|     attestation_delay_debounce: TimeLatch, |     attestation_delay_debounce: TimeLatch, | ||||||
|     lc_update_delay_debounce: TimeLatch, |     lc_update_delay_debounce: TimeLatch, | ||||||
|     next_backfill_batch_event: Option<Pin<Box<tokio::time::Sleep>>>, |     next_backfill_batch_event: Option<Pin<Box<tokio::time::Sleep>>>, | ||||||
|     slot_clock: Pin<Box<T::SlotClock>>, |     slot_clock: Pin<Box<S>>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| pub type QueuedLightClientUpdateId = usize; | pub type QueuedLightClientUpdateId = usize; | ||||||
| @ -271,20 +251,20 @@ enum QueuedAttestationId { | |||||||
|     Unaggregate(usize), |     Unaggregate(usize), | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: EthSpec> QueuedAggregate<T> { | impl QueuedAggregate { | ||||||
|     pub fn beacon_block_root(&self) -> &Hash256 { |     pub fn beacon_block_root(&self) -> &Hash256 { | ||||||
|         &self.attestation.message.aggregate.data.beacon_block_root |         &self.beacon_block_root | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: EthSpec> QueuedUnaggregate<T> { | impl QueuedUnaggregate { | ||||||
|     pub fn beacon_block_root(&self) -> &Hash256 { |     pub fn beacon_block_root(&self) -> &Hash256 { | ||||||
|         &self.attestation.data.beacon_block_root |         &self.beacon_block_root | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<T: BeaconChainTypes> Stream for ReprocessQueue<T> { | impl<S: SlotClock> Stream for ReprocessQueue<S> { | ||||||
|     type Item = InboundEvent<T>; |     type Item = InboundEvent; | ||||||
| 
 | 
 | ||||||
|     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { |     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { | ||||||
|         // NOTE: implementing `Stream` is not necessary but allows us to maintain the future selection
 |         // NOTE: implementing `Stream` is not necessary but allows us to maintain the future selection
 | ||||||
| @ -375,16 +355,18 @@ impl<T: BeaconChainTypes> Stream for ReprocessQueue<T> { | |||||||
| /// Starts the job that manages scheduling works that need re-processing. The returned `Sender`
 | /// Starts the job that manages scheduling works that need re-processing. Messages arrive on
 | ||||||
| /// gives the communicating channel to receive those works. Once a work is ready, it is sent back
 | /// the given `work_reprocessing_rx` channel. Once a work is ready, it is sent back
 | ||||||
| /// via `ready_work_tx`.
 | /// via `ready_work_tx`.
 | ||||||
| pub fn spawn_reprocess_scheduler<T: BeaconChainTypes>( | pub fn spawn_reprocess_scheduler<S: SlotClock + 'static>( | ||||||
|     ready_work_tx: Sender<ReadyWork<T>>, |     ready_work_tx: Sender<ReadyWork>, | ||||||
|  |     work_reprocessing_rx: Receiver<ReprocessQueueMessage>, | ||||||
|     executor: &TaskExecutor, |     executor: &TaskExecutor, | ||||||
|     slot_clock: T::SlotClock, |     slot_clock: S, | ||||||
|     log: Logger, |     log: Logger, | ||||||
| ) -> Sender<ReprocessQueueMessage<T>> { |     maximum_gossip_clock_disparity: Duration, | ||||||
|     let (work_reprocessing_tx, work_reprocessing_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); | ) -> Result<(), String> { | ||||||
|     // Basic sanity check.
 |     // Sanity check
 | ||||||
|     assert!(ADDITIONAL_QUEUED_BLOCK_DELAY < MAXIMUM_GOSSIP_CLOCK_DISPARITY); |     if ADDITIONAL_QUEUED_BLOCK_DELAY >= maximum_gossip_clock_disparity { | ||||||
| 
 |         return Err("The block delay and gossip disparity don't match.".to_string()); | ||||||
|  |     } | ||||||
|     let mut queue = ReprocessQueue { |     let mut queue = ReprocessQueue { | ||||||
|         work_reprocessing_rx, |         work_reprocessing_rx, | ||||||
|         ready_work_tx, |         ready_work_tx, | ||||||
| @ -423,19 +405,18 @@ pub fn spawn_reprocess_scheduler<T: BeaconChainTypes>( | |||||||
|         }, |         }, | ||||||
|         TASK_NAME, |         TASK_NAME, | ||||||
|     ); |     ); | ||||||
| 
 |     Ok(()) | ||||||
|     work_reprocessing_tx |  | ||||||
| } | } | ||||||
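Two things change in `spawn_reprocess_scheduler`: the caller now owns the channel pair (the receiver is passed in rather than created here), and the old `assert!` against `MAXIMUM_GOSSIP_CLOCK_DISPARITY` becomes a recoverable `Err`, since the disparity is now a parameter. A runnable model of just the new sanity check:

```rust
use std::time::Duration;

const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5);

// Model of the startup check: a bad configuration is reported to the caller
// as an `Err` instead of panicking the process.
fn check_config(maximum_gossip_clock_disparity: Duration) -> Result<(), String> {
    if ADDITIONAL_QUEUED_BLOCK_DELAY >= maximum_gossip_clock_disparity {
        return Err("The block delay and gossip disparity don't match.".to_string());
    }
    Ok(())
}

fn main() {
    assert!(check_config(Duration::from_millis(500)).is_ok());
    assert!(check_config(Duration::from_millis(1)).is_err());
}
```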
| 
 | 
 | ||||||
| impl<T: BeaconChainTypes> ReprocessQueue<T> { | impl<S: SlotClock> ReprocessQueue<S> { | ||||||
|     fn handle_message(&mut self, msg: InboundEvent<T>, slot_clock: &T::SlotClock, log: &Logger) { |     fn handle_message(&mut self, msg: InboundEvent, slot_clock: &S, log: &Logger) { | ||||||
|         use ReprocessQueueMessage::*; |         use ReprocessQueueMessage::*; | ||||||
|         match msg { |         match msg { | ||||||
|             // Some block has been indicated as "early" and should be processed when the
 |             // Some block has been indicated as "early" and should be processed when the
 | ||||||
|             // appropriate slot arrives.
 |             // appropriate slot arrives.
 | ||||||
|             InboundEvent::Msg(EarlyBlock(early_block)) => { |             InboundEvent::Msg(EarlyBlock(early_block)) => { | ||||||
|                 let block_slot = early_block.block.block.slot(); |                 let block_slot = early_block.beacon_block_slot; | ||||||
|                 let block_root = early_block.block.block_root; |                 let block_root = early_block.beacon_block_root; | ||||||
| 
 | 
 | ||||||
|                 // Don't add the same block to the queue twice. This prevents DoS attacks.
 |                 // Don't add the same block to the queue twice. This prevents DoS attacks.
 | ||||||
|                 if self.queued_gossip_block_roots.contains(&block_root) { |                 if self.queued_gossip_block_roots.contains(&block_root) { | ||||||
| @ -494,7 +475,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | |||||||
|             // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY`
 |             // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY`
 | ||||||
|             // and then send the rpc block back for processing assuming the gossip import
 |             // and then send the rpc block back for processing assuming the gossip import
 | ||||||
|             // has completed by then.
 |             // has completed by then.
 | ||||||
|             InboundEvent::Msg(RpcBlock(mut rpc_block)) => { |             InboundEvent::Msg(RpcBlock(rpc_block)) => { | ||||||
|                 // Check to ensure this won't over-fill the queue.
 |                 // Check to ensure this won't over-fill the queue.
 | ||||||
|                 if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { |                 if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { | ||||||
|                     if self.rpc_block_debounce.elapsed() { |                     if self.rpc_block_debounce.elapsed() { | ||||||
| @ -507,10 +488,11 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | |||||||
|                     } |                     } | ||||||
|                     // Return the block to the beacon processor signalling to
 |                     // Return the block to the beacon processor signalling to
 | ||||||
|                     // ignore processing for this block
 |                     // ignore processing for this block
 | ||||||
|                     rpc_block.should_process = false; |  | ||||||
|                     if self |                     if self | ||||||
|                         .ready_work_tx |                         .ready_work_tx | ||||||
|                         .try_send(ReadyWork::RpcBlock(rpc_block)) |                         .try_send(ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock { | ||||||
|  |                             process_fn: rpc_block.ignore_fn, | ||||||
|  |                         })) | ||||||
|                         .is_err() |                         .is_err() | ||||||
|                     { |                     { | ||||||
|                         error!( |                         error!( | ||||||
| @ -529,7 +511,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | |||||||
|                 debug!( |                 debug!( | ||||||
|                     log, |                     log, | ||||||
|                     "Sending rpc block for reprocessing"; |                     "Sending rpc block for reprocessing"; | ||||||
|                     "block_root" => %queued_rpc_block.block.canonical_root() |                     "block_root" => %queued_rpc_block.beacon_block_root | ||||||
|                 ); |                 ); | ||||||
|                 if self |                 if self | ||||||
|                     .ready_work_tx |                     .ready_work_tx | ||||||
| @ -767,7 +749,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | |||||||
|             } |             } | ||||||
|             // A block that was queued for later processing is now ready to be processed.
 |             // A block that was queued for later processing is now ready to be processed.
 | ||||||
|             InboundEvent::ReadyGossipBlock(ready_block) => { |             InboundEvent::ReadyGossipBlock(ready_block) => { | ||||||
|                 let block_root = ready_block.block.block_root; |                 let block_root = ready_block.beacon_block_root; | ||||||
| 
 | 
 | ||||||
|                 if !self.queued_gossip_block_roots.remove(&block_root) { |                 if !self.queued_gossip_block_roots.remove(&block_root) { | ||||||
|                     // Log an error to alert that we've made a bad assumption about how this
 |                     // Log an error to alert that we've made a bad assumption about how this
 | ||||||
| @ -885,18 +867,28 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | |||||||
|                     "millis_from_slot_start" => millis_from_slot_start |                     "millis_from_slot_start" => millis_from_slot_start | ||||||
|                 ); |                 ); | ||||||
| 
 | 
 | ||||||
|                 if self |                 match self | ||||||
|                     .ready_work_tx |                     .ready_work_tx | ||||||
|                     .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) |                     .try_send(ReadyWork::BackfillSync(queued_backfill_batch)) | ||||||
|                     .is_err() |  | ||||||
|                 { |                 { | ||||||
|                     error!( |                     // The message was sent successfully.
 | ||||||
|  |                     Ok(()) => (), | ||||||
|  |                     // The message was not sent, recover it from the returned `Err`.
 | ||||||
|  |                     Err(mpsc::error::TrySendError::Full(ReadyWork::BackfillSync(batch))) | ||||||
|  |                     | Err(mpsc::error::TrySendError::Closed(ReadyWork::BackfillSync(batch))) => { | ||||||
|  |                         error!( | ||||||
|  |                             log, | ||||||
|  |                             "Failed to send scheduled backfill work"; | ||||||
|  |                             "info" => "sending work back to queue" | ||||||
|  |                         ); | ||||||
|  |                         self.queued_backfill_batches.insert(0, batch) | ||||||
|  |                     } | ||||||
|  |                     // The message was not sent and we didn't get the correct
 | ||||||
|  |                     // return result. This is a logic error.
 | ||||||
|  |                     _ => crit!( | ||||||
|                         log, |                         log, | ||||||
|                         "Failed to send scheduled backfill work"; |                         "Unexpected return from try_send error"; | ||||||
|                         "info" => "sending work back to queue" |                     ), | ||||||
|                     ); |  | ||||||
|                     self.queued_backfill_batches |  | ||||||
|                         .insert(0, queued_backfill_batch); |  | ||||||
|                 } |                 } | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
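This rewrite is forced by `QueuedBackfillBatch` losing its `Clone` derive (it now owns a boxed future): the old code cloned the batch before `try_send`, while the new code recovers the unsent value from the `TrySendError` itself. A runnable model of the recovery pattern, with a `u64` standing in for the batch:

```rust
use tokio::sync::mpsc;

fn requeue_on_failure(tx: &mpsc::Sender<u64>, queued: &mut Vec<u64>, batch: u64) {
    match tx.try_send(batch) {
        Ok(()) => (),
        // `try_send` consumes the message, but both error variants hand it
        // back, so the batch can be requeued without an up-front clone.
        Err(mpsc::error::TrySendError::Full(batch))
        | Err(mpsc::error::TrySendError::Closed(batch)) => {
            queued.insert(0, batch);
        }
    }
}

fn main() {
    let (tx, _rx) = mpsc::channel::<u64>(1);
    let mut queued = Vec::new();
    requeue_on_failure(&tx, &mut queued, 1); // fits in the channel
    requeue_on_failure(&tx, &mut queued, 2); // channel full, so requeued
    assert_eq!(queued, vec![2]);
}
```

The real match above still needs its catch-all `crit!` arm because the channel carries the whole `ReadyWork` enum, so the error could in principle hand back a different variant; with a plain payload the two error arms are exhaustive.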
| @ -927,7 +919,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | |||||||
|         // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue
 |         // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue
 | ||||||
|         if !self.queued_backfill_batches.is_empty() { |         if !self.queued_backfill_batches.is_empty() { | ||||||
|             self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( |             self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( | ||||||
|                 ReprocessQueue::<T>::duration_until_next_backfill_batch_event(&self.slot_clock), |                 ReprocessQueue::<S>::duration_until_next_backfill_batch_event(&self.slot_clock), | ||||||
|             ))); |             ))); | ||||||
|         } else { |         } else { | ||||||
|             self.next_backfill_batch_event = None |             self.next_backfill_batch_event = None | ||||||
| @ -936,7 +928,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | |||||||
| 
 | 
 | ||||||
|     /// Returns duration until the next scheduled processing time. The schedule ensures that backfill
 |     /// Returns duration until the next scheduled processing time. The schedule ensures that backfill
 | ||||||
|     /// processing is done in windows of time that aren't critical
 |     /// processing is done in windows of time that aren't critical
 | ||||||
|     fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { |     fn duration_until_next_backfill_batch_event(slot_clock: &S) -> Duration { | ||||||
|         let slot_duration = slot_clock.slot_duration(); |         let slot_duration = slot_clock.slot_duration(); | ||||||
|         slot_clock |         slot_clock | ||||||
|             .millis_from_current_slot_start() |             .millis_from_current_slot_start() | ||||||
| @ -966,16 +958,9 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> { | |||||||
| #[cfg(test)] | #[cfg(test)] | ||||||
| mod tests { | mod tests { | ||||||
|     use super::*; |     use super::*; | ||||||
|     use beacon_chain::builder::Witness; |  | ||||||
|     use beacon_chain::eth1_chain::CachingEth1Backend; |  | ||||||
|     use slot_clock::TestingSlotClock; |     use slot_clock::TestingSlotClock; | ||||||
|     use store::MemoryStore; |  | ||||||
|     use types::MainnetEthSpec as E; |  | ||||||
|     use types::Slot; |     use types::Slot; | ||||||
| 
 | 
 | ||||||
|     type TestBeaconChainType = |  | ||||||
|         Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>; |  | ||||||
| 
 |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn backfill_processing_schedule_calculation() { |     fn backfill_processing_schedule_calculation() { | ||||||
|         let slot_duration = Duration::from_secs(12); |         let slot_duration = Duration::from_secs(12); | ||||||
| @ -988,7 +973,7 @@ mod tests { | |||||||
| 
 | 
 | ||||||
|         for &event_duration_from_slot_start in event_times.iter() { |         for &event_duration_from_slot_start in event_times.iter() { | ||||||
|             let duration_to_next_event = |             let duration_to_next_event = | ||||||
|                 ReprocessQueue::<TestBeaconChainType>::duration_until_next_backfill_batch_event( |                 ReprocessQueue::<TestingSlotClock>::duration_until_next_backfill_batch_event( | ||||||
|                     &slot_clock, |                     &slot_clock, | ||||||
|                 ); |                 ); | ||||||
| 
 | 
 | ||||||
| @ -1005,7 +990,7 @@ mod tests { | |||||||
|         // check for next event beyond the current slot
 |         // check for next event beyond the current slot
 | ||||||
|         let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); |         let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); | ||||||
|         let duration_to_next_event = |         let duration_to_next_event = | ||||||
|             ReprocessQueue::<TestBeaconChainType>::duration_until_next_backfill_batch_event( |             ReprocessQueue::<TestingSlotClock>::duration_until_next_backfill_batch_event( | ||||||
|                 &slot_clock, |                 &slot_clock, | ||||||
|             ); |             ); | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
| @ -1,13 +1,13 @@ | |||||||
| [package] | [package] | ||||||
| name = "builder_client" | name = "builder_client" | ||||||
| version = "0.1.0" | version = "0.1.0" | ||||||
| edition = "2021" | edition = { workspace = true } | ||||||
| authors = ["Sean Anderson <sean@sigmaprime.io>"] | authors = ["Sean Anderson <sean@sigmaprime.io>"] | ||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| reqwest = { version = "0.11.0", features = ["json","stream"] } | reqwest = { workspace = true } | ||||||
| sensitive_url = { path = "../../common/sensitive_url" } | sensitive_url = { workspace = true } | ||||||
| eth2 = { path = "../../common/eth2" } | eth2 = { workspace = true } | ||||||
| serde = { version = "1.0.116", features = ["derive"] } | serde = { workspace = true } | ||||||
| serde_json = "1.0.58" | serde_json = { workspace = true } | ||||||
| lighthouse_version = { path = "../../common/lighthouse_version" } | lighthouse_version = { workspace = true } | ||||||
|  | |||||||
| @ -72,7 +72,7 @@ impl BuilderHttpClient { | |||||||
|             .await? |             .await? | ||||||
|             .json() |             .json() | ||||||
|             .await |             .await | ||||||
|             .map_err(Error::Reqwest) |             .map_err(Into::into) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Perform a HTTP GET request, returning the `Response` for further processing.
 |     /// Perform a HTTP GET request, returning the `Response` for further processing.
 | ||||||
| @ -85,7 +85,7 @@ impl BuilderHttpClient { | |||||||
|         if let Some(timeout) = timeout { |         if let Some(timeout) = timeout { | ||||||
|             builder = builder.timeout(timeout); |             builder = builder.timeout(timeout); | ||||||
|         } |         } | ||||||
|         let response = builder.send().await.map_err(Error::Reqwest)?; |         let response = builder.send().await.map_err(Error::from)?; | ||||||
|         ok_or_error(response).await |         ok_or_error(response).await | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -114,7 +114,7 @@ impl BuilderHttpClient { | |||||||
|         if let Some(timeout) = timeout { |         if let Some(timeout) = timeout { | ||||||
|             builder = builder.timeout(timeout); |             builder = builder.timeout(timeout); | ||||||
|         } |         } | ||||||
|         let response = builder.json(body).send().await.map_err(Error::Reqwest)?; |         let response = builder.json(body).send().await.map_err(Error::from)?; | ||||||
|         ok_or_error(response).await |         ok_or_error(response).await | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
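All three hunks in this file rely on the same unseen change: `builder_client`'s `Error` has evidently gained a `From<reqwest::Error>` impl, so the explicit `map_err(Error::Reqwest)` collapses to `map_err(Into::into)` or `map_err(Error::from)`. A runnable model with a stub standing in for `reqwest::Error`:

```rust
// `HttpError` is a hypothetical stand-in for `reqwest::Error`.
#[derive(Debug)]
struct HttpError(&'static str);

#[derive(Debug)]
enum Error {
    Reqwest(HttpError),
}

impl From<HttpError> for Error {
    fn from(e: HttpError) -> Self {
        Error::Reqwest(e)
    }
}

fn fetch() -> Result<(), HttpError> {
    Err(HttpError("timeout"))
}

fn call_site() -> Result<(), Error> {
    // `Into::into` now performs the conversion that `Error::Reqwest`
    // used to spell out at every call site.
    fetch().map_err(Into::into)
}

fn main() {
    assert!(matches!(call_site(), Err(Error::Reqwest(_))));
}
```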
|  | |||||||
| @ -2,44 +2,46 @@ | |||||||
| name = "client" | name = "client" | ||||||
| version = "0.2.0" | version = "0.2.0" | ||||||
| authors = ["Sigma Prime <contact@sigmaprime.io>"] | authors = ["Sigma Prime <contact@sigmaprime.io>"] | ||||||
| edition = "2021" | edition = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| serde_yaml = "0.8.13" | serde_yaml = { workspace = true } | ||||||
| state_processing = { path = "../../consensus/state_processing" } | state_processing = { workspace = true } | ||||||
| operation_pool = { path = "../operation_pool" } | operation_pool = { workspace = true } | ||||||
| tokio = "1.14.0" | tokio = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| beacon_chain = { path = "../beacon_chain" } | beacon_chain = { workspace = true } | ||||||
| store = { path = "../store" } | store = { workspace = true } | ||||||
| network = { path = "../network" } | network = { workspace = true } | ||||||
| timer = { path = "../timer" } | timer = { path = "../timer" } | ||||||
| lighthouse_network = { path = "../lighthouse_network" } | lighthouse_network = { workspace = true } | ||||||
| logging = { path = "../../common/logging" } | logging = { workspace = true } | ||||||
| parking_lot = "0.12.0" | parking_lot = { workspace = true } | ||||||
| types = { path = "../../consensus/types" } | types = { workspace = true } | ||||||
| eth2_config = { path = "../../common/eth2_config" } | eth2_config = { workspace = true } | ||||||
| slot_clock = { path = "../../common/slot_clock" } | slot_clock = { workspace = true } | ||||||
| serde = "1.0.116" | serde = { workspace = true } | ||||||
| serde_derive = "1.0.116" | serde_derive = "1.0.116" | ||||||
| error-chain = "0.12.4" | error-chain = { workspace = true } | ||||||
| slog = { version = "2.5.2", features = ["max_level_trace"] } | slog = { workspace = true } | ||||||
| tokio = "1.14.0" | tokio = { workspace = true } | ||||||
| dirs = "3.0.1" | dirs = { workspace = true } | ||||||
| eth1 = { path = "../eth1" } | eth1 = { workspace = true } | ||||||
| eth2 = { path = "../../common/eth2" } | eth2 = { workspace = true } | ||||||
| sensitive_url = { path = "../../common/sensitive_url" } | sensitive_url = { workspace = true } | ||||||
| genesis = { path = "../genesis" } | genesis = { workspace = true } | ||||||
| task_executor = { path = "../../common/task_executor" } | task_executor = { workspace = true } | ||||||
| environment = { path = "../../lighthouse/environment" } | environment = { workspace = true } | ||||||
| lazy_static = "1.4.0" | lazy_static = { workspace = true } | ||||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics" } | lighthouse_metrics = { workspace = true } | ||||||
| time = "0.3.5" | time = "0.3.5" | ||||||
| directory = {path = "../../common/directory"} | directory = { workspace = true } | ||||||
| http_api = { path = "../http_api" } | http_api = { workspace = true } | ||||||
| http_metrics = { path = "../http_metrics" } | http_metrics = { path = "../http_metrics" } | ||||||
| slasher = { path = "../../slasher", default-features = false } | slasher = { workspace = true } | ||||||
| slasher_service = { path = "../../slasher/service" } | slasher_service = { path = "../../slasher/service" } | ||||||
| monitoring_api = {path = "../../common/monitoring_api"} | monitoring_api = { workspace = true } | ||||||
| execution_layer = { path = "../execution_layer" } | execution_layer = { workspace = true } | ||||||
|  | beacon_processor = { workspace = true } | ||||||
|  | num_cpus = { workspace = true } | ||||||
|  | |||||||
| @ -11,8 +11,10 @@ use beacon_chain::{ | |||||||
|     slot_clock::{SlotClock, SystemTimeSlotClock}, |     slot_clock::{SlotClock, SystemTimeSlotClock}, | ||||||
|     state_advance_timer::spawn_state_advance_timer, |     state_advance_timer::spawn_state_advance_timer, | ||||||
|     store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, |     store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, | ||||||
|     BeaconChain, BeaconChainTypes, Eth1ChainBackend, ServerSentEventHandler, |     BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, | ||||||
| }; | }; | ||||||
|  | use beacon_processor::BeaconProcessorConfig; | ||||||
|  | use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; | ||||||
| use environment::RuntimeContext; | use environment::RuntimeContext; | ||||||
| use eth1::{Config as Eth1Config, Service as Eth1Service}; | use eth1::{Config as Eth1Config, Service as Eth1Service}; | ||||||
| use eth2::{ | use eth2::{ | ||||||
| @ -71,6 +73,8 @@ pub struct ClientBuilder<T: BeaconChainTypes> { | |||||||
|     http_api_config: http_api::Config, |     http_api_config: http_api::Config, | ||||||
|     http_metrics_config: http_metrics::Config, |     http_metrics_config: http_metrics::Config, | ||||||
|     slasher: Option<Arc<Slasher<T::EthSpec>>>, |     slasher: Option<Arc<Slasher<T::EthSpec>>>, | ||||||
|  |     beacon_processor_config: Option<BeaconProcessorConfig>, | ||||||
|  |     beacon_processor_channels: Option<BeaconProcessorChannels<T::EthSpec>>, | ||||||
|     eth_spec_instance: T::EthSpec, |     eth_spec_instance: T::EthSpec, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -104,6 +108,8 @@ where | |||||||
|             http_metrics_config: <_>::default(), |             http_metrics_config: <_>::default(), | ||||||
|             slasher: None, |             slasher: None, | ||||||
|             eth_spec_instance, |             eth_spec_instance, | ||||||
|  |             beacon_processor_config: None, | ||||||
|  |             beacon_processor_channels: None, | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -119,6 +125,12 @@ where | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     pub fn beacon_processor(mut self, config: BeaconProcessorConfig) -> Self { | ||||||
|  |         self.beacon_processor_channels = Some(BeaconProcessorChannels::new(&config)); | ||||||
|  |         self.beacon_processor_config = Some(config); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  | 
 | ||||||
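The new `beacon_processor` hook is the interesting builder step: it derives the channel bundle from the config before storing the config itself, so later build stages can hand receivers to the processor and keep senders for the network stack. A stand-alone model of that ordering (`Config` and `Channels` are hypothetical stand-ins for `BeaconProcessorConfig` and `BeaconProcessorChannels`):

```rust
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};

struct Config {
    queue_len: usize,
}

struct Channels {
    tx: SyncSender<u64>,
    rx: Receiver<u64>,
}

impl Channels {
    fn new(config: &Config) -> Self {
        let (tx, rx) = sync_channel(config.queue_len);
        Channels { tx, rx }
    }
}

#[derive(Default)]
struct Builder {
    config: Option<Config>,
    channels: Option<Channels>,
}

impl Builder {
    // Channels are built from a borrow of the config before the config is
    // moved into the builder, mirroring `BeaconProcessorChannels::new(&config)`.
    fn beacon_processor(mut self, config: Config) -> Self {
        self.channels = Some(Channels::new(&config));
        self.config = Some(config);
        self
    }
}

fn main() {
    let b = Builder::default().beacon_processor(Config { queue_len: 16_384 });
    assert!(b.config.is_some() && b.channels.is_some());
}
```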
|     pub fn slasher(mut self, slasher: Arc<Slasher<TEthSpec>>) -> Self { |     pub fn slasher(mut self, slasher: Arc<Slasher<TEthSpec>>) -> Self { | ||||||
|         self.slasher = Some(slasher); |         self.slasher = Some(slasher); | ||||||
|         self |         self | ||||||
| @ -142,14 +154,18 @@ where | |||||||
|         let runtime_context = |         let runtime_context = | ||||||
|             runtime_context.ok_or("beacon_chain_start_method requires a runtime context")?; |             runtime_context.ok_or("beacon_chain_start_method requires a runtime context")?; | ||||||
|         let context = runtime_context.service_context("beacon".into()); |         let context = runtime_context.service_context("beacon".into()); | ||||||
|  |         let log = context.log(); | ||||||
|         let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?; |         let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?; | ||||||
|         let event_handler = if self.http_api_config.enabled { |         let event_handler = if self.http_api_config.enabled { | ||||||
|             Some(ServerSentEventHandler::new(context.log().clone())) |             Some(ServerSentEventHandler::new( | ||||||
|  |                 context.log().clone(), | ||||||
|  |                 self.http_api_config.sse_capacity_multiplier, | ||||||
|  |             )) | ||||||
|         } else { |         } else { | ||||||
|             None |             None | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         let execution_layer = if let Some(config) = config.execution_layer { |         let execution_layer = if let Some(config) = config.execution_layer.clone() { | ||||||
|             let context = runtime_context.service_context("exec".into()); |             let context = runtime_context.service_context("exec".into()); | ||||||
|             let execution_layer = ExecutionLayer::from_config( |             let execution_layer = ExecutionLayer::from_config( | ||||||
|                 config, |                 config, | ||||||
| @ -167,6 +183,9 @@ where | |||||||
|             .store(store) |             .store(store) | ||||||
|             .task_executor(context.executor.clone()) |             .task_executor(context.executor.clone()) | ||||||
|             .custom_spec(spec.clone()) |             .custom_spec(spec.clone()) | ||||||
|  |             .store_migrator_config( | ||||||
|  |                 MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration), | ||||||
|  |             ) | ||||||
|             .chain_config(chain_config) |             .chain_config(chain_config) | ||||||
|             .graffiti(graffiti) |             .graffiti(graffiti) | ||||||
|             .event_handler(event_handler) |             .event_handler(event_handler) | ||||||
| @ -231,23 +250,19 @@ where | |||||||
|                 )?; |                 )?; | ||||||
|                 builder.genesis_state(genesis_state).map(|v| (v, None))? |                 builder.genesis_state(genesis_state).map(|v| (v, None))? | ||||||
|             } |             } | ||||||
|             ClientGenesis::SszBytes { |             ClientGenesis::GenesisState => { | ||||||
|                 genesis_state_bytes, |  | ||||||
|             } => { |  | ||||||
|                 info!( |                 info!( | ||||||
|                     context.log(), |                     context.log(), | ||||||
|                     "Starting from known genesis state"; |                     "Starting from known genesis state"; | ||||||
|                 ); |                 ); | ||||||
| 
 | 
 | ||||||
|                 let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) |                 let genesis_state = genesis_state(&runtime_context, &config, log).await?; | ||||||
|                     .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; |  | ||||||
| 
 | 
 | ||||||
|                 builder.genesis_state(genesis_state).map(|v| (v, None))? |                 builder.genesis_state(genesis_state).map(|v| (v, None))? | ||||||
|             } |             } | ||||||
|             ClientGenesis::WeakSubjSszBytes { |             ClientGenesis::WeakSubjSszBytes { | ||||||
|                 anchor_state_bytes, |                 anchor_state_bytes, | ||||||
|                 anchor_block_bytes, |                 anchor_block_bytes, | ||||||
|                 genesis_state_bytes, |  | ||||||
|             } => { |             } => { | ||||||
|                 info!(context.log(), "Starting checkpoint sync"); |                 info!(context.log(), "Starting checkpoint sync"); | ||||||
|                 if config.chain.genesis_backfill { |                 if config.chain.genesis_backfill { | ||||||
| @ -261,17 +276,13 @@ where | |||||||
|                     .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; |                     .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; | ||||||
|                 let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) |                 let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) | ||||||
|                     .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; |                     .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; | ||||||
|                 let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) |                 let genesis_state = genesis_state(&runtime_context, &config, log).await?; | ||||||
|                     .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; |  | ||||||
| 
 | 
 | ||||||
|                 builder |                 builder | ||||||
|                     .weak_subjectivity_state(anchor_state, anchor_block, genesis_state) |                     .weak_subjectivity_state(anchor_state, anchor_block, genesis_state) | ||||||
|                     .map(|v| (v, None))? |                     .map(|v| (v, None))? | ||||||
|             } |             } | ||||||
|             ClientGenesis::CheckpointSyncUrl { |             ClientGenesis::CheckpointSyncUrl { url } => { | ||||||
|                 genesis_state_bytes, |  | ||||||
|                 url, |  | ||||||
|             } => { |  | ||||||
|                 info!( |                 info!( | ||||||
|                     context.log(), |                     context.log(), | ||||||
|                     "Starting checkpoint sync"; |                     "Starting checkpoint sync"; | ||||||
| @ -290,7 +301,6 @@ where | |||||||
|                         config.chain.checkpoint_sync_url_timeout, |                         config.chain.checkpoint_sync_url_timeout, | ||||||
|                     )), |                     )), | ||||||
|                 ); |                 ); | ||||||
|                 let slots_per_epoch = TEthSpec::slots_per_epoch(); |  | ||||||
| 
 | 
 | ||||||
|                 let deposit_snapshot = if config.sync_eth1_chain { |                 let deposit_snapshot = if config.sync_eth1_chain { | ||||||
|                     // We want to fetch the deposit snapshot before fetching the finalized beacon state to
 |                     // We want to fetch the deposit snapshot before fetching the finalized beacon state to
 | ||||||
| @ -337,10 +347,23 @@ where | |||||||
|                     None |                     None | ||||||
|                 }; |                 }; | ||||||
| 
 | 
 | ||||||
|                 debug!(context.log(), "Downloading finalized block"); |                 debug!( | ||||||
|                 // Find a suitable finalized block on an epoch boundary.
 |                     context.log(), | ||||||
|                 let mut block = remote |                     "Downloading finalized state"; | ||||||
|                     .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Finalized, &spec) |                 ); | ||||||
|  |                 let state = remote | ||||||
|  |                     .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Finalized, &spec) | ||||||
|  |                     .await | ||||||
|  |                     .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))? | ||||||
|  |                     .ok_or_else(|| "Checkpoint state missing from remote".to_string())?; | ||||||
|  | 
 | ||||||
|  |                 debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot()); | ||||||
|  | 
 | ||||||
|  |                 let finalized_block_slot = state.latest_block_header().slot; | ||||||
|  | 
 | ||||||
|  |                 debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot); | ||||||
|  |                 let block = remote | ||||||
|  |                     .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(finalized_block_slot), &spec) | ||||||
|                     .await |                     .await | ||||||
|                     .map_err(|e| match e { |                     .map_err(|e| match e { | ||||||
|                         ApiError::InvalidSsz(e) => format!( |                         ApiError::InvalidSsz(e) => format!( | ||||||
| @ -354,65 +377,14 @@ where | |||||||
| 
 | 
 | ||||||
|                 debug!(context.log(), "Downloaded finalized block"); |                 debug!(context.log(), "Downloaded finalized block"); | ||||||
| 
 | 
 | ||||||
|                 let mut block_slot = block.slot(); |                 let genesis_state = genesis_state(&runtime_context, &config, log).await?; | ||||||
| 
 |  | ||||||
|                 while block.slot() % slots_per_epoch != 0 { |  | ||||||
|                     block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; |  | ||||||
| 
 |  | ||||||
|                     debug!( |  | ||||||
|                         context.log(), |  | ||||||
|                         "Searching for aligned checkpoint block"; |  | ||||||
|                         "block_slot" => block_slot |  | ||||||
|                     ); |  | ||||||
| 
 |  | ||||||
|                     if let Some(found_block) = remote |  | ||||||
|                         .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(block_slot), &spec) |  | ||||||
|                         .await |  | ||||||
|                         .map_err(|e| { |  | ||||||
|                             format!("Error fetching block at slot {}: {:?}", block_slot, e) |  | ||||||
|                         })? |  | ||||||
|                     { |  | ||||||
|                         block = found_block; |  | ||||||
|                     } |  | ||||||
|                 } |  | ||||||
| 
 |  | ||||||
|                 debug!( |  | ||||||
|                     context.log(), |  | ||||||
|                     "Downloaded aligned finalized block"; |  | ||||||
|                     "block_root" => ?block.canonical_root(), |  | ||||||
|                     "block_slot" => block.slot(), |  | ||||||
|                 ); |  | ||||||
| 
 |  | ||||||
|                 let state_root = block.state_root(); |  | ||||||
|                 debug!( |  | ||||||
|                     context.log(), |  | ||||||
|                     "Downloading finalized state"; |  | ||||||
|                     "state_root" => ?state_root |  | ||||||
|                 ); |  | ||||||
|                 let state = remote |  | ||||||
|                     .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Root(state_root), &spec) |  | ||||||
|                     .await |  | ||||||
|                     .map_err(|e| { |  | ||||||
|                         format!( |  | ||||||
|                             "Error loading checkpoint state from remote {:?}: {:?}", |  | ||||||
|                             state_root, e |  | ||||||
|                         ) |  | ||||||
|                     })? |  | ||||||
|                     .ok_or_else(|| { |  | ||||||
|                         format!("Checkpoint state missing from remote: {:?}", state_root) |  | ||||||
|                     })?; |  | ||||||
| 
 |  | ||||||
|                 debug!(context.log(), "Downloaded finalized state"); |  | ||||||
| 
 |  | ||||||
|                 let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) |  | ||||||
|                     .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; |  | ||||||
| 
 | 
 | ||||||
|                 info!( |                 info!( | ||||||
|                     context.log(), |                     context.log(), | ||||||
|                     "Loaded checkpoint block and state"; |                     "Loaded checkpoint block and state"; | ||||||
|                     "slot" => block.slot(), |                     "block_slot" => block.slot(), | ||||||
|  |                     "state_slot" => state.slot(), | ||||||
|                     "block_root" => ?block.canonical_root(), |                     "block_root" => ?block.canonical_root(), | ||||||
|                     "state_root" => ?state_root, |  | ||||||
|                 ); |                 ); | ||||||
| 
 | 
 | ||||||
|                 let service = |                 let service = | ||||||
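Editor's note: the rewritten checkpoint-sync flow above inverts the old lookup order. Instead of downloading the finalized block and walking backwards to an epoch boundary, it downloads the finalized state first and reads the matching block's slot directly from `state.latest_block_header().slot`, so one state fetch plus one block fetch by slot replaces the old search loop. A toy sketch of the derivation, with invented minimal types standing in for `BeaconState` and the remote client:

    // Toy types standing in for BeaconState and its latest block header.
    struct BlockHeader { slot: u64 }
    struct State { latest_block_header: BlockHeader }

    // The finalized state's latest block header identifies the block that
    // produced it, so no epoch-alignment walk is needed.
    fn finalized_block_slot(state: &State) -> u64 {
        state.latest_block_header.slot
    }

    fn main() {
        let state = State { latest_block_header: BlockHeader { slot: 6_208_224 } };
        assert_eq!(finalized_block_slot(&state), 6_208_224);
    }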
| @ -476,6 +448,7 @@ where | |||||||
|                         chain: None, |                         chain: None, | ||||||
|                         network_senders: None, |                         network_senders: None, | ||||||
|                         network_globals: None, |                         network_globals: None, | ||||||
|  |                         beacon_processor_send: None, | ||||||
|                         eth1_service: Some(genesis_service.eth1_service.clone()), |                         eth1_service: Some(genesis_service.eth1_service.clone()), | ||||||
|                         log: context.log().clone(), |                         log: context.log().clone(), | ||||||
|                         sse_logging_components: runtime_context.sse_logging_components.clone(), |                         sse_logging_components: runtime_context.sse_logging_components.clone(), | ||||||
| @ -553,6 +526,10 @@ where | |||||||
|             .as_ref() |             .as_ref() | ||||||
|             .ok_or("network requires a runtime_context")? |             .ok_or("network requires a runtime_context")? | ||||||
|             .clone(); |             .clone(); | ||||||
|  |         let beacon_processor_channels = self | ||||||
|  |             .beacon_processor_channels | ||||||
|  |             .as_ref() | ||||||
|  |             .ok_or("network requires beacon_processor_channels")?; | ||||||
| 
 | 
 | ||||||
|         // If gossipsub metrics are required, we build a registry to record them.
 |         // If gossipsub metrics are required, we build a registry to record them.
 | ||||||
|         let mut gossipsub_registry = if config.metrics_enabled { |         let mut gossipsub_registry = if config.metrics_enabled { | ||||||
| @ -568,6 +545,8 @@ where | |||||||
|             gossipsub_registry |             gossipsub_registry | ||||||
|                 .as_mut() |                 .as_mut() | ||||||
|                 .map(|registry| registry.sub_registry_with_prefix("gossipsub")), |                 .map(|registry| registry.sub_registry_with_prefix("gossipsub")), | ||||||
|  |             beacon_processor_channels.beacon_processor_tx.clone(), | ||||||
|  |             beacon_processor_channels.work_reprocessing_tx.clone(), | ||||||
|         ) |         ) | ||||||
|         .await |         .await | ||||||
|         .map_err(|e| format!("Failed to start network: {:?}", e))?; |         .map_err(|e| format!("Failed to start network: {:?}", e))?; | ||||||
| @ -690,6 +669,14 @@ where | |||||||
|             .runtime_context |             .runtime_context | ||||||
|             .as_ref() |             .as_ref() | ||||||
|             .ok_or("build requires a runtime context")?; |             .ok_or("build requires a runtime context")?; | ||||||
|  |         let beacon_processor_channels = self | ||||||
|  |             .beacon_processor_channels | ||||||
|  |             .take() | ||||||
|  |             .ok_or("build requires beacon_processor_channels")?; | ||||||
|  |         let beacon_processor_config = self | ||||||
|  |             .beacon_processor_config | ||||||
|  |             .take() | ||||||
|  |             .ok_or("build requires a beacon_processor_config")?; | ||||||
|         let log = runtime_context.log().clone(); |         let log = runtime_context.log().clone(); | ||||||
| 
 | 
 | ||||||
|         let http_api_listen_addr = if self.http_api_config.enabled { |         let http_api_listen_addr = if self.http_api_config.enabled { | ||||||
| @ -699,6 +686,7 @@ where | |||||||
|                 network_senders: self.network_senders.clone(), |                 network_senders: self.network_senders.clone(), | ||||||
|                 network_globals: self.network_globals.clone(), |                 network_globals: self.network_globals.clone(), | ||||||
|                 eth1_service: self.eth1_service.clone(), |                 eth1_service: self.eth1_service.clone(), | ||||||
|  |                 beacon_processor_send: Some(beacon_processor_channels.beacon_processor_tx.clone()), | ||||||
|                 sse_logging_components: runtime_context.sse_logging_components.clone(), |                 sse_logging_components: runtime_context.sse_logging_components.clone(), | ||||||
|                 log: log.clone(), |                 log: log.clone(), | ||||||
|             }); |             }); | ||||||
| @ -742,7 +730,7 @@ where | |||||||
| 
 | 
 | ||||||
|             runtime_context |             runtime_context | ||||||
|                 .executor |                 .executor | ||||||
|                 .spawn_without_exit(async move { server.await }, "http-metrics"); |                 .spawn_without_exit(server, "http-metrics"); | ||||||
| 
 | 
 | ||||||
|             Some(listen_addr) |             Some(listen_addr) | ||||||
|         } else { |         } else { | ||||||
| @ -755,6 +743,25 @@ where | |||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         if let Some(beacon_chain) = self.beacon_chain.as_ref() { |         if let Some(beacon_chain) = self.beacon_chain.as_ref() { | ||||||
|  |             if let Some(network_globals) = &self.network_globals { | ||||||
|  |                 let beacon_processor_context = runtime_context.service_context("bproc".into()); | ||||||
|  |                 BeaconProcessor { | ||||||
|  |                     network_globals: network_globals.clone(), | ||||||
|  |                     executor: beacon_processor_context.executor.clone(), | ||||||
|  |                     current_workers: 0, | ||||||
|  |                     config: beacon_processor_config, | ||||||
|  |                     log: beacon_processor_context.log().clone(), | ||||||
|  |                 } | ||||||
|  |                 .spawn_manager( | ||||||
|  |                     beacon_processor_channels.beacon_processor_rx, | ||||||
|  |                     beacon_processor_channels.work_reprocessing_tx, | ||||||
|  |                     beacon_processor_channels.work_reprocessing_rx, | ||||||
|  |                     None, | ||||||
|  |                     beacon_chain.slot_clock.clone(), | ||||||
|  |                     beacon_chain.spec.maximum_gossip_clock_disparity(), | ||||||
|  |                 )?; | ||||||
|  |             } | ||||||
|  | 
 | ||||||
|             let state_advance_context = runtime_context.service_context("state_advance".into()); |             let state_advance_context = runtime_context.service_context("state_advance".into()); | ||||||
|             let state_advance_log = state_advance_context.log().clone(); |             let state_advance_log = state_advance_context.log().clone(); | ||||||
|             spawn_state_advance_timer( |             spawn_state_advance_timer( | ||||||
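Editor's note: in the `build` step above, the spawned manager receives the receiver ends (`beacon_processor_rx`, `work_reprocessing_rx`) of the channels created earlier by `beacon_processor(...)`, while the sender ends were cloned into the network service and the HTTP API. A toy model of that fan-in topology using std channels; this illustrates only the shape, not Lighthouse's actual work types or executor:

    use std::sync::mpsc;
    use std::thread;

    fn main() {
        // One receiver (the manager), many cloned senders (network, HTTP API).
        let (work_tx, work_rx) = mpsc::channel::<&'static str>();
        let network_tx = work_tx.clone(); // analogous to the sender cloned into the network
        let http_tx = work_tx; // analogous to the sender handed to the HTTP API

        let manager = thread::spawn(move || {
            // Drains until every sender is dropped, like the spawned manager task.
            for item in work_rx {
                println!("processing {item}");
            }
        });

        network_tx.send("gossip block").unwrap();
        http_tx.send("api block").unwrap();
        drop(network_tx);
        drop(http_tx);
        manager.join().unwrap();
    }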
| @ -807,9 +814,6 @@ where | |||||||
|                     execution_layer.spawn_clean_proposer_caches_routine::<TSlotClock>( |                     execution_layer.spawn_clean_proposer_caches_routine::<TSlotClock>( | ||||||
|                         beacon_chain.slot_clock.clone(), |                         beacon_chain.slot_clock.clone(), | ||||||
|                     ); |                     ); | ||||||
| 
 |  | ||||||
|                     // Spawns a routine that polls the `exchange_transition_configuration` endpoint.
 |  | ||||||
|                     execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); |  | ||||||
|                 } |                 } | ||||||
| 
 | 
 | ||||||
|                 // Spawn a service to publish BLS to execution changes at the Capella fork.
 |                 // Spawn a service to publish BLS to execution changes at the Capella fork.
 | ||||||
| @ -1077,3 +1081,23 @@ where | |||||||
|         Ok(self) |         Ok(self) | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | /// Obtain the genesis state from the `eth2_network_config` in `context`.
 | ||||||
|  | async fn genesis_state<T: EthSpec>( | ||||||
|  |     context: &RuntimeContext<T>, | ||||||
|  |     config: &ClientConfig, | ||||||
|  |     log: &Logger, | ||||||
|  | ) -> Result<BeaconState<T>, String> { | ||||||
|  |     let eth2_network_config = context | ||||||
|  |         .eth2_network_config | ||||||
|  |         .as_ref() | ||||||
|  |         .ok_or("An eth2_network_config is required to obtain the genesis state")?; | ||||||
|  |     eth2_network_config | ||||||
|  |         .genesis_state::<T>( | ||||||
|  |             config.genesis_state_url.as_deref(), | ||||||
|  |             config.genesis_state_url_timeout, | ||||||
|  |             log, | ||||||
|  |         ) | ||||||
|  |         .await? | ||||||
|  |         .ok_or_else(|| "Genesis state is unknown".to_string()) | ||||||
|  | } | ||||||
|  | |||||||
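Editor's note: the new `genesis_state` helper centralises what each `ClientGenesis` arm previously did inline with `BeaconState::from_ssz_bytes`. Its error handling has a double-`ok_or` shape: the network config may be absent, and the lookup inside it may come back empty. A small synchronous sketch of that shape with stand-in types (the real helper is async and returns a `BeaconState<T>`):

    // Stand-ins: &str for Eth2NetworkConfig, String for BeaconState.
    fn genesis_state_sketch(network_config: Option<&str>) -> Result<String, String> {
        let config = network_config
            .ok_or("An eth2_network_config is required to obtain the genesis state")?;
        // Stands in for the (possibly remote) fetch and SSZ decode of the state.
        let maybe_state: Option<String> = Some(format!("genesis state for {config}"));
        maybe_state.ok_or_else(|| "Genesis state is unknown".to_string())
    }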
| @ -1,4 +1,5 @@ | |||||||
| use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; | use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; | ||||||
|  | use beacon_processor::BeaconProcessorConfig; | ||||||
| use directory::DEFAULT_ROOT_DIR; | use directory::DEFAULT_ROOT_DIR; | ||||||
| use environment::LoggerConfig; | use environment::LoggerConfig; | ||||||
| use network::NetworkConfig; | use network::NetworkConfig; | ||||||
| @ -6,6 +7,7 @@ use sensitive_url::SensitiveUrl; | |||||||
| use serde_derive::{Deserialize, Serialize}; | use serde_derive::{Deserialize, Serialize}; | ||||||
| use std::fs; | use std::fs; | ||||||
| use std::path::PathBuf; | use std::path::PathBuf; | ||||||
|  | use std::time::Duration; | ||||||
| use types::{Graffiti, PublicKeyBytes}; | use types::{Graffiti, PublicKeyBytes}; | ||||||
| /// Default directory name for the freezer database under the top-level data dir.
 | /// Default directory name for the freezer database under the top-level data dir.
 | ||||||
| const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; | const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; | ||||||
| @ -24,18 +26,13 @@ pub enum ClientGenesis { | |||||||
|     /// contract.
 |     /// contract.
 | ||||||
|     #[default] |     #[default] | ||||||
|     DepositContract, |     DepositContract, | ||||||
|     /// Loads the genesis state from SSZ-encoded `BeaconState` bytes.
 |     /// Loads the genesis state from the `Eth2NetworkConfig`.
 | ||||||
|     ///
 |     GenesisState, | ||||||
|     /// We include the bytes instead of the `BeaconState<E>` because the `EthSpec` type
 |  | ||||||
|     /// parameter would be very annoying.
 |  | ||||||
|     SszBytes { genesis_state_bytes: Vec<u8> }, |  | ||||||
|     WeakSubjSszBytes { |     WeakSubjSszBytes { | ||||||
|         genesis_state_bytes: Vec<u8>, |  | ||||||
|         anchor_state_bytes: Vec<u8>, |         anchor_state_bytes: Vec<u8>, | ||||||
|         anchor_block_bytes: Vec<u8>, |         anchor_block_bytes: Vec<u8>, | ||||||
|     }, |     }, | ||||||
|     CheckpointSyncUrl { |     CheckpointSyncUrl { | ||||||
|         genesis_state_bytes: Vec<u8>, |  | ||||||
|         url: SensitiveUrl, |         url: SensitiveUrl, | ||||||
|     }, |     }, | ||||||
| } | } | ||||||
| @ -79,7 +76,9 @@ pub struct Config { | |||||||
|     pub monitoring_api: Option<monitoring_api::Config>, |     pub monitoring_api: Option<monitoring_api::Config>, | ||||||
|     pub slasher: Option<slasher::Config>, |     pub slasher: Option<slasher::Config>, | ||||||
|     pub logger_config: LoggerConfig, |     pub logger_config: LoggerConfig, | ||||||
|     pub always_prefer_builder_payload: bool, |     pub beacon_processor: BeaconProcessorConfig, | ||||||
|  |     pub genesis_state_url: Option<String>, | ||||||
|  |     pub genesis_state_url_timeout: Duration, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl Default for Config { | impl Default for Config { | ||||||
| @ -106,7 +105,10 @@ impl Default for Config { | |||||||
|             validator_monitor_pubkeys: vec![], |             validator_monitor_pubkeys: vec![], | ||||||
|             validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, |             validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, | ||||||
|             logger_config: LoggerConfig::default(), |             logger_config: LoggerConfig::default(), | ||||||
|             always_prefer_builder_payload: false, |             beacon_processor: <_>::default(), | ||||||
|  |             genesis_state_url: <_>::default(), | ||||||
|  |             // This default value should always be overwritten by the CLI default value.
 | ||||||
|  |             genesis_state_url_timeout: Duration::from_secs(60), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | |||||||
| @ -46,20 +46,6 @@ impl<T: BeaconChainTypes> Client<T> { | |||||||
|         self.http_metrics_listen_addr |         self.http_metrics_listen_addr | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Returns the ipv4 port of the client's libp2p stack, if it was started.
 |  | ||||||
|     pub fn libp2p_listen_ipv4_port(&self) -> Option<u16> { |  | ||||||
|         self.network_globals |  | ||||||
|             .as_ref() |  | ||||||
|             .and_then(|n| n.listen_port_tcp4()) |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     /// Returns the ipv6 port of the client's libp2p stack, if it was started.
 |  | ||||||
|     pub fn libp2p_listen_ipv6_port(&self) -> Option<u16> { |  | ||||||
|         self.network_globals |  | ||||||
|             .as_ref() |  | ||||||
|             .and_then(|n| n.listen_port_tcp6()) |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     /// Returns the list of libp2p addresses the client is listening to.
 |     /// Returns the list of libp2p addresses the client is listening to.
 | ||||||
|     pub fn libp2p_listen_addresses(&self) -> Option<Vec<Multiaddr>> { |     pub fn libp2p_listen_addresses(&self) -> Option<Vec<Multiaddr>> { | ||||||
|         self.network_globals.as_ref().map(|n| n.listen_multiaddrs()) |         self.network_globals.as_ref().map(|n| n.listen_multiaddrs()) | ||||||
|  | |||||||
| @ -1,7 +1,7 @@ | |||||||
| use crate::metrics; | use crate::metrics; | ||||||
| use beacon_chain::{ | use beacon_chain::{ | ||||||
|     capella_readiness::CapellaReadiness, |     capella_readiness::CapellaReadiness, | ||||||
|     merge_readiness::{MergeConfig, MergeReadiness}, |     merge_readiness::{GenesisExecutionPayloadStatus, MergeConfig, MergeReadiness}, | ||||||
|     BeaconChain, BeaconChainTypes, ExecutionStatus, |     BeaconChain, BeaconChainTypes, ExecutionStatus, | ||||||
| }; | }; | ||||||
| use lighthouse_network::{types::SyncState, NetworkGlobals}; | use lighthouse_network::{types::SyncState, NetworkGlobals}; | ||||||
| @ -62,6 +62,9 @@ pub fn spawn_notifier<T: BeaconChainTypes>( | |||||||
|                         "wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)), |                         "wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)), | ||||||
|                     ); |                     ); | ||||||
|                     eth1_logging(&beacon_chain, &log); |                     eth1_logging(&beacon_chain, &log); | ||||||
|  |                     merge_readiness_logging(Slot::new(0), &beacon_chain, &log).await; | ||||||
|  |                     capella_readiness_logging(Slot::new(0), &beacon_chain, &log).await; | ||||||
|  |                     genesis_execution_payload_logging(&beacon_chain, &log).await; | ||||||
|                     sleep(slot_duration).await; |                     sleep(slot_duration).await; | ||||||
|                 } |                 } | ||||||
|                 _ => break, |                 _ => break, | ||||||
| @ -365,7 +368,7 @@ async fn merge_readiness_logging<T: BeaconChainTypes>( | |||||||
|         return; |         return; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     match beacon_chain.check_merge_readiness().await { |     match beacon_chain.check_merge_readiness(current_slot).await { | ||||||
|         MergeReadiness::Ready { |         MergeReadiness::Ready { | ||||||
|             config, |             config, | ||||||
|             current_difficulty, |             current_difficulty, | ||||||
| @ -404,14 +407,6 @@ async fn merge_readiness_logging<T: BeaconChainTypes>( | |||||||
|                 "config" => ?other |                 "config" => ?other | ||||||
|             ), |             ), | ||||||
|         }, |         }, | ||||||
|         readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed { error: _ } => { |  | ||||||
|             error!( |  | ||||||
|                 log, |  | ||||||
|                 "Not ready for merge"; |  | ||||||
|                 "info" => %readiness, |  | ||||||
|                 "hint" => "try updating Lighthouse and/or the execution layer", |  | ||||||
|             ) |  | ||||||
|         } |  | ||||||
|         readiness @ MergeReadiness::NotSynced => warn!( |         readiness @ MergeReadiness::NotSynced => warn!( | ||||||
|             log, |             log, | ||||||
|             "Not ready for merge"; |             "Not ready for merge"; | ||||||
| @ -484,6 +479,79 @@ async fn capella_readiness_logging<T: BeaconChainTypes>( | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | async fn genesis_execution_payload_logging<T: BeaconChainTypes>( | ||||||
|  |     beacon_chain: &BeaconChain<T>, | ||||||
|  |     log: &Logger, | ||||||
|  | ) { | ||||||
|  |     match beacon_chain | ||||||
|  |         .check_genesis_execution_payload_is_correct() | ||||||
|  |         .await | ||||||
|  |     { | ||||||
|  |         Ok(GenesisExecutionPayloadStatus::Correct(block_hash)) => { | ||||||
|  |             info!( | ||||||
|  |                 log, | ||||||
|  |                 "Execution enabled from genesis"; | ||||||
|  |                 "genesis_payload_block_hash" => ?block_hash, | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |         Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { got, expected }) => { | ||||||
|  |             error!( | ||||||
|  |                 log, | ||||||
|  |                 "Genesis payload block hash mismatch"; | ||||||
|  |                 "info" => "genesis is misconfigured and likely to fail", | ||||||
|  |                 "consensus_node_block_hash" => ?expected, | ||||||
|  |                 "execution_node_block_hash" => ?got, | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |         Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch { got, expected }) => { | ||||||
|  |             error!( | ||||||
|  |                 log, | ||||||
|  |                 "Genesis payload transactions root mismatch"; | ||||||
|  |                 "info" => "genesis is misconfigured and likely to fail", | ||||||
|  |                 "consensus_node_transactions_root" => ?expected, | ||||||
|  |                 "execution_node_transactions_root" => ?got, | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |         Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { got, expected }) => { | ||||||
|  |             error!( | ||||||
|  |                 log, | ||||||
|  |                 "Genesis payload withdrawals root mismatch"; | ||||||
|  |                 "info" => "genesis is misconfigured and likely to fail", | ||||||
|  |                 "consensus_node_withdrawals_root" => ?expected, | ||||||
|  |                 "execution_node_withdrawals_root" => ?got, | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |         Ok(GenesisExecutionPayloadStatus::OtherMismatch) => { | ||||||
|  |             error!( | ||||||
|  |                 log, | ||||||
|  |                 "Genesis payload header mismatch"; | ||||||
|  |                 "info" => "genesis is misconfigured and likely to fail", | ||||||
|  |                 "detail" => "see debug logs for payload headers" | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |         Ok(GenesisExecutionPayloadStatus::Irrelevant) => { | ||||||
|  |             info!( | ||||||
|  |                 log, | ||||||
|  |                 "Execution is not enabled from genesis"; | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |         Ok(GenesisExecutionPayloadStatus::AlreadyHappened) => { | ||||||
|  |             warn!( | ||||||
|  |                 log, | ||||||
|  |                 "Unable to check genesis which has already occurred"; | ||||||
|  |                 "info" => "this is probably a race condition or a bug" | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |         Err(e) => { | ||||||
|  |             error!( | ||||||
|  |                 log, | ||||||
|  |                 "Unable to check genesis execution payload"; | ||||||
|  |                 "error" => ?e | ||||||
|  |             ); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
| fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger) { | fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger) { | ||||||
|     let current_slot_opt = beacon_chain.slot().ok(); |     let current_slot_opt = beacon_chain.slot().ok(); | ||||||
| 
 | 
 | ||||||
|  | |||||||
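Editor's note: the `genesis_execution_payload_logging` function above maps each `GenesisExecutionPayloadStatus` variant to a log level: matches and "execution not enabled" are informational, any mismatch is an error because genesis would fail, and a check that runs after genesis only warrants a warning. A condensed sketch of that mapping with a hypothetical status enum:

    // Hypothetical condensed form of GenesisExecutionPayloadStatus.
    enum Status {
        Correct,
        Mismatch { field: &'static str },
        Irrelevant,
        AlreadyHappened,
    }

    // Matches are informational; a mismatch means genesis is misconfigured
    // and likely to fail; a post-genesis check is only a warning.
    fn log_level(status: &Status) -> &'static str {
        match status {
            Status::Correct | Status::Irrelevant => "INFO",
            Status::Mismatch { .. } => "ERRO",
            Status::AlreadyHappened => "WARN",
        }
    }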
| @ -2,33 +2,33 @@ | |||||||
| name = "eth1" | name = "eth1" | ||||||
| version = "0.2.0" | version = "0.2.0" | ||||||
| authors = ["Paul Hauner <paul@paulhauner.com>"] | authors = ["Paul Hauner <paul@paulhauner.com>"] | ||||||
| edition = "2021" | edition = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| eth1_test_rig = { path = "../../testing/eth1_test_rig" } | eth1_test_rig = { workspace = true } | ||||||
| serde_yaml = "0.8.13" | serde_yaml = { workspace = true } | ||||||
| sloggers = { version = "2.1.1", features = ["json"] } | sloggers = { workspace = true } | ||||||
| environment = { path = "../../lighthouse/environment" } | environment = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| reqwest = { version = "0.11.0", features = ["native-tls-vendored"] } | reqwest = { workspace = true } | ||||||
| execution_layer = { path = "../execution_layer" } | execution_layer = { workspace = true } | ||||||
| futures = "0.3.7" | futures = { workspace = true } | ||||||
| serde_json = "1.0.58" | serde_json = { workspace = true } | ||||||
| serde = { version = "1.0.116", features = ["derive"] } | serde = { workspace = true } | ||||||
| hex = "0.4.2" | hex = { workspace = true } | ||||||
| types = { path = "../../consensus/types"} | types = { workspace = true } | ||||||
| merkle_proof = { path = "../../consensus/merkle_proof"} | merkle_proof = { workspace = true } | ||||||
| ethereum_ssz = "0.5.0" | ethereum_ssz = { workspace = true } | ||||||
| ethereum_ssz_derive = "0.5.0" | ethereum_ssz_derive = { workspace = true } | ||||||
| tree_hash = "0.5.0" | tree_hash = { workspace = true } | ||||||
| parking_lot = "0.12.0" | parking_lot = { workspace = true } | ||||||
| slog = "2.5.2" | slog = { workspace = true } | ||||||
| superstruct = "0.5.0" | superstruct = { workspace = true } | ||||||
| tokio = { version = "1.14.0", features = ["full"] } | tokio = { workspace = true } | ||||||
| state_processing = { path = "../../consensus/state_processing" } | state_processing = { workspace = true } | ||||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics"} | lighthouse_metrics = { workspace = true } | ||||||
| lazy_static = "1.4.0" | lazy_static = { workspace = true } | ||||||
| task_executor = { path = "../../common/task_executor" } | task_executor = { workspace = true } | ||||||
| eth2 = { path = "../../common/eth2" } | eth2 = { workspace = true } | ||||||
| sensitive_url = { path = "../../common/sensitive_url" } | sensitive_url = { workspace = true } | ||||||
|  | |||||||
| @ -1,52 +1,56 @@ | |||||||
| [package] | [package] | ||||||
| name = "execution_layer" | name = "execution_layer" | ||||||
| version = "0.1.0" | version = "0.1.0" | ||||||
| edition = "2021" | edition = { workspace = true } | ||||||
| 
 | 
 | ||||||
| # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html | ||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| types = { path = "../../consensus/types"} | types = { workspace = true } | ||||||
| tokio = { version = "1.10.0", features = ["full"] } | tokio = { workspace = true } | ||||||
| async-trait = "0.1.51" | async-trait = "0.1.51" | ||||||
| slog = "2.5.2" | slog = { workspace = true } | ||||||
| futures = "0.3.7" | futures = { workspace = true } | ||||||
| sensitive_url = { path = "../../common/sensitive_url" } | sensitive_url = { workspace = true } | ||||||
| reqwest = { version = "0.11.0", features = ["json","stream"] } | reqwest = { workspace = true } | ||||||
| ethereum_serde_utils = "0.5.0" | ethereum_serde_utils = { workspace = true } | ||||||
| serde_json = "1.0.58" | serde_json = { workspace = true } | ||||||
| serde = { version = "1.0.116", features = ["derive"] } | serde = { workspace = true } | ||||||
| warp = { version = "0.3.2", features = ["tls"] } | warp = { workspace = true } | ||||||
| jsonwebtoken = "8" | jsonwebtoken = "8" | ||||||
| environment = { path = "../../lighthouse/environment" } | environment = { workspace = true } | ||||||
| bytes = "1.1.0" | bytes = { workspace = true } | ||||||
| task_executor = { path = "../../common/task_executor" } | task_executor = { workspace = true } | ||||||
| hex = "0.4.2" | hex = { workspace = true } | ||||||
| ethereum_ssz = "0.5.0" | ethereum_ssz = { workspace = true } | ||||||
| ssz_types = "0.5.0" | ssz_types = { workspace = true } | ||||||
| eth2 = { path = "../../common/eth2" } | eth2 = { workspace = true } | ||||||
| state_processing = { path = "../../consensus/state_processing" } | state_processing = { workspace = true } | ||||||
| superstruct = "0.6.0" | superstruct = { workspace = true } | ||||||
| lru = "0.7.1" | lru = { workspace = true } | ||||||
| exit-future = "0.2.0" | exit-future = { workspace = true } | ||||||
| tree_hash = "0.5.0" | tree_hash = { workspace = true } | ||||||
| tree_hash_derive = "0.5.0" | tree_hash_derive = { workspace = true } | ||||||
| parking_lot = "0.12.0" | parking_lot = { workspace = true } | ||||||
| slot_clock = { path = "../../common/slot_clock" } | slot_clock = { workspace = true } | ||||||
| tempfile = "3.1.0" | tempfile = { workspace = true } | ||||||
| rand = "0.8.5" | rand = { workspace = true } | ||||||
| zeroize = { version = "1.4.2", features = ["zeroize_derive"] } | zeroize = { workspace = true } | ||||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics" } | lighthouse_metrics = { workspace = true } | ||||||
| lazy_static = "1.4.0" | lazy_static = { workspace = true } | ||||||
| ethers-core = "1.0.2" | ethers-core = { workspace = true } | ||||||
| builder_client = { path = "../builder_client" } | builder_client = { path = "../builder_client" } | ||||||
| fork_choice = { path = "../../consensus/fork_choice" } | fork_choice = { workspace = true } | ||||||
| mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } | mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" } | ||||||
| ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } | axum = "0.6" | ||||||
| ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } | hyper = "0.14" | ||||||
| tokio-stream = { version = "0.1.9", features = [ "sync" ] } | ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" } | ||||||
| strum = "0.24.0" | ssz_rs = "0.9.0" | ||||||
|  | tokio-stream = { workspace = true } | ||||||
|  | strum = { workspace = true } | ||||||
| keccak-hash = "0.10.0" | keccak-hash = "0.10.0" | ||||||
| hash256-std-hasher = "0.15.2" | hash256-std-hasher = "0.15.2" | ||||||
| triehash = "0.8.4" | triehash = "0.8.4" | ||||||
| hash-db = "0.15.2" | hash-db = "0.15.2" | ||||||
|  | pretty_reqwest_error = { workspace = true } | ||||||
|  | arc-swap = "1.6.0" | ||||||
|  | |||||||
| @ -12,12 +12,13 @@ use types::{ | |||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| impl<T: EthSpec> ExecutionLayer<T> { | impl<T: EthSpec> ExecutionLayer<T> { | ||||||
|     /// Verify `payload.block_hash` locally within Lighthouse.
 |     /// Calculate the block hash of an execution block.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// No remote calls to the execution client will be made, so this is quite a cheap check.
 |     /// Return `(block_hash, transactions_root)`, where `transactions_root` is the root of the RLP-encoded
 | ||||||
|     pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef<T>) -> Result<(), Error> { |     /// transactions.
 | ||||||
|         let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); |     pub fn calculate_execution_block_hash( | ||||||
| 
 |         payload: ExecutionPayloadRef<T>, | ||||||
|  |     ) -> (ExecutionBlockHash, Hash256) { | ||||||
|         // Calculate the transactions root.
 |         // Calculate the transactions root.
 | ||||||
|         // We're currently using a deprecated Parity library for this. We should move to a
 |         // We're currently using a deprecated Parity library for this. We should move to a
 | ||||||
|         // better alternative when one appears, possibly following Reth.
 |         // better alternative when one appears, possibly following Reth.
 | ||||||
| @ -46,7 +47,19 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
| 
 | 
 | ||||||
|         // Hash the RLP encoding of the block header.
 |         // Hash the RLP encoding of the block header.
 | ||||||
|         let rlp_block_header = rlp_encode_block_header(&exec_block_header); |         let rlp_block_header = rlp_encode_block_header(&exec_block_header); | ||||||
|         let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header)); |         ( | ||||||
|  |             ExecutionBlockHash::from_root(keccak256(&rlp_block_header)), | ||||||
|  |             rlp_transactions_root, | ||||||
|  |         ) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Verify `payload.block_hash` locally within Lighthouse.
 | ||||||
|  |     ///
 | ||||||
|  |     /// No remote calls to the execution client will be made, so this is quite a cheap check.
 | ||||||
|  |     pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef<T>) -> Result<(), Error> { | ||||||
|  |         let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); | ||||||
|  | 
 | ||||||
|  |         let (header_hash, rlp_transactions_root) = Self::calculate_execution_block_hash(payload); | ||||||
| 
 | 
 | ||||||
|         if header_hash != payload.block_hash() { |         if header_hash != payload.block_hash() { | ||||||
|             return Err(Error::BlockHashMismatch { |             return Err(Error::BlockHashMismatch { | ||||||
|  | |||||||
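Editor's note: the refactor above splits the pure computation (`calculate_execution_block_hash`, an associated function with no `&self`) from the stateful check (`verify_payload_block_hash`, which keeps the metrics timer and the error mapping), so callers can reuse the computed `(block_hash, transactions_root)` pair. A schematic of that split, with a toy hash standing in for keccak-over-RLP:

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct BlockHash(u64);

    // Pure computation: derive the hash from the encoded header. The toy
    // "hash" below is NOT keccak256; it only illustrates the split.
    fn calculate_block_hash(rlp_header: &[u8]) -> BlockHash {
        BlockHash(rlp_header.iter().map(|&b| b as u64).sum())
    }

    // Stateful verification: recompute and compare against the claimed hash.
    fn verify_block_hash(rlp_header: &[u8], claimed: BlockHash) -> Result<(), String> {
        let computed = calculate_block_hash(rlp_header);
        if computed == claimed {
            Ok(())
        } else {
            Err(format!("block hash mismatch: {computed:?} != {claimed:?}"))
        }
    }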
| @ -1,15 +1,15 @@ | |||||||
| use crate::engines::ForkchoiceState; | use crate::engines::ForkchoiceState; | ||||||
| use crate::http::{ | use crate::http::{ | ||||||
|     ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, |     ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, | ||||||
|     ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, |     ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, | ||||||
|     ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, |     ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, | ||||||
|     ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, |  | ||||||
| }; | }; | ||||||
| use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2}; | use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2}; | ||||||
| pub use ethers_core::types::Transaction; | pub use ethers_core::types::Transaction; | ||||||
| use ethers_core::utils::rlp::{self, Decodable, Rlp}; | use ethers_core::utils::rlp::{self, Decodable, Rlp}; | ||||||
| use http::deposit_methods::RpcError; | use http::deposit_methods::RpcError; | ||||||
| pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; | pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; | ||||||
|  | use pretty_reqwest_error::PrettyReqwestError; | ||||||
| use reqwest::StatusCode; | use reqwest::StatusCode; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
| use std::convert::TryFrom; | use std::convert::TryFrom; | ||||||
| @ -32,7 +32,7 @@ pub type PayloadId = [u8; 8]; | |||||||
| 
 | 
 | ||||||
| #[derive(Debug)] | #[derive(Debug)] | ||||||
| pub enum Error { | pub enum Error { | ||||||
|     Reqwest(reqwest::Error), |     HttpClient(PrettyReqwestError), | ||||||
|     Auth(auth::Error), |     Auth(auth::Error), | ||||||
|     BadResponse(String), |     BadResponse(String), | ||||||
|     RequestFailed(String), |     RequestFailed(String), | ||||||
| @ -67,7 +67,7 @@ impl From<reqwest::Error> for Error { | |||||||
|         ) { |         ) { | ||||||
|             Error::Auth(auth::Error::InvalidToken) |             Error::Auth(auth::Error::InvalidToken) | ||||||
|         } else { |         } else { | ||||||
|             Error::Reqwest(e) |             Error::HttpClient(e.into()) | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| } | } | ||||||
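Editor's note: the `From<reqwest::Error>` impl above classifies before wrapping, keeping the auth path separate from the new `HttpClient(PrettyReqwestError)` variant. A plausible sketch of that classifier with invented error types, assuming (the condition is elided in the hunk above) that an unauthorized response indicates a rejected JWT:

    #[derive(Debug)]
    enum Error {
        Auth,               // bad or expired JWT, as in the real auth::Error path
        HttpClient(String), // stands in for PrettyReqwestError
    }

    struct TransportError {
        status: Option<u16>,
        detail: String,
    }

    impl From<TransportError> for Error {
        // Classify before wrapping: a 401 means the token was rejected,
        // anything else is reported as a plain HTTP-client failure.
        fn from(e: TransportError) -> Self {
            if e.status == Some(401) {
                Error::Auth
            } else {
                Error::HttpClient(e.detail)
            }
        }
    }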
| @ -449,7 +449,6 @@ pub struct EngineCapabilities { | |||||||
|     pub get_payload_bodies_by_range_v1: bool, |     pub get_payload_bodies_by_range_v1: bool, | ||||||
|     pub get_payload_v1: bool, |     pub get_payload_v1: bool, | ||||||
|     pub get_payload_v2: bool, |     pub get_payload_v2: bool, | ||||||
|     pub exchange_transition_configuration_v1: bool, |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl EngineCapabilities { | impl EngineCapabilities { | ||||||
| @ -479,9 +478,6 @@ impl EngineCapabilities { | |||||||
|         if self.get_payload_v2 { |         if self.get_payload_v2 { | ||||||
|             response.push(ENGINE_GET_PAYLOAD_V2); |             response.push(ENGINE_GET_PAYLOAD_V2); | ||||||
|         } |         } | ||||||
|         if self.exchange_transition_configuration_v1 { |  | ||||||
|             response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1); |  | ||||||
|         } |  | ||||||
| 
 | 
 | ||||||
|         response |         response | ||||||
|     } |     } | ||||||
|  | |||||||
| @ -46,10 +46,6 @@ pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesB | |||||||
| pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; | pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; | ||||||
| pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); | pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); | ||||||
| 
 | 
 | ||||||
| pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = |  | ||||||
|     "engine_exchangeTransitionConfigurationV1"; |  | ||||||
| pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); |  | ||||||
| 
 |  | ||||||
| pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; | pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; | ||||||
| pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); | pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); | ||||||
| 
 | 
 | ||||||
| @ -68,7 +64,6 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ | |||||||
|     ENGINE_FORKCHOICE_UPDATED_V2, |     ENGINE_FORKCHOICE_UPDATED_V2, | ||||||
|     ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, |     ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, | ||||||
|     ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, |     ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, | ||||||
|     ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, |  | ||||||
| ]; | ]; | ||||||
| 
 | 
 | ||||||
| /// This is necessary because a user might run a capella-enabled version of
 | /// This is necessary because a user might run a capella-enabled version of
 | ||||||
| @ -83,7 +78,6 @@ pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilit | |||||||
|     get_payload_bodies_by_range_v1: false, |     get_payload_bodies_by_range_v1: false, | ||||||
|     get_payload_v1: true, |     get_payload_v1: true, | ||||||
|     get_payload_v2: false, |     get_payload_v2: false, | ||||||
|     exchange_transition_configuration_v1: true, |  | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object.
 | /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object.
 | ||||||
| @ -934,24 +928,6 @@ impl HttpJsonRpc { | |||||||
|             .collect()) |             .collect()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub async fn exchange_transition_configuration_v1( |  | ||||||
|         &self, |  | ||||||
|         transition_configuration: TransitionConfigurationV1, |  | ||||||
|     ) -> Result<TransitionConfigurationV1, Error> { |  | ||||||
|         let params = json!([transition_configuration]); |  | ||||||
| 
 |  | ||||||
|         let response = self |  | ||||||
|             .rpc_request( |  | ||||||
|                 ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, |  | ||||||
|                 params, |  | ||||||
|                 ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT |  | ||||||
|                     * self.execution_timeout_multiplier, |  | ||||||
|             ) |  | ||||||
|             .await?; |  | ||||||
| 
 |  | ||||||
|         Ok(response) |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     pub async fn exchange_capabilities(&self) -> Result<EngineCapabilities, Error> { |     pub async fn exchange_capabilities(&self) -> Result<EngineCapabilities, Error> { | ||||||
|         let params = json!([LIGHTHOUSE_CAPABILITIES]); |         let params = json!([LIGHTHOUSE_CAPABILITIES]); | ||||||
| 
 | 
 | ||||||
| @ -982,8 +958,6 @@ impl HttpJsonRpc { | |||||||
|                     .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), |                     .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), | ||||||
|                 get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), |                 get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), | ||||||
|                 get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), |                 get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), | ||||||
|                 exchange_transition_configuration_v1: capabilities |  | ||||||
|                     .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1), |  | ||||||
|             }), |             }), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  | |||||||
| @ -5,6 +5,7 @@ | |||||||
| //! deposit-contract functionality that the `beacon_node/eth1` crate already provides.
 | //! deposit-contract functionality that the `beacon_node/eth1` crate already provides.
 | ||||||
| 
 | 
 | ||||||
| use crate::payload_cache::PayloadCache; | use crate::payload_cache::PayloadCache; | ||||||
|  | use arc_swap::ArcSwapOption; | ||||||
| use auth::{strip_prefix, Auth, JwtKey}; | use auth::{strip_prefix, Auth, JwtKey}; | ||||||
| use builder_client::BuilderHttpClient; | use builder_client::BuilderHttpClient; | ||||||
| pub use engine_api::EngineCapabilities; | pub use engine_api::EngineCapabilities; | ||||||
| @ -38,11 +39,11 @@ use tokio::{ | |||||||
| }; | }; | ||||||
| use tokio_stream::wrappers::WatchStream; | use tokio_stream::wrappers::WatchStream; | ||||||
| use tree_hash::TreeHash; | use tree_hash::TreeHash; | ||||||
| use types::{AbstractExecPayload, BeaconStateError, ExecPayload, Withdrawals}; | use types::{AbstractExecPayload, BeaconStateError, ExecPayload}; | ||||||
| use types::{ | use types::{ | ||||||
|     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, |     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionPayloadCapella, ExecutionPayloadMerge, | ||||||
|     ExecutionPayloadCapella, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, |     ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, | ||||||
|     ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, |     Slot, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| mod block_hash; | mod block_hash; | ||||||
| @ -74,11 +75,9 @@ const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128; | |||||||
| const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = | const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = | ||||||
|     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; |     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; | ||||||
| 
 | 
 | ||||||
| const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); |  | ||||||
| 
 |  | ||||||
| /// A payload alongside some information about where it came from.
 | /// A payload alongside some information about where it came from.
 | ||||||
| pub enum ProvenancedPayload<P> { | pub enum ProvenancedPayload<P> { | ||||||
|     /// A good ol' fashioned farm-to-table payload from your local EE.
 |     /// A good old-fashioned farm-to-table payload from your local EE.
 | ||||||
|     Local(P), |     Local(P), | ||||||
|     /// A payload from a builder (e.g. mev-boost).
 |     /// A payload from a builder (e.g. mev-boost).
 | ||||||
|     Builder(P), |     Builder(P), | ||||||
| @ -163,7 +162,7 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo | |||||||
|                 BlockProposalContents::Payload { |                 BlockProposalContents::Payload { | ||||||
|                     payload: Payload::default_at_fork(fork_name)?, |                     payload: Payload::default_at_fork(fork_name)?, | ||||||
|                     block_value: Uint256::zero(), |                     block_value: Uint256::zero(), | ||||||
|                     _phantom: PhantomData::default(), |                     _phantom: PhantomData, | ||||||
|                 } |                 } | ||||||
|             } |             } | ||||||
|         }) |         }) | ||||||
| @ -211,7 +210,7 @@ pub enum FailedCondition { | |||||||
| 
 | 
 | ||||||
| struct Inner<E: EthSpec> { | struct Inner<E: EthSpec> { | ||||||
|     engine: Arc<Engine>, |     engine: Arc<Engine>, | ||||||
|     builder: Option<BuilderHttpClient>, |     builder: ArcSwapOption<BuilderHttpClient>, | ||||||
|     execution_engine_forkchoice_lock: Mutex<()>, |     execution_engine_forkchoice_lock: Mutex<()>, | ||||||
|     suggested_fee_recipient: Option<Address>, |     suggested_fee_recipient: Option<Address>, | ||||||
|     proposer_preparation_data: Mutex<HashMap<u64, ProposerPreparationDataEntry>>, |     proposer_preparation_data: Mutex<HashMap<u64, ProposerPreparationDataEntry>>, | ||||||
| @ -326,25 +325,9 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|             Engine::new(api, executor.clone(), &log) |             Engine::new(api, executor.clone(), &log) | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         let builder = builder_url |  | ||||||
|             .map(|url| { |  | ||||||
|                 let builder_client = BuilderHttpClient::new(url.clone(), builder_user_agent) |  | ||||||
|                     .map_err(Error::Builder)?; |  | ||||||
| 
 |  | ||||||
|                 info!( |  | ||||||
|                     log, |  | ||||||
|                     "Using external block builder"; |  | ||||||
|                     "builder_url" => ?url, |  | ||||||
|                     "builder_profit_threshold" => builder_profit_threshold, |  | ||||||
|                     "local_user_agent" => builder_client.get_user_agent(), |  | ||||||
|                 ); |  | ||||||
|                 Ok::<_, Error>(builder_client) |  | ||||||
|             }) |  | ||||||
|             .transpose()?; |  | ||||||
| 
 |  | ||||||
|         let inner = Inner { |         let inner = Inner { | ||||||
|             engine: Arc::new(engine), |             engine: Arc::new(engine), | ||||||
|             builder, |             builder: ArcSwapOption::empty(), | ||||||
|             execution_engine_forkchoice_lock: <_>::default(), |             execution_engine_forkchoice_lock: <_>::default(), | ||||||
|             suggested_fee_recipient, |             suggested_fee_recipient, | ||||||
|             proposer_preparation_data: Mutex::new(HashMap::new()), |             proposer_preparation_data: Mutex::new(HashMap::new()), | ||||||
| @ -358,19 +341,45 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|             last_new_payload_errored: RwLock::new(false), |             last_new_payload_errored: RwLock::new(false), | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         Ok(Self { |         let el = Self { | ||||||
|             inner: Arc::new(inner), |             inner: Arc::new(inner), | ||||||
|         }) |         }; | ||||||
|     } | 
 | ||||||
| } |         if let Some(builder_url) = builder_url { | ||||||
|  |             el.set_builder_url(builder_url, builder_user_agent)?; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Ok(el) | ||||||
|  |     } | ||||||
| 
 | 
 | ||||||
| impl<T: EthSpec> ExecutionLayer<T> { |  | ||||||
|     fn engine(&self) -> &Arc<Engine> { |     fn engine(&self) -> &Arc<Engine> { | ||||||
|         &self.inner.engine |         &self.inner.engine | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub fn builder(&self) -> &Option<BuilderHttpClient> { |     pub fn builder(&self) -> Option<Arc<BuilderHttpClient>> { | ||||||
|         &self.inner.builder |         self.inner.builder.load_full() | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Set the builder URL after initialization.
 | ||||||
|  |     ///
 | ||||||
|  |     /// This is useful for breaking circular dependencies between mock ELs and mock builders in
 | ||||||
|  |     /// tests.
 | ||||||
|  |     pub fn set_builder_url( | ||||||
|  |         &self, | ||||||
|  |         builder_url: SensitiveUrl, | ||||||
|  |         builder_user_agent: Option<String>, | ||||||
|  |     ) -> Result<(), Error> { | ||||||
|  |         let builder_client = BuilderHttpClient::new(builder_url.clone(), builder_user_agent) | ||||||
|  |             .map_err(Error::Builder)?; | ||||||
|  |         info!( | ||||||
|  |             self.log(), | ||||||
|  |             "Using external block builder"; | ||||||
|  |             "builder_url" => ?builder_url, | ||||||
|  |             "builder_profit_threshold" => self.inner.builder_profit_threshold.as_u128(), | ||||||
|  |             "local_user_agent" => builder_client.get_user_agent(), | ||||||
|  |         ); | ||||||
|  |         self.inner.builder.swap(Some(Arc::new(builder_client))); | ||||||
|  |         Ok(()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
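The `Option<BuilderHttpClient>` to `ArcSwapOption` change above is what makes `set_builder_url` safe to call after construction: readers take lock-free `Arc` snapshots while a writer installs the client atomically. A minimal sketch of the pattern, assuming the `arc_swap` crate (the `Client` type and `slot` name are illustrative, not Lighthouse code):

    use arc_swap::ArcSwapOption;
    use std::sync::Arc;

    struct Client(String);

    fn main() {
        // Start empty, as `ArcSwapOption::empty()` does in the constructor above.
        let slot: ArcSwapOption<Client> = ArcSwapOption::empty();
        assert!(slot.load_full().is_none());

        // Atomically publish a client once its URL is known (cf. `set_builder_url`).
        slot.swap(Some(Arc::new(Client("https://builder.example".into()))));

        // Readers clone the `Arc` snapshot without blocking the writer.
        if let Some(client) = slot.load_full() {
            println!("using builder at {}", client.0);
        }
    }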
|     /// Cache a full payload, keyed on the `tree_hash_root` of the payload
 |     /// Cache a full payload, keyed on the `tree_hash_root` of the payload
 | ||||||
| @ -380,7 +389,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
| 
 | 
 | ||||||
|     /// Attempt to retrieve a full payload from the payload cache by the payload root
 |     /// Attempt to retrieve a full payload from the payload cache by the payload root
 | ||||||
|     pub fn get_payload_by_root(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { |     pub fn get_payload_by_root(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { | ||||||
|         self.inner.payload_cache.pop(root) |         self.inner.payload_cache.get(root) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub fn executor(&self) -> &TaskExecutor { |     pub fn executor(&self) -> &TaskExecutor { | ||||||
| @ -502,24 +511,6 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|         self.spawn(preparation_cleaner, "exec_preparation_cleanup"); |         self.spawn(preparation_cleaner, "exec_preparation_cleanup"); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Spawns a routine that polls the `exchange_transition_configuration` endpoint.
 |  | ||||||
|     pub fn spawn_transition_configuration_poll(&self, spec: ChainSpec) { |  | ||||||
|         let routine = |el: ExecutionLayer<T>| async move { |  | ||||||
|             loop { |  | ||||||
|                 if let Err(e) = el.exchange_transition_configuration(&spec).await { |  | ||||||
|                     error!( |  | ||||||
|                         el.log(), |  | ||||||
|                         "Failed to check transition config"; |  | ||||||
|                         "error" => ?e |  | ||||||
|                     ); |  | ||||||
|                 } |  | ||||||
|                 sleep(CONFIG_POLL_INTERVAL).await; |  | ||||||
|             } |  | ||||||
|         }; |  | ||||||
| 
 |  | ||||||
|         self.spawn(routine, "exec_config_poll"); |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     /// Returns `true` if the execution engine is synced and reachable.
 |     /// Returns `true` if the execution engine is synced and reachable.
 | ||||||
|     pub async fn is_synced(&self) -> bool { |     pub async fn is_synced(&self) -> bool { | ||||||
|         self.engine().is_synced().await |         self.engine().is_synced().await | ||||||
| @ -529,9 +520,9 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|     ///
 |     ///
 | ||||||
|     /// This function is a wrapper over `Self::is_synced` that makes an additional
 |     /// This function is a wrapper over `Self::is_synced` that makes an additional
 | ||||||
|     /// check for the execution layer sync status. Checks if the latest block has
 |     /// check for the execution layer sync status. Checks if the latest block has
 | ||||||
|     /// a `block_number != 0`.
 |     /// a `block_number != 0` *if* the `current_slot` is also `> 0`.
 | ||||||
|     /// Returns the `Self::is_synced` response if unable to get latest block.
 |     /// Returns the `Self::is_synced` response if unable to get latest block.
 | ||||||
|     pub async fn is_synced_for_notifier(&self) -> bool { |     pub async fn is_synced_for_notifier(&self, current_slot: Slot) -> bool { | ||||||
|         let synced = self.is_synced().await; |         let synced = self.is_synced().await; | ||||||
|         if synced { |         if synced { | ||||||
|             if let Ok(Some(block)) = self |             if let Ok(Some(block)) = self | ||||||
| @ -540,7 +531,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|                 .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) |                 .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) | ||||||
|                 .await |                 .await | ||||||
|             { |             { | ||||||
|                 if block.block_number == 0 { |                 if block.block_number == 0 && current_slot > 0 { | ||||||
|                     return false; |                     return false; | ||||||
|                 } |                 } | ||||||
|             } |             } | ||||||
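Condensed, hypothetical form of the `is_synced_for_notifier` rule above: an EL whose head is still block 0 is only reported as unsynced once the chain is past genesis (`current_slot > 0`).

    fn synced_for_notifier(engine_synced: bool, latest_block_number: u64, current_slot: u64) -> bool {
        // Treat "EL head is block 0" as unsynced only after genesis.
        engine_synced && !(latest_block_number == 0 && current_slot > 0)
    }

    fn main() {
        assert!(synced_for_notifier(true, 0, 0)); // at genesis: still synced
        assert!(!synced_for_notifier(true, 0, 5)); // past genesis: syncing
    }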
| @ -826,16 +817,23 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
| 
 | 
 | ||||||
|                             let relay_value = relay.data.message.value; |                             let relay_value = relay.data.message.value; | ||||||
|                             let local_value = *local.block_value(); |                             let local_value = *local.block_value(); | ||||||
|                             if !self.inner.always_prefer_builder_payload |                             if !self.inner.always_prefer_builder_payload { | ||||||
|                                 && local_value >= relay_value |                                 if local_value >= relay_value { | ||||||
|                             { |                                     info!( | ||||||
|                                 info!( |                                         self.log(), | ||||||
|                                     self.log(), |                                         "Local block is more profitable than relay block"; | ||||||
|                                     "Local block is more profitable than relay block"; |                                         "local_block_value" => %local_value, | ||||||
|                                     "local_block_value" => %local_value, |                                         "relay_value" => %relay_value | ||||||
|                                     "relay_value" => %relay_value |                                     ); | ||||||
|                                 ); |                                     return Ok(ProvenancedPayload::Local(local)); | ||||||
|                                 return Ok(ProvenancedPayload::Local(local)); |                                 } else { | ||||||
|  |                                     info!( | ||||||
|  |                                         self.log(), | ||||||
|  |                                         "Relay block is more profitable than local block"; | ||||||
|  |                                         "local_block_value" => %local_value, | ||||||
|  |                                         "relay_value" => %relay_value | ||||||
|  |                                     ); | ||||||
|  |                                 } | ||||||
|                             } |                             } | ||||||
| 
 | 
 | ||||||
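The comparison itself reduces to a one-line predicate. A sketch (names hypothetical): the local payload wins ties under `local_value >= relay_value`, unless `always_prefer_builder_payload` is set, in which case the relay bid is taken regardless of value.

    fn prefer_local(always_prefer_builder: bool, local_value: u128, relay_value: u128) -> bool {
        // Ties go to the local payload, mirroring `local_value >= relay_value` above.
        !always_prefer_builder && local_value >= relay_value
    }

    fn main() {
        assert!(prefer_local(false, 10, 10));
        assert!(!prefer_local(true, 10, 1));
    }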
|                             match verify_builder_bid( |                             match verify_builder_bid( | ||||||
| @ -851,7 +849,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|                                     BlockProposalContents::Payload { |                                     BlockProposalContents::Payload { | ||||||
|                                         payload: relay.data.message.header, |                                         payload: relay.data.message.header, | ||||||
|                                         block_value: relay.data.message.value, |                                         block_value: relay.data.message.value, | ||||||
|                                         _phantom: PhantomData::default(), |                                         _phantom: PhantomData, | ||||||
|                                     }, |                                     }, | ||||||
|                                 )), |                                 )), | ||||||
|                                 Err(reason) if !reason.payload_invalid() => { |                                 Err(reason) if !reason.payload_invalid() => { | ||||||
| @ -906,7 +904,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|                                     BlockProposalContents::Payload { |                                     BlockProposalContents::Payload { | ||||||
|                                         payload: relay.data.message.header, |                                         payload: relay.data.message.header, | ||||||
|                                         block_value: relay.data.message.value, |                                         block_value: relay.data.message.value, | ||||||
|                                         _phantom: PhantomData::default(), |                                         _phantom: PhantomData, | ||||||
|                                     }, |                                     }, | ||||||
|                                 )), |                                 )), | ||||||
|                                 // If the payload is valid then use it. The local EE failed
 |                                 // If the payload is valid then use it. The local EE failed
 | ||||||
| @ -915,7 +913,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|                                     BlockProposalContents::Payload { |                                     BlockProposalContents::Payload { | ||||||
|                                         payload: relay.data.message.header, |                                         payload: relay.data.message.header, | ||||||
|                                         block_value: relay.data.message.value, |                                         block_value: relay.data.message.value, | ||||||
|                                         _phantom: PhantomData::default(), |                                         _phantom: PhantomData, | ||||||
|                                     }, |                                     }, | ||||||
|                                 )), |                                 )), | ||||||
|                                 Err(reason) => { |                                 Err(reason) => { | ||||||
| @ -1122,7 +1120,7 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|                 Ok(BlockProposalContents::Payload { |                 Ok(BlockProposalContents::Payload { | ||||||
|                     payload: execution_payload.into(), |                     payload: execution_payload.into(), | ||||||
|                     block_value, |                     block_value, | ||||||
|                     _phantom: PhantomData::default(), |                     _phantom: PhantomData, | ||||||
|                 }) |                 }) | ||||||
|             }) |             }) | ||||||
|             .await |             .await | ||||||
| @ -1311,53 +1309,6 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|         .map_err(Error::EngineError) |         .map_err(Error::EngineError) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub async fn exchange_transition_configuration(&self, spec: &ChainSpec) -> Result<(), Error> { |  | ||||||
|         let local = TransitionConfigurationV1 { |  | ||||||
|             terminal_total_difficulty: spec.terminal_total_difficulty, |  | ||||||
|             terminal_block_hash: spec.terminal_block_hash, |  | ||||||
|             terminal_block_number: 0, |  | ||||||
|         }; |  | ||||||
| 
 |  | ||||||
|         let result = self |  | ||||||
|             .engine() |  | ||||||
|             .request(|engine| engine.api.exchange_transition_configuration_v1(local)) |  | ||||||
|             .await; |  | ||||||
| 
 |  | ||||||
|         match result { |  | ||||||
|             Ok(remote) => { |  | ||||||
|                 if local.terminal_total_difficulty != remote.terminal_total_difficulty |  | ||||||
|                     || local.terminal_block_hash != remote.terminal_block_hash |  | ||||||
|                 { |  | ||||||
|                     error!( |  | ||||||
|                         self.log(), |  | ||||||
|                         "Execution client config mismatch"; |  | ||||||
|                         "msg" => "ensure lighthouse and the execution client are up-to-date and \ |  | ||||||
|                                   configured consistently",
 |  | ||||||
|                         "remote" => ?remote, |  | ||||||
|                         "local" => ?local, |  | ||||||
|                     ); |  | ||||||
|                     Err(Error::EngineError(Box::new(EngineError::Api { |  | ||||||
|                         error: ApiError::TransitionConfigurationMismatch, |  | ||||||
|                     }))) |  | ||||||
|                 } else { |  | ||||||
|                     debug!( |  | ||||||
|                         self.log(), |  | ||||||
|                         "Execution client config is OK"; |  | ||||||
|                     ); |  | ||||||
|                     Ok(()) |  | ||||||
|                 } |  | ||||||
|             } |  | ||||||
|             Err(e) => { |  | ||||||
|                 error!( |  | ||||||
|                     self.log(), |  | ||||||
|                     "Unable to get transition config"; |  | ||||||
|                     "error" => ?e, |  | ||||||
|                 ); |  | ||||||
|                 Err(Error::EngineError(Box::new(e))) |  | ||||||
|             } |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     /// Returns the execution engine capabilities resulting from a call to
 |     /// Returns the execution engine capabilities resulting from a call to
 | ||||||
|     /// engine_exchangeCapabilities. If the capabilities cache is not populated,
 |     /// engine_exchangeCapabilities. If the capabilities cache is not populated,
 | ||||||
|     /// or if it is populated with a cached result of age >= `age_limit`, this
 |     /// or if it is populated with a cached result of age >= `age_limit`, this
 | ||||||
| @ -1654,6 +1605,17 @@ impl<T: EthSpec> ExecutionLayer<T> { | |||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     pub async fn get_block_by_number( | ||||||
|  |         &self, | ||||||
|  |         query: BlockByNumberQuery<'_>, | ||||||
|  |     ) -> Result<Option<ExecutionBlock>, Error> { | ||||||
|  |         self.engine() | ||||||
|  |             .request(|engine| async move { engine.api.get_block_by_number(query).await }) | ||||||
|  |             .await | ||||||
|  |             .map_err(Box::new) | ||||||
|  |             .map_err(Error::EngineError) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
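A hypothetical call site for the new public `get_block_by_number` wrapper, mirroring the notifier code earlier in this diff; the import paths and re-exports here are assumptions, not verified against the crate:

    // Hypothetical usage; types and paths assumed from this diff.
    use execution_layer::{BlockByNumberQuery, ExecutionBlock, ExecutionLayer, LATEST_TAG};
    use types::EthSpec;

    // Fetch the EL's view of its latest block via the "latest" tag.
    async fn latest_el_block<T: EthSpec>(el: &ExecutionLayer<T>) -> Option<ExecutionBlock> {
        el.get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG))
            .await
            .ok()
            .flatten()
    }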
|     pub async fn get_payload_by_hash_legacy( |     pub async fn get_payload_by_hash_legacy( | ||||||
|         &self, |         &self, | ||||||
|         hash: ExecutionBlockHash, |         hash: ExecutionBlockHash, | ||||||
| @ -2011,6 +1973,22 @@ async fn timed_future<F: Future<Output = T>, T>(metric: &str, future: F) -> (T, | |||||||
|     (result, duration) |     (result, duration) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | fn noop<T: EthSpec>( | ||||||
|  |     _: &ExecutionLayer<T>, | ||||||
|  |     _: ExecutionPayloadRef<T>, | ||||||
|  | ) -> Option<ExecutionPayload<T>> { | ||||||
|  |     None | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #[cfg(test)] | ||||||
|  | /// Returns the duration since the unix epoch.
 | ||||||
|  | fn timestamp_now() -> u64 { | ||||||
|  |     SystemTime::now() | ||||||
|  |         .duration_since(UNIX_EPOCH) | ||||||
|  |         .unwrap_or_else(|_| Duration::from_secs(0)) | ||||||
|  |         .as_secs() | ||||||
|  | } | ||||||
|  | 
 | ||||||
| #[cfg(test)] | #[cfg(test)] | ||||||
| mod test { | mod test { | ||||||
|     use super::*; |     use super::*; | ||||||
| @ -2157,19 +2135,3 @@ mod test { | |||||||
|             .await; |             .await; | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 |  | ||||||
| fn noop<T: EthSpec>( |  | ||||||
|     _: &ExecutionLayer<T>, |  | ||||||
|     _: ExecutionPayloadRef<T>, |  | ||||||
| ) -> Option<ExecutionPayload<T>> { |  | ||||||
|     None |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #[cfg(test)] |  | ||||||
| /// Returns the duration since the unix epoch.
 |  | ||||||
| fn timestamp_now() -> u64 { |  | ||||||
|     SystemTime::now() |  | ||||||
|         .duration_since(UNIX_EPOCH) |  | ||||||
|         .unwrap_or_else(|_| Duration::from_secs(0)) |  | ||||||
|         .as_secs() |  | ||||||
| } |  | ||||||
|  | |||||||
| @ -30,4 +30,8 @@ impl<T: EthSpec> PayloadCache<T> { | |||||||
|     pub fn pop(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { |     pub fn pop(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { | ||||||
|         self.payloads.lock().pop(&PayloadCacheId(*root)) |         self.payloads.lock().pop(&PayloadCacheId(*root)) | ||||||
|     } |     } | ||||||
|  | 
 | ||||||
|  |     pub fn get(&self, hash: &Hash256) -> Option<ExecutionPayload<T>> { | ||||||
|  |         self.payloads.lock().get(&PayloadCacheId(*hash)).cloned() | ||||||
|  |     } | ||||||
| } | } | ||||||
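The distinction between the existing `pop` and the new `get` is single-use versus reusable retrieval: `pop` removes the payload, `get` clones it and leaves the cache intact. A self-contained sketch of the same semantics, with a plain `HashMap` standing in for the LRU store:

    use std::collections::HashMap;
    use std::sync::Mutex;

    struct Cache {
        payloads: Mutex<HashMap<u64, String>>,
    }

    impl Cache {
        /// Removes the entry: each payload can be taken at most once.
        fn pop(&self, root: &u64) -> Option<String> {
            self.payloads.lock().unwrap().remove(root)
        }
        /// Clones the entry and leaves it cached, like the new `get` above.
        fn get(&self, root: &u64) -> Option<String> {
            self.payloads.lock().unwrap().get(root).cloned()
        }
    }

    fn main() {
        let cache = Cache {
            payloads: Mutex::new(HashMap::from([(1, "payload".to_string())])),
        };
        assert!(cache.get(&1).is_some());
        assert!(cache.get(&1).is_some()); // still cached after `get`
        assert!(cache.pop(&1).is_some());
        assert!(cache.get(&1).is_none()); // gone after `pop`
    }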
|  | |||||||
| @ -357,15 +357,6 @@ pub async fn handle_rpc<T: EthSpec>( | |||||||
| 
 | 
 | ||||||
|             Ok(serde_json::to_value(response).unwrap()) |             Ok(serde_json::to_value(response).unwrap()) | ||||||
|         } |         } | ||||||
|         ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => { |  | ||||||
|             let block_generator = ctx.execution_block_generator.read(); |  | ||||||
|             let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 { |  | ||||||
|                 terminal_total_difficulty: block_generator.terminal_total_difficulty, |  | ||||||
|                 terminal_block_hash: block_generator.terminal_block_hash, |  | ||||||
|                 terminal_block_number: block_generator.terminal_block_number, |  | ||||||
|             }; |  | ||||||
|             Ok(serde_json::to_value(transition_config).unwrap()) |  | ||||||
|         } |  | ||||||
|         ENGINE_EXCHANGE_CAPABILITIES => { |         ENGINE_EXCHANGE_CAPABILITIES => { | ||||||
|             let engine_capabilities = ctx.engine_capabilities.read(); |             let engine_capabilities = ctx.engine_capabilities.read(); | ||||||
|             Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) |             Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) | ||||||
|  | |||||||
| @ -11,11 +11,17 @@ use ethereum_consensus::{ | |||||||
| }; | }; | ||||||
| use fork_choice::ForkchoiceUpdateParameters; | use fork_choice::ForkchoiceUpdateParameters; | ||||||
| use mev_rs::{ | use mev_rs::{ | ||||||
|     bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, |     blinded_block_provider::Server as BlindedBlockProviderServer, | ||||||
|     capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, |     signing::{sign_builder_message, verify_signed_builder_message}, | ||||||
|     sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, |     types::{ | ||||||
|     BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, |         bellatrix::{ | ||||||
|     SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, |             BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, | ||||||
|  |         }, | ||||||
|  |         capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, | ||||||
|  |         BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, | ||||||
|  |         SignedBuilderBid, SignedValidatorRegistration, | ||||||
|  |     }, | ||||||
|  |     Error as MevError, | ||||||
| }; | }; | ||||||
| use parking_lot::RwLock; | use parking_lot::RwLock; | ||||||
| use sensitive_url::SensitiveUrl; | use sensitive_url::SensitiveUrl; | ||||||
| @ -34,6 +40,11 @@ use types::{ | |||||||
|     Uint256, |     Uint256, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | pub type MockBuilderServer = axum::Server< | ||||||
|  |     hyper::server::conn::AddrIncoming, | ||||||
|  |     axum::routing::IntoMakeService<axum::routing::Router>, | ||||||
|  | >; | ||||||
|  | 
 | ||||||
| #[derive(Clone)] | #[derive(Clone)] | ||||||
| pub enum Operation { | pub enum Operation { | ||||||
|     FeeRecipient(Address), |     FeeRecipient(Address), | ||||||
| @ -47,7 +58,7 @@ pub enum Operation { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl Operation { | impl Operation { | ||||||
|     fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { |     fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), MevError> { | ||||||
|         match self { |         match self { | ||||||
|             Operation::FeeRecipient(fee_recipient) => { |             Operation::FeeRecipient(fee_recipient) => { | ||||||
|                 *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? |                 *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? | ||||||
| @ -73,7 +84,7 @@ pub trait BidStuff { | |||||||
|     fn prev_randao_mut(&mut self) -> &mut Hash32; |     fn prev_randao_mut(&mut self) -> &mut Hash32; | ||||||
|     fn block_number_mut(&mut self) -> &mut u64; |     fn block_number_mut(&mut self) -> &mut u64; | ||||||
|     fn timestamp_mut(&mut self) -> &mut u64; |     fn timestamp_mut(&mut self) -> &mut u64; | ||||||
|     fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; |     fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; | ||||||
| 
 | 
 | ||||||
|     fn sign_builder_message( |     fn sign_builder_message( | ||||||
|         &mut self, |         &mut self, | ||||||
| @ -134,11 +145,9 @@ impl BidStuff for BuilderBid { | |||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { |     fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { | ||||||
|         match self { |         match self { | ||||||
|             Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( |             Self::Bellatrix(_) => Err(MevError::InvalidFork), | ||||||
|                 "withdrawals_root called on bellatrix bid".to_string(), |  | ||||||
|             )), |  | ||||||
|             Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), |             Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| @ -166,19 +175,25 @@ impl BidStuff for BuilderBid { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| pub struct TestingBuilder<E: EthSpec> { | #[derive(Clone)] | ||||||
|     server: BlindedBlockProviderServer<MockBuilder<E>>, | pub struct MockBuilder<E: EthSpec> { | ||||||
|     pub builder: MockBuilder<E>, |     el: ExecutionLayer<E>, | ||||||
|  |     beacon_client: BeaconNodeHttpClient, | ||||||
|  |     spec: ChainSpec, | ||||||
|  |     context: Arc<Context>, | ||||||
|  |     val_registration_cache: Arc<RwLock<HashMap<BlsPublicKey, SignedValidatorRegistration>>>, | ||||||
|  |     builder_sk: SecretKey, | ||||||
|  |     operations: Arc<RwLock<Vec<Operation>>>, | ||||||
|  |     invalidate_signatures: Arc<RwLock<bool>>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<E: EthSpec> TestingBuilder<E> { | impl<E: EthSpec> MockBuilder<E> { | ||||||
|     pub fn new( |     pub fn new_for_testing( | ||||||
|         mock_el_url: SensitiveUrl, |         mock_el_url: SensitiveUrl, | ||||||
|         builder_url: SensitiveUrl, |  | ||||||
|         beacon_url: SensitiveUrl, |         beacon_url: SensitiveUrl, | ||||||
|         spec: ChainSpec, |         spec: ChainSpec, | ||||||
|         executor: TaskExecutor, |         executor: TaskExecutor, | ||||||
|     ) -> Self { |     ) -> (Self, MockBuilderServer) { | ||||||
|         let file = NamedTempFile::new().unwrap(); |         let file = NamedTempFile::new().unwrap(); | ||||||
|         let path = file.path().into(); |         let path = file.path().into(); | ||||||
|         std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); |         std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); | ||||||
| @ -207,39 +222,13 @@ impl<E: EthSpec> TestingBuilder<E> { | |||||||
|             spec, |             spec, | ||||||
|             context, |             context, | ||||||
|         ); |         ); | ||||||
|         let port = builder_url.full.port().unwrap(); |         let host: Ipv4Addr = Ipv4Addr::LOCALHOST; | ||||||
|         let host: Ipv4Addr = builder_url |         let port = 0; | ||||||
|             .full |         let provider = BlindedBlockProviderServer::new(host, port, builder.clone()); | ||||||
|             .host_str() |         let server = provider.serve(); | ||||||
|             .unwrap() |         (builder, server) | ||||||
|             .to_string() |  | ||||||
|             .parse() |  | ||||||
|             .unwrap(); |  | ||||||
|         let server = BlindedBlockProviderServer::new(host, port, builder.clone()); |  | ||||||
|         Self { server, builder } |  | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
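`port = 0` in `new_for_testing` above delegates port selection to the OS, so parallel test builders never collide on a hard-coded port. A standalone illustration of the same trick using the standard library:

    use std::net::TcpListener;

    fn main() -> std::io::Result<()> {
        // Port 0 asks the OS for any free port; read the real port back.
        let listener = TcpListener::bind(("127.0.0.1", 0))?;
        println!("bound to {}", listener.local_addr()?);
        Ok(())
    }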
|     pub async fn run(&self) { |  | ||||||
|         let server = self.server.serve(); |  | ||||||
|         if let Err(err) = server.await { |  | ||||||
|             println!("error while listening for incoming: {err}") |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #[derive(Clone)] |  | ||||||
| pub struct MockBuilder<E: EthSpec> { |  | ||||||
|     el: ExecutionLayer<E>, |  | ||||||
|     beacon_client: BeaconNodeHttpClient, |  | ||||||
|     spec: ChainSpec, |  | ||||||
|     context: Arc<Context>, |  | ||||||
|     val_registration_cache: Arc<RwLock<HashMap<BlsPublicKey, SignedValidatorRegistration>>>, |  | ||||||
|     builder_sk: SecretKey, |  | ||||||
|     operations: Arc<RwLock<Vec<Operation>>>, |  | ||||||
|     invalidate_signatures: Arc<RwLock<bool>>, |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| impl<E: EthSpec> MockBuilder<E> { |  | ||||||
|     pub fn new( |     pub fn new( | ||||||
|         el: ExecutionLayer<E>, |         el: ExecutionLayer<E>, | ||||||
|         beacon_client: BeaconNodeHttpClient, |         beacon_client: BeaconNodeHttpClient, | ||||||
| @ -274,7 +263,7 @@ impl<E: EthSpec> MockBuilder<E> { | |||||||
|         *self.invalidate_signatures.write() = false; |         *self.invalidate_signatures.write() = false; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { |     fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), MevError> { | ||||||
|         let mut guard = self.operations.write(); |         let mut guard = self.operations.write(); | ||||||
|         while let Some(op) = guard.pop() { |         while let Some(op) = guard.pop() { | ||||||
|             op.apply(bid)?; |             op.apply(bid)?; | ||||||
| @ -288,7 +277,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | |||||||
|     async fn register_validators( |     async fn register_validators( | ||||||
|         &self, |         &self, | ||||||
|         registrations: &mut [SignedValidatorRegistration], |         registrations: &mut [SignedValidatorRegistration], | ||||||
|     ) -> Result<(), BlindedBlockProviderError> { |     ) -> Result<(), MevError> { | ||||||
|         for registration in registrations { |         for registration in registrations { | ||||||
|             let pubkey = registration.message.public_key.clone(); |             let pubkey = registration.message.public_key.clone(); | ||||||
|             let message = &mut registration.message; |             let message = &mut registration.message; | ||||||
| @ -307,10 +296,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | |||||||
|         Ok(()) |         Ok(()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     async fn fetch_best_bid( |     async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result<SignedBuilderBid, MevError> { | ||||||
|         &self, |  | ||||||
|         bid_request: &BidRequest, |  | ||||||
|     ) -> Result<SignedBuilderBid, BlindedBlockProviderError> { |  | ||||||
|         let slot = Slot::new(bid_request.slot); |         let slot = Slot::new(bid_request.slot); | ||||||
|         let fork = self.spec.fork_name_at_slot::<E>(slot); |         let fork = self.spec.fork_name_at_slot::<E>(slot); | ||||||
|         let signed_cached_data = self |         let signed_cached_data = self | ||||||
| @ -336,7 +322,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | |||||||
|             .map_err(convert_err)? |             .map_err(convert_err)? | ||||||
|             .block_hash(); |             .block_hash(); | ||||||
|         if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { |         if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { | ||||||
|             return Err(BlindedBlockProviderError::Custom(format!( |             return Err(custom_err(format!( | ||||||
|                 "head mismatch: {} {}", |                 "head mismatch: {} {}", | ||||||
|                 head_execution_hash, bid_request.parent_hash |                 head_execution_hash, bid_request.parent_hash | ||||||
|             ))); |             ))); | ||||||
| @ -396,7 +382,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | |||||||
|             .get_debug_beacon_states(StateId::Head) |             .get_debug_beacon_states(StateId::Head) | ||||||
|             .await |             .await | ||||||
|             .map_err(convert_err)? |             .map_err(convert_err)? | ||||||
|             .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? |             .ok_or_else(|| custom_err("missing head state".to_string()))? | ||||||
|             .data; |             .data; | ||||||
|         let prev_randao = head_state |         let prev_randao = head_state | ||||||
|             .get_randao_mix(head_state.current_epoch()) |             .get_randao_mix(head_state.current_epoch()) | ||||||
| @ -409,10 +395,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | |||||||
|                 PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) |                 PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) | ||||||
|             } |             } | ||||||
|             ForkName::Base | ForkName::Altair => { |             ForkName::Base | ForkName::Altair => { | ||||||
|                 return Err(BlindedBlockProviderError::Custom(format!( |                 return Err(MevError::InvalidFork); | ||||||
|                     "Unsupported fork: {}", |  | ||||||
|                     fork |  | ||||||
|                 ))); |  | ||||||
|             } |             } | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
| @ -452,12 +435,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | |||||||
|                 value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, |                 value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, | ||||||
|                 public_key: self.builder_sk.public_key(), |                 public_key: self.builder_sk.public_key(), | ||||||
|             }), |             }), | ||||||
|             ForkName::Base | ForkName::Altair => { |             ForkName::Base | ForkName::Altair => return Err(MevError::InvalidFork), | ||||||
|                 return Err(BlindedBlockProviderError::Custom(format!( |  | ||||||
|                     "Unsupported fork: {}", |  | ||||||
|                     fork |  | ||||||
|                 ))) |  | ||||||
|             } |  | ||||||
|         }; |         }; | ||||||
|         *message.gas_limit_mut() = cached_data.gas_limit; |         *message.gas_limit_mut() = cached_data.gas_limit; | ||||||
| 
 | 
 | ||||||
| @ -475,7 +453,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | |||||||
|     async fn open_bid( |     async fn open_bid( | ||||||
|         &self, |         &self, | ||||||
|         signed_block: &mut SignedBlindedBeaconBlock, |         signed_block: &mut SignedBlindedBeaconBlock, | ||||||
|     ) -> Result<ServerPayload, BlindedBlockProviderError> { |     ) -> Result<ServerPayload, MevError> { | ||||||
|         let node = match signed_block { |         let node = match signed_block { | ||||||
|             SignedBlindedBeaconBlock::Bellatrix(block) => { |             SignedBlindedBeaconBlock::Bellatrix(block) => { | ||||||
|                 block.message.body.execution_payload_header.hash_tree_root() |                 block.message.body.execution_payload_header.hash_tree_root() | ||||||
| @ -496,9 +474,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>( | pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>(ssz_rs_data: &T) -> Result<U, MevError> { | ||||||
|     ssz_rs_data: &T, |  | ||||||
| ) -> Result<U, BlindedBlockProviderError> { |  | ||||||
|     U::from_ssz_bytes( |     U::from_ssz_bytes( | ||||||
|         ssz_rs::serialize(ssz_rs_data) |         ssz_rs::serialize(ssz_rs_data) | ||||||
|             .map_err(convert_err)? |             .map_err(convert_err)? | ||||||
| @ -507,12 +483,17 @@ pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>( | |||||||
|     .map_err(convert_err) |     .map_err(convert_err) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>( | pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>(ssz_data: &T) -> Result<U, MevError> { | ||||||
|     ssz_data: &T, |  | ||||||
| ) -> Result<U, BlindedBlockProviderError> { |  | ||||||
|     ssz_rs::deserialize::<U>(&ssz_data.as_ssz_bytes()).map_err(convert_err) |     ssz_rs::deserialize::<U>(&ssz_data.as_ssz_bytes()).map_err(convert_err) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| fn convert_err<E: Debug>(e: E) -> BlindedBlockProviderError { | fn convert_err<E: Debug>(e: E) -> MevError { | ||||||
|     BlindedBlockProviderError::Custom(format!("{e:?}")) |     custom_err(format!("{e:?}")) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`.
 | ||||||
|  | fn custom_err(s: String) -> MevError { | ||||||
|  |     MevError::Consensus(ethereum_consensus::state_transition::Error::Io( | ||||||
|  |         std::io::Error::new(std::io::ErrorKind::Other, s), | ||||||
|  |     )) | ||||||
| } | } | ||||||
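The work-around generalizes: when an error enum loses its string-carrying variant, an arbitrary message can still be smuggled through any variant that accepts a `std::io::Error`. A generic sketch, where `MyError::Io` stands in for the real `MevError::Consensus(..Io(..))` chain above:

    use std::io;

    #[derive(Debug)]
    enum MyError {
        // Stand-in for the variant that happens to accept an `io::Error`.
        Io(io::Error),
    }

    fn custom_err(s: String) -> MyError {
        MyError::Io(io::Error::new(io::ErrorKind::Other, s))
    }

    fn main() {
        println!("{:?}", custom_err("head mismatch".into()));
    }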
|  | |||||||
| @ -31,7 +31,6 @@ impl<T: EthSpec> MockExecutionLayer<T> { | |||||||
|             None, |             None, | ||||||
|             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), |             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), | ||||||
|             spec, |             spec, | ||||||
|             None, |  | ||||||
|         ) |         ) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| @ -43,7 +42,6 @@ impl<T: EthSpec> MockExecutionLayer<T> { | |||||||
|         builder_threshold: Option<u128>, |         builder_threshold: Option<u128>, | ||||||
|         jwt_key: Option<JwtKey>, |         jwt_key: Option<JwtKey>, | ||||||
|         spec: ChainSpec, |         spec: ChainSpec, | ||||||
|         builder_url: Option<SensitiveUrl>, |  | ||||||
|     ) -> Self { |     ) -> Self { | ||||||
|         let handle = executor.handle().unwrap(); |         let handle = executor.handle().unwrap(); | ||||||
| 
 | 
 | ||||||
| @ -65,7 +63,6 @@ impl<T: EthSpec> MockExecutionLayer<T> { | |||||||
| 
 | 
 | ||||||
|         let config = Config { |         let config = Config { | ||||||
|             execution_endpoints: vec![url], |             execution_endpoints: vec![url], | ||||||
|             builder_url, |  | ||||||
|             secret_files: vec![path], |             secret_files: vec![path], | ||||||
|             suggested_fee_recipient: Some(Address::repeat_byte(42)), |             suggested_fee_recipient: Some(Address::repeat_byte(42)), | ||||||
|             builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), |             builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), | ||||||
|  | |||||||
| @ -25,7 +25,7 @@ use warp::{http::StatusCode, Filter, Rejection}; | |||||||
| use crate::EngineCapabilities; | use crate::EngineCapabilities; | ||||||
| pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; | pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; | ||||||
| pub use hook::Hook; | pub use hook::Hook; | ||||||
| pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; | pub use mock_builder::{Context as MockBuilderContext, MockBuilder, MockBuilderServer, Operation}; | ||||||
| pub use mock_execution_layer::MockExecutionLayer; | pub use mock_execution_layer::MockExecutionLayer; | ||||||
| 
 | 
 | ||||||
| pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; | pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; | ||||||
| @ -43,7 +43,6 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { | |||||||
|     get_payload_bodies_by_range_v1: true, |     get_payload_bodies_by_range_v1: true, | ||||||
|     get_payload_v1: true, |     get_payload_v1: true, | ||||||
|     get_payload_v2: true, |     get_payload_v2: true, | ||||||
|     exchange_transition_configuration_v1: true, |  | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| mod execution_block_generator; | mod execution_block_generator; | ||||||
|  | |||||||
| @ -2,23 +2,23 @@ | |||||||
| name = "genesis" | name = "genesis" | ||||||
| version = "0.2.0" | version = "0.2.0" | ||||||
| authors = ["Paul Hauner <paul@paulhauner.com>"] | authors = ["Paul Hauner <paul@paulhauner.com>"] | ||||||
| edition = "2021" | edition = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| eth1_test_rig = { path = "../../testing/eth1_test_rig" } | eth1_test_rig = { workspace = true } | ||||||
| sensitive_url = { path = "../../common/sensitive_url" } | sensitive_url = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| futures = "0.3.7" | futures = { workspace = true } | ||||||
| types = { path = "../../consensus/types"} | types = { workspace = true } | ||||||
| environment = { path = "../../lighthouse/environment"} | environment = { workspace = true } | ||||||
| eth1 = { path = "../eth1"} | eth1 = { workspace = true } | ||||||
| rayon = "1.4.1" | rayon = { workspace = true } | ||||||
| state_processing = { path = "../../consensus/state_processing" } | state_processing = { workspace = true } | ||||||
| merkle_proof = { path = "../../consensus/merkle_proof" } | merkle_proof = { workspace = true } | ||||||
| ethereum_ssz = "0.5.0" | ethereum_ssz = { workspace = true } | ||||||
| ethereum_hashing = "1.0.0-beta.2" | ethereum_hashing = { workspace = true } | ||||||
| tree_hash = "0.5.0" | tree_hash = { workspace = true } | ||||||
| tokio = { version = "1.14.0", features = ["full"] } | tokio = { workspace = true } | ||||||
| slog = "2.5.2" | slog = { workspace = true } | ||||||
| int_to_bytes = { path = "../../consensus/int_to_bytes" } | int_to_bytes = { workspace = true } | ||||||
|  | |||||||
| @ -39,7 +39,7 @@ pub fn genesis_deposits( | |||||||
| 
 | 
 | ||||||
|     Ok(deposit_data |     Ok(deposit_data | ||||||
|         .into_iter() |         .into_iter() | ||||||
|         .zip(proofs.into_iter()) |         .zip(proofs) | ||||||
|         .map(|(data, proof)| (data, proof.into())) |         .map(|(data, proof)| (data, proof.into())) | ||||||
|         .map(|(data, proof)| Deposit { proof, data }) |         .map(|(data, proof)| Deposit { proof, data }) | ||||||
|         .collect()) |         .collect()) | ||||||
|  | |||||||
| @ -2,53 +2,54 @@ | |||||||
| name = "http_api" | name = "http_api" | ||||||
| version = "0.1.0" | version = "0.1.0" | ||||||
| authors = ["Paul Hauner <paul@paulhauner.com>"] | authors = ["Paul Hauner <paul@paulhauner.com>"] | ||||||
| edition = "2021" | edition = { workspace = true } | ||||||
| autotests = false # using a single test binary compiles faster | autotests = false                               # using a single test binary compiles faster | ||||||
| 
 | 
 | ||||||
| [dependencies] | [dependencies] | ||||||
| warp = { version = "0.3.2", features = ["tls"] } | warp = { workspace = true } | ||||||
| serde = { version = "1.0.116", features = ["derive"] } | serde = { workspace = true } | ||||||
| tokio = { version = "1.14.0", features = ["macros","sync"] } | tokio = { workspace = true } | ||||||
| tokio-stream = { version = "0.1.3", features = ["sync"] } | tokio-stream = { workspace = true } | ||||||
| types = { path = "../../consensus/types" } | types = { workspace = true } | ||||||
| hex = "0.4.2" | hex = { workspace = true } | ||||||
| beacon_chain = { path = "../beacon_chain" } | beacon_chain = { workspace = true } | ||||||
| eth2 = { path = "../../common/eth2", features = ["lighthouse"] } | eth2 = { workspace = true } | ||||||
| slog = "2.5.2" | slog = { workspace = true } | ||||||
| network = { path = "../network" } | network = { workspace = true } | ||||||
| lighthouse_network = { path = "../lighthouse_network" } | lighthouse_network = { workspace = true } | ||||||
| eth1 = { path = "../eth1" } | eth1 = { workspace = true } | ||||||
| state_processing = { path = "../../consensus/state_processing" } | state_processing = { workspace = true } | ||||||
| lighthouse_version = { path = "../../common/lighthouse_version" } | lighthouse_version = { workspace = true } | ||||||
| lighthouse_metrics = { path = "../../common/lighthouse_metrics" } | lighthouse_metrics = { workspace = true } | ||||||
| lazy_static = "1.4.0" | lazy_static = { workspace = true } | ||||||
| warp_utils = { path = "../../common/warp_utils" } | warp_utils = { workspace = true } | ||||||
| slot_clock = { path = "../../common/slot_clock" } | slot_clock = { workspace = true } | ||||||
| ethereum_ssz = "0.5.0" | ethereum_ssz = { workspace = true } | ||||||
| bs58 = "0.4.0" | bs58 = "0.4.0" | ||||||
| futures = "0.3.8" | futures = { workspace = true } | ||||||
| execution_layer = {path = "../execution_layer"} | execution_layer = { workspace = true } | ||||||
| parking_lot = "0.12.0" | parking_lot = { workspace = true } | ||||||
| safe_arith = {path = "../../consensus/safe_arith"} | safe_arith = { workspace = true } | ||||||
| task_executor = { path = "../../common/task_executor" } | task_executor = { workspace = true } | ||||||
| lru = "0.7.7" | lru = { workspace = true } | ||||||
| tree_hash = "0.5.0" | tree_hash = { workspace = true } | ||||||
| sysinfo = "0.26.5" | sysinfo = { workspace = true } | ||||||
| system_health = { path = "../../common/system_health" } | system_health = { path = "../../common/system_health" } | ||||||
| directory = { path = "../../common/directory" } | directory = { workspace = true } | ||||||
| logging = { path = "../../common/logging" } | logging = { workspace = true } | ||||||
| ethereum_serde_utils = "0.5.0" | ethereum_serde_utils = { workspace = true } | ||||||
| operation_pool = { path = "../operation_pool" } | operation_pool = { workspace = true } | ||||||
| sensitive_url = { path = "../../common/sensitive_url" } | sensitive_url = { workspace = true } | ||||||
| unused_port = {path = "../../common/unused_port"} | store = { workspace = true } | ||||||
| store = { path = "../store" } | bytes = { workspace = true } | ||||||
|  | beacon_processor = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| environment = { path = "../../lighthouse/environment" } | environment = { workspace = true } | ||||||
| serde_json = "1.0.58" | serde_json = { workspace = true } | ||||||
| proto_array = { path = "../../consensus/proto_array" } | proto_array = { workspace = true } | ||||||
| genesis = { path = "../genesis" } | genesis = { workspace = true } | ||||||
| 
 | 
 | ||||||
| [[test]] | [[test]] | ||||||
| name = "bn_http_api_tests" | name = "bn_http_api_tests" | ||||||
| path = "tests/main.rs" | path = "tests/main.rs" | ||||||
|  | |||||||
| @ -1,9 +1,7 @@ | |||||||
| //! Contains the handler for the `GET validator/duties/attester/{epoch}` endpoint.
 | //! Contains the handler for the `GET validator/duties/attester/{epoch}` endpoint.
 | ||||||
| 
 | 
 | ||||||
| use crate::state_id::StateId; | use crate::state_id::StateId; | ||||||
| use beacon_chain::{ | use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; | ||||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, |  | ||||||
| }; |  | ||||||
| use eth2::types::{self as api_types}; | use eth2::types::{self as api_types}; | ||||||
| use slot_clock::SlotClock; | use slot_clock::SlotClock; | ||||||
| use state_processing::state_advance::partial_state_advance; | use state_processing::state_advance::partial_state_advance; | ||||||
| @ -32,12 +30,11 @@ pub fn attester_duties<T: BeaconChainTypes>( | |||||||
|     // will equal `current_epoch + 1`
 |     // will equal `current_epoch + 1`
 | ||||||
|     let tolerant_current_epoch = chain |     let tolerant_current_epoch = chain | ||||||
|         .slot_clock |         .slot_clock | ||||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) |         .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) | ||||||
|         .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? |         .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? | ||||||
|         .epoch(T::EthSpec::slots_per_epoch()); |         .epoch(T::EthSpec::slots_per_epoch()); | ||||||
| 
 | 
 | ||||||
|     if request_epoch == current_epoch |     if request_epoch == current_epoch | ||||||
|         || request_epoch == tolerant_current_epoch |  | ||||||
|         || request_epoch == current_epoch + 1 |         || request_epoch == current_epoch + 1 | ||||||
|         || request_epoch == tolerant_current_epoch + 1 |         || request_epoch == tolerant_current_epoch + 1 | ||||||
|     { |     { | ||||||
| @ -48,7 +45,7 @@ pub fn attester_duties<T: BeaconChainTypes>( | |||||||
|             request_epoch, current_epoch |             request_epoch, current_epoch | ||||||
|         ))) |         ))) | ||||||
|     } else { |     } else { | ||||||
|         // request_epoch < current_epoch
 |         // request_epoch < current_epoch; in fact we only allow `request_epoch == current_epoch - 1` in this case
 | ||||||
|         compute_historic_attester_duties(request_epoch, request_indices, chain) |         compute_historic_attester_duties(request_epoch, request_indices, chain) | ||||||
|     } |     } | ||||||
| } | } | ||||||
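The accepted-epoch window reduces to three equalities. Since `tolerant_current_epoch` is `current_epoch` or, near an epoch boundary, `current_epoch + 1`, the dropped `request_epoch == tolerant_current_epoch` arm was redundant. A sketch of the resulting predicate (names hypothetical):

    fn epoch_is_acceptable(request: u64, current: u64, tolerant_current: u64) -> bool {
        request == current || request == current + 1 || request == tolerant_current + 1
    }

    fn main() {
        assert!(epoch_is_acceptable(11, 10, 11)); // current + 1
        assert!(epoch_is_acceptable(12, 10, 11)); // tolerant epoch + 1, at a boundary
        assert!(!epoch_is_acceptable(13, 10, 11));
    }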
|  | |||||||
| @ -75,7 +75,7 @@ impl<T: EthSpec> PackingEfficiencyHandler<T> { | |||||||
|             available_attestations: HashSet::new(), |             available_attestations: HashSet::new(), | ||||||
|             included_attestations: HashMap::new(), |             included_attestations: HashMap::new(), | ||||||
|             committee_store: CommitteeStore::new(), |             committee_store: CommitteeStore::new(), | ||||||
|             _phantom: PhantomData::default(), |             _phantom: PhantomData, | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         handler.compute_epoch(start_epoch, &starting_state, spec)?; |         handler.compute_epoch(start_epoch, &starting_state, spec)?; | ||||||
|  | |||||||
| @ -49,7 +49,7 @@ pub fn get_block_rewards<T: BeaconChainTypes>( | |||||||
|         .map_err(beacon_chain_error)?; |         .map_err(beacon_chain_error)?; | ||||||
| 
 | 
 | ||||||
|     state |     state | ||||||
|         .build_all_caches(&chain.spec) |         .build_caches(&chain.spec) | ||||||
|         .map_err(beacon_state_error)?; |         .map_err(beacon_state_error)?; | ||||||
| 
 | 
 | ||||||
|     let mut reward_cache = Default::default(); |     let mut reward_cache = Default::default(); | ||||||
|  | |||||||
72  beacon_node/http_api/src/builder_states.rs  Normal file
|  | use crate::StateId; | ||||||
|  | use beacon_chain::{BeaconChain, BeaconChainTypes}; | ||||||
|  | use safe_arith::SafeArith; | ||||||
|  | use state_processing::per_block_processing::get_expected_withdrawals; | ||||||
|  | use state_processing::state_advance::partial_state_advance; | ||||||
|  | use std::sync::Arc; | ||||||
|  | use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; | ||||||
|  | 
 | ||||||
|  | const MAX_EPOCH_LOOKAHEAD: u64 = 2; | ||||||
|  | 
 | ||||||
|  | /// Get the withdrawals computed from the specified state that will be included in the block
 | ||||||
|  | /// built on that state.
 | ||||||
|  | pub fn get_next_withdrawals<T: BeaconChainTypes>( | ||||||
|  |     chain: &Arc<BeaconChain<T>>, | ||||||
|  |     mut state: BeaconState<T::EthSpec>, | ||||||
|  |     state_id: StateId, | ||||||
|  |     proposal_slot: Slot, | ||||||
|  | ) -> Result<Withdrawals<T::EthSpec>, warp::Rejection> { | ||||||
|  |     get_next_withdrawals_sanity_checks(chain, &state, proposal_slot)?; | ||||||
|  | 
 | ||||||
|  |     // advance the state to the epoch of the proposal slot.
 | ||||||
|  |     let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); | ||||||
|  |     let (state_root, _, _) = state_id.root(chain)?; | ||||||
|  |     if proposal_epoch != state.current_epoch() { | ||||||
|  |         if let Err(e) = | ||||||
|  |             partial_state_advance(&mut state, Some(state_root), proposal_slot, &chain.spec) | ||||||
|  |         { | ||||||
|  |             return Err(warp_utils::reject::custom_server_error(format!( | ||||||
|  |                 "failed to advance to the epoch of the proposal slot: {:?}", | ||||||
|  |                 e | ||||||
|  |             ))); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     match get_expected_withdrawals(&state, &chain.spec) { | ||||||
|  |         Ok(withdrawals) => Ok(withdrawals), | ||||||
|  |         Err(e) => Err(warp_utils::reject::custom_server_error(format!( | ||||||
|  |             "failed to get expected withdrawals: {:?}", | ||||||
|  |             e | ||||||
|  |         ))), | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn get_next_withdrawals_sanity_checks<T: BeaconChainTypes>( | ||||||
|  |     chain: &BeaconChain<T>, | ||||||
|  |     state: &BeaconState<T::EthSpec>, | ||||||
|  |     proposal_slot: Slot, | ||||||
|  | ) -> Result<(), warp::Rejection> { | ||||||
|  |     if proposal_slot <= state.slot() { | ||||||
|  |         return Err(warp_utils::reject::custom_bad_request( | ||||||
|  |             "proposal slot must be greater than the pre-state slot".to_string(), | ||||||
|  |         )); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     let fork = chain.spec.fork_name_at_slot::<T::EthSpec>(proposal_slot); | ||||||
|  |     if let ForkName::Base | ForkName::Altair | ForkName::Merge = fork { | ||||||
|  |         return Err(warp_utils::reject::custom_bad_request( | ||||||
|  |             "the specified state is a pre-capella state.".to_string(), | ||||||
|  |         )); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     let look_ahead_limit = MAX_EPOCH_LOOKAHEAD | ||||||
|  |         .safe_mul(T::EthSpec::slots_per_epoch()) | ||||||
|  |         .map_err(warp_utils::reject::arith_error)?; | ||||||
|  |     if proposal_slot >= state.slot() + look_ahead_limit { | ||||||
|  |         return Err(warp_utils::reject::custom_bad_request(format!( | ||||||
|  |             "proposal slot is greater than or equal to the look ahead limit: {look_ahead_limit}" | ||||||
|  |         ))); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     Ok(()) | ||||||
|  | } | ||||||
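Worked numbers for the sanity checks above, under the mainnet preset of 32 slots per epoch (an assumption here): with `MAX_EPOCH_LOOKAHEAD = 2`, the proposal slot must be strictly in the future and within a 64-slot window.

    fn proposal_slot_ok(state_slot: u64, proposal_slot: u64) -> bool {
        const MAX_EPOCH_LOOKAHEAD: u64 = 2;
        const SLOTS_PER_EPOCH: u64 = 32; // mainnet preset; an assumption
        let limit = MAX_EPOCH_LOOKAHEAD * SLOTS_PER_EPOCH;
        proposal_slot > state_slot && proposal_slot < state_slot + limit
    }

    fn main() {
        assert!(proposal_slot_ok(100, 101));
        assert!(proposal_slot_ok(100, 163));  // last slot inside the window
        assert!(!proposal_slot_ok(100, 164)); // hits the look-ahead limit
        assert!(!proposal_slot_ok(100, 100)); // must be strictly in the future
    }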
File diff suppressed because it is too large.
							| @ -3,7 +3,7 @@ | |||||||
| use crate::state_id::StateId; | use crate::state_id::StateId; | ||||||
| use beacon_chain::{ | use beacon_chain::{ | ||||||
|     beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, |     beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, | ||||||
|     BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, |     BeaconChain, BeaconChainError, BeaconChainTypes, | ||||||
| }; | }; | ||||||
| use eth2::types::{self as api_types}; | use eth2::types::{self as api_types}; | ||||||
| use safe_arith::SafeArith; | use safe_arith::SafeArith; | ||||||
| @ -33,7 +33,7 @@ pub fn proposer_duties<T: BeaconChainTypes>( | |||||||
|     // will equal `current_epoch + 1`
 |     // will equal `current_epoch + 1`
 | ||||||
|     let tolerant_current_epoch = chain |     let tolerant_current_epoch = chain | ||||||
|         .slot_clock |         .slot_clock | ||||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) |         .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) | ||||||
|         .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? |         .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? | ||||||
|         .epoch(T::EthSpec::slots_per_epoch()); |         .epoch(T::EthSpec::slots_per_epoch()); | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -1,13 +1,16 @@ | |||||||
| use crate::metrics; | use crate::metrics; | ||||||
| use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; | use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; | ||||||
| use beacon_chain::{ | use beacon_chain::{ | ||||||
|     BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, |     BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, | ||||||
|  |     NotifyExecutionLayer, | ||||||
| }; | }; | ||||||
|  | use eth2::types::{BroadcastValidation, ErrorMessage}; | ||||||
| use execution_layer::ProvenancedPayload; | use execution_layer::ProvenancedPayload; | ||||||
| use lighthouse_network::PubsubMessage; | use lighthouse_network::PubsubMessage; | ||||||
| use network::NetworkMessage; | use network::NetworkMessage; | ||||||
| use slog::{debug, error, info, warn, Logger}; | use slog::{debug, error, info, warn, Logger}; | ||||||
| use slot_clock::SlotClock; | use slot_clock::SlotClock; | ||||||
|  | use std::marker::PhantomData; | ||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
| use std::time::Duration; | use std::time::Duration; | ||||||
| use tokio::sync::mpsc::UnboundedSender; | use tokio::sync::mpsc::UnboundedSender; | ||||||
| @ -16,51 +19,138 @@ use types::{ | |||||||
|     AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, |     AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, | ||||||
|     FullPayload, Hash256, SignedBeaconBlock, |     FullPayload, Hash256, SignedBeaconBlock, | ||||||
| }; | }; | ||||||
| use warp::Rejection; | use warp::http::StatusCode; | ||||||
|  | use warp::{reply::Response, Rejection, Reply}; | ||||||
| 
 | 
 | ||||||
| pub enum ProvenancedBlock<T: EthSpec> { | pub enum ProvenancedBlock<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> { | ||||||
|     /// The payload was built using a local EE.
 |     /// The payload was built using a local EE.
 | ||||||
|     Local(Arc<SignedBeaconBlock<T, FullPayload<T>>>), |     Local(B, PhantomData<T>), | ||||||
|     /// The payload was built using a remote builder (e.g., via a mev-boost
 |     /// The payload was built using a remote builder (e.g., via a mev-boost
 | ||||||
|     /// compatible relay).
 |     /// compatible relay).
 | ||||||
|     Builder(Arc<SignedBeaconBlock<T, FullPayload<T>>>), |     Builder(B, PhantomData<T>), | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> ProvenancedBlock<T, B> { | ||||||
|  |     pub fn local(block: B) -> Self { | ||||||
|  |         Self::Local(block, PhantomData) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn builder(block: B) -> Self { | ||||||
|  |         Self::Builder(block, PhantomData) | ||||||
|  |     } | ||||||
| } | } | ||||||
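The `PhantomData<T>` in each variant only anchors the otherwise-unused `T: BeaconChainTypes` parameter, which appears solely in the trait bound on `B`. A minimal sketch of the same Rust pattern, with illustrative names not taken from the codebase:

    use std::marker::PhantomData;

    // A wrapper whose type parameter `T` appears only in trait bounds, so a
    // zero-sized PhantomData field is needed for the struct to mention it.
    struct Tagged<T, B> {
        inner: B,
        _marker: PhantomData<T>,
    }

    impl<T, B> Tagged<T, B> {
        fn new(inner: B) -> Self {
            Self { inner, _marker: PhantomData }
        }
    }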
| 
 | 
 | ||||||
| /// Handles a request from the HTTP API for full blocks.
 | /// Handles a request from the HTTP API for full blocks.
 | ||||||
| pub async fn publish_block<T: BeaconChainTypes>( | pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>( | ||||||
|     block_root: Option<Hash256>, |     block_root: Option<Hash256>, | ||||||
|     provenanced_block: ProvenancedBlock<T::EthSpec>, |     provenanced_block: ProvenancedBlock<T, B>, | ||||||
|     chain: Arc<BeaconChain<T>>, |     chain: Arc<BeaconChain<T>>, | ||||||
|     network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, |     network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, | ||||||
|     log: Logger, |     log: Logger, | ||||||
| ) -> Result<(), Rejection> { |     validation_level: BroadcastValidation, | ||||||
|  |     duplicate_status_code: StatusCode, | ||||||
|  | ) -> Result<Response, Rejection> { | ||||||
|     let seen_timestamp = timestamp_now(); |     let seen_timestamp = timestamp_now(); | ||||||
|     let (block, is_locally_built_block) = match provenanced_block { |     let (block, is_locally_built_block) = match provenanced_block { | ||||||
|         ProvenancedBlock::Local(block) => (block, true), |         ProvenancedBlock::Local(block, _) => (block, true), | ||||||
|         ProvenancedBlock::Builder(block) => (block, false), |         ProvenancedBlock::Builder(block, _) => (block, false), | ||||||
|     }; |     }; | ||||||
|     let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); |     let beacon_block = block.inner(); | ||||||
|  |     let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock); | ||||||
|  |     debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot()); | ||||||
| 
 | 
 | ||||||
|     debug!( |     /* actually publish a block */ | ||||||
|         log, |     let publish_block = move |block: Arc<SignedBeaconBlock<T::EthSpec>>, | ||||||
|         "Signed block published to HTTP API"; |                               sender, | ||||||
|         "slot" => block.slot() |                               log, | ||||||
|     ); |                               seen_timestamp| { | ||||||
|  |         let publish_timestamp = timestamp_now(); | ||||||
|  |         let publish_delay = publish_timestamp | ||||||
|  |             .checked_sub(seen_timestamp) | ||||||
|  |             .unwrap_or_else(|| Duration::from_secs(0)); | ||||||
| 
 | 
 | ||||||
|     // Send the block, regardless of whether or not it is valid. The API
 |         info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay); | ||||||
|     // specification is very clear that this is the desired behaviour.
 |  | ||||||
| 
 | 
 | ||||||
|     let message = PubsubMessage::BeaconBlock(block.clone()); |         let message = PubsubMessage::BeaconBlock(block); | ||||||
|     crate::publish_pubsub_message(network_tx, message)?; |         crate::publish_pubsub_message(&sender, message) | ||||||
|  |             .map_err(|_| BeaconChainError::UnableToPublish.into()) | ||||||
|  |     }; | ||||||
| 
 | 
 | ||||||
|     let block_root = block_root.unwrap_or_else(|| block.canonical_root()); |     /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ | ||||||
|  |     let gossip_verified_block = match block.into_gossip_verified_block(&chain) { | ||||||
|  |         Ok(b) => b, | ||||||
|  |         Err(BlockError::BlockIsAlreadyKnown) => { | ||||||
|  |             // Allow the status code for duplicate blocks to be overridden based on config.
 | ||||||
|  |             return Ok(warp::reply::with_status( | ||||||
|  |                 warp::reply::json(&ErrorMessage { | ||||||
|  |                     code: duplicate_status_code.as_u16(), | ||||||
|  |                     message: "duplicate block".to_string(), | ||||||
|  |                     stacktraces: vec![], | ||||||
|  |                 }), | ||||||
|  |                 duplicate_status_code, | ||||||
|  |             ) | ||||||
|  |             .into_response()); | ||||||
|  |         } | ||||||
|  |         Err(e) => { | ||||||
|  |             warn!( | ||||||
|  |                 log, | ||||||
|  |                 "Not publishing block - not gossip verified"; | ||||||
|  |                 "slot" => beacon_block.slot(), | ||||||
|  |                 "error" => ?e | ||||||
|  |             ); | ||||||
|  |             return Err(warp_utils::reject::custom_bad_request(e.to_string())); | ||||||
|  |         } | ||||||
|  |     }; | ||||||
|  | 
 | ||||||
|  |     let block_root = block_root.unwrap_or(gossip_verified_block.block_root); | ||||||
|  | 
 | ||||||
|  |     if let BroadcastValidation::Gossip = validation_level { | ||||||
|  |         publish_block( | ||||||
|  |             beacon_block.clone(), | ||||||
|  |             network_tx.clone(), | ||||||
|  |             log.clone(), | ||||||
|  |             seen_timestamp, | ||||||
|  |         ) | ||||||
|  |         .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /* only publish if gossip- and consensus-valid and equivocation-free */ | ||||||
|  |     let chain_clone = chain.clone(); | ||||||
|  |     let block_clone = beacon_block.clone(); | ||||||
|  |     let log_clone = log.clone(); | ||||||
|  |     let sender_clone = network_tx.clone(); | ||||||
|  | 
 | ||||||
|  |     let publish_fn = move || match validation_level { | ||||||
|  |         BroadcastValidation::Gossip => Ok(()), | ||||||
|  |         BroadcastValidation::Consensus => { | ||||||
|  |             publish_block(block_clone, sender_clone, log_clone, seen_timestamp) | ||||||
|  |         } | ||||||
|  |         BroadcastValidation::ConsensusAndEquivocation => { | ||||||
|  |             if chain_clone | ||||||
|  |                 .observed_block_producers | ||||||
|  |                 .read() | ||||||
|  |                 .proposer_has_been_observed(block_clone.message(), block_root) | ||||||
|  |                 .map_err(|e| BlockError::BeaconChainError(e.into()))? | ||||||
|  |                 .is_slashable() | ||||||
|  |             { | ||||||
|  |                 warn!( | ||||||
|  |                     log_clone, | ||||||
|  |                     "Not publishing equivocating block"; | ||||||
|  |                     "slot" => block_clone.slot() | ||||||
|  |                 ); | ||||||
|  |                 Err(BlockError::Slashable) | ||||||
|  |             } else { | ||||||
|  |                 publish_block(block_clone, sender_clone, log_clone, seen_timestamp) | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     }; | ||||||
| 
 | 
 | ||||||
|     match chain |     match chain | ||||||
|         .process_block( |         .process_block( | ||||||
|             block_root, |             block_root, | ||||||
|             block.clone(), |             gossip_verified_block, | ||||||
|             CountUnrealized::True, |  | ||||||
|             NotifyExecutionLayer::Yes, |             NotifyExecutionLayer::Yes, | ||||||
|  |             publish_fn, | ||||||
|         ) |         ) | ||||||
|         .await |         .await | ||||||
|     { |     { | ||||||
| @ -70,14 +160,14 @@ pub async fn publish_block<T: BeaconChainTypes>( | |||||||
|                 "Valid block from HTTP API"; |                 "Valid block from HTTP API"; | ||||||
|                 "block_delay" => ?delay, |                 "block_delay" => ?delay, | ||||||
|                 "root" => format!("{}", root), |                 "root" => format!("{}", root), | ||||||
|                 "proposer_index" => block.message().proposer_index(), |                 "proposer_index" => beacon_block.message().proposer_index(), | ||||||
|                 "slot" => block.slot(), |                 "slot" => beacon_block.slot(), | ||||||
|             ); |             ); | ||||||
| 
 | 
 | ||||||
|             // Notify the validator monitor.
 |             // Notify the validator monitor.
 | ||||||
|             chain.validator_monitor.read().register_api_block( |             chain.validator_monitor.read().register_api_block( | ||||||
|                 seen_timestamp, |                 seen_timestamp, | ||||||
|                 block.message(), |                 beacon_block.message(), | ||||||
|                 root, |                 root, | ||||||
|                 &chain.slot_clock, |                 &chain.slot_clock, | ||||||
|             ); |             ); | ||||||
| @ -90,40 +180,39 @@ pub async fn publish_block<T: BeaconChainTypes>( | |||||||
|             // blocks built with builders we consider the broadcast time to be
 |             // blocks built with builders we consider the broadcast time to be
 | ||||||
|             // when the blinded block is published to the builder.
 |             // when the blinded block is published to the builder.
 | ||||||
|             if is_locally_built_block { |             if is_locally_built_block { | ||||||
|                 late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log) |                 late_block_logging( | ||||||
|  |                     &chain, | ||||||
|  |                     seen_timestamp, | ||||||
|  |                     beacon_block.message(), | ||||||
|  |                     root, | ||||||
|  |                     "local", | ||||||
|  |                     &log, | ||||||
|  |                 ) | ||||||
|             } |             } | ||||||
| 
 |             Ok(warp::reply().into_response()) | ||||||
|             Ok(()) |  | ||||||
|         } |         } | ||||||
|         Err(BlockError::BlockIsAlreadyKnown) => { |         Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { | ||||||
|             info!( |             Err(warp_utils::reject::custom_server_error( | ||||||
|                 log, |                 "unable to publish to network channel".to_string(), | ||||||
|                 "Block from HTTP API already known"; |             )) | ||||||
|                 "block" => ?block.canonical_root(), |  | ||||||
|                 "slot" => block.slot(), |  | ||||||
|             ); |  | ||||||
|             Ok(()) |  | ||||||
|         } |  | ||||||
|         Err(BlockError::RepeatProposal { proposer, slot }) => { |  | ||||||
|             warn!( |  | ||||||
|                 log, |  | ||||||
|                 "Block ignored due to repeat proposal"; |  | ||||||
|                 "msg" => "this can happen when a VC uses fallback BNs. \ |  | ||||||
|                     whilst this is not necessarily an error, it can indicate issues with a BN \ |  | ||||||
|                     or between the VC and BN.",
 |  | ||||||
|                 "slot" => slot, |  | ||||||
|                 "proposer" => proposer, |  | ||||||
|             ); |  | ||||||
|             Ok(()) |  | ||||||
|         } |         } | ||||||
|  |         Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request( | ||||||
|  |             "proposal for this slot and proposer has already been seen".to_string(), | ||||||
|  |         )), | ||||||
|         Err(e) => { |         Err(e) => { | ||||||
|             let msg = format!("{:?}", e); |             if let BroadcastValidation::Gossip = validation_level { | ||||||
|             error!( |                 Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) | ||||||
|                 log, |             } else { | ||||||
|                 "Invalid block provided to HTTP API"; |                 let msg = format!("{:?}", e); | ||||||
|                 "reason" => &msg |                 error!( | ||||||
|             ); |                     log, | ||||||
|             Err(warp_utils::reject::broadcast_without_import(msg)) |                     "Invalid block provided to HTTP API"; | ||||||
|  |                     "reason" => &msg | ||||||
|  |                 ); | ||||||
|  |                 Err(warp_utils::reject::custom_bad_request(format!( | ||||||
|  |                     "Invalid block: {e}" | ||||||
|  |                 ))) | ||||||
|  |             } | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| } | } | ||||||
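The behaviour of the three `BroadcastValidation` levels wired through `publish_block` above can be summarised as follows. This is a paraphrasing sketch of the semantics, not the enum's actual definition in `eth2::types`:

    // Sketch of the broadcast semantics implemented in `publish_block`:
    enum BroadcastValidationSketch {
        // Publish to gossip as soon as the block passes gossip verification.
        Gossip,
        // Publish only after the block also passes full consensus verification.
        Consensus,
        // As `Consensus`, and additionally refuse to publish a slashable
        // (equivocating) proposal from an already-observed proposer.
        ConsensusAndEquivocation,
    }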
| @ -135,21 +224,33 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>( | |||||||
|     chain: Arc<BeaconChain<T>>, |     chain: Arc<BeaconChain<T>>, | ||||||
|     network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, |     network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, | ||||||
|     log: Logger, |     log: Logger, | ||||||
| ) -> Result<(), Rejection> { |     validation_level: BroadcastValidation, | ||||||
|  |     duplicate_status_code: StatusCode, | ||||||
|  | ) -> Result<Response, Rejection> { | ||||||
|     let block_root = block.canonical_root(); |     let block_root = block.canonical_root(); | ||||||
|     let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; |     let full_block: ProvenancedBlock<T, Arc<SignedBeaconBlock<T::EthSpec>>> = | ||||||
|     publish_block::<T>(Some(block_root), full_block, chain, network_tx, log).await |         reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; | ||||||
|  |     publish_block::<T, _>( | ||||||
|  |         Some(block_root), | ||||||
|  |         full_block, | ||||||
|  |         chain, | ||||||
|  |         network_tx, | ||||||
|  |         log, | ||||||
|  |         validation_level, | ||||||
|  |         duplicate_status_code, | ||||||
|  |     ) | ||||||
|  |     .await | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// Deconstruct the given blinded block, and construct a full block. This attempts to use the
 | /// Deconstruct the given blinded block, and construct a full block. This attempts to use the
 | ||||||
| /// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve
 | /// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve
 | ||||||
| /// the full payload.
 | /// the full payload.
 | ||||||
| async fn reconstruct_block<T: BeaconChainTypes>( | pub async fn reconstruct_block<T: BeaconChainTypes>( | ||||||
|     chain: Arc<BeaconChain<T>>, |     chain: Arc<BeaconChain<T>>, | ||||||
|     block_root: Hash256, |     block_root: Hash256, | ||||||
|     block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, |     block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, | ||||||
|     log: Logger, |     log: Logger, | ||||||
| ) -> Result<ProvenancedBlock<T::EthSpec>, Rejection> { | ) -> Result<ProvenancedBlock<T, Arc<SignedBeaconBlock<T::EthSpec>>>, Rejection> { | ||||||
|     let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { |     let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { | ||||||
|         let el = chain.execution_layer.as_ref().ok_or_else(|| { |         let el = chain.execution_layer.as_ref().ok_or_else(|| { | ||||||
|             warp_utils::reject::custom_server_error("Missing execution layer".to_string()) |             warp_utils::reject::custom_server_error("Missing execution layer".to_string()) | ||||||
| @ -215,15 +316,15 @@ async fn reconstruct_block<T: BeaconChainTypes>( | |||||||
|         None => block |         None => block | ||||||
|             .try_into_full_block(None) |             .try_into_full_block(None) | ||||||
|             .map(Arc::new) |             .map(Arc::new) | ||||||
|             .map(ProvenancedBlock::Local), |             .map(ProvenancedBlock::local), | ||||||
|         Some(ProvenancedPayload::Local(full_payload)) => block |         Some(ProvenancedPayload::Local(full_payload)) => block | ||||||
|             .try_into_full_block(Some(full_payload)) |             .try_into_full_block(Some(full_payload)) | ||||||
|             .map(Arc::new) |             .map(Arc::new) | ||||||
|             .map(ProvenancedBlock::Local), |             .map(ProvenancedBlock::local), | ||||||
|         Some(ProvenancedPayload::Builder(full_payload)) => block |         Some(ProvenancedPayload::Builder(full_payload)) => block | ||||||
|             .try_into_full_block(Some(full_payload)) |             .try_into_full_block(Some(full_payload)) | ||||||
|             .map(Arc::new) |             .map(Arc::new) | ||||||
|             .map(ProvenancedBlock::Builder), |             .map(ProvenancedBlock::builder), | ||||||
|     } |     } | ||||||
|     .ok_or_else(|| { |     .ok_or_else(|| { | ||||||
|         warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) |         warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) | ||||||
|  | |||||||
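The final match in `reconstruct_block` keeps payload provenance intact while upgrading the blinded block; schematically (types simplified and names hypothetical, for illustration only):

    // Sketch: payload provenance is preserved through reconstruction.
    enum PayloadSource { None, LocalEe, Builder }

    fn provenance_of(source: PayloadSource) -> &'static str {
        match source {
            // No separate payload required, e.g. a block whose payload field
            // can be completed without an execution payload: treated as local.
            PayloadSource::None => "local",
            // Payload recovered from our own execution engine's cache.
            PayloadSource::LocalEe => "local",
            // Payload fetched from a relay via the blinded-block flow.
            PayloadSource::Builder => "builder",
        }
    }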
| @ -70,15 +70,30 @@ impl StateId { | |||||||
|                     .map_err(BeaconChainError::DBError) |                     .map_err(BeaconChainError::DBError) | ||||||
|                     .map_err(warp_utils::reject::beacon_chain_error)? |                     .map_err(warp_utils::reject::beacon_chain_error)? | ||||||
|                 { |                 { | ||||||
|                     let execution_optimistic = chain |                     let finalization_status = chain | ||||||
|                         .canonical_head |                         .state_finalization_and_canonicity(root, hot_summary.slot) | ||||||
|                         .fork_choice_read_lock() |  | ||||||
|                         .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) |  | ||||||
|                         .map_err(BeaconChainError::ForkChoiceError) |  | ||||||
|                         .map_err(warp_utils::reject::beacon_chain_error)?; |  | ||||||
|                     let finalized = chain |  | ||||||
|                         .is_finalized_state(root, hot_summary.slot) |  | ||||||
|                         .map_err(warp_utils::reject::beacon_chain_error)?; |                         .map_err(warp_utils::reject::beacon_chain_error)?; | ||||||
|  |                     let finalized = finalization_status.is_finalized(); | ||||||
|  |                     let fork_choice = chain.canonical_head.fork_choice_read_lock(); | ||||||
|  |                     let execution_optimistic = if finalization_status.slot_is_finalized | ||||||
|  |                         && !finalization_status.canonical | ||||||
|  |                     { | ||||||
|  |                         // This block is permanently orphaned and has likely been pruned from fork
 | ||||||
|  |                         // choice. If it isn't found in fork choice, mark it optimistic to be on the
 | ||||||
|  |                         // safe side.
 | ||||||
|  |                         fork_choice | ||||||
|  |                             .is_optimistic_or_invalid_block_no_fallback( | ||||||
|  |                                 &hot_summary.latest_block_root, | ||||||
|  |                             ) | ||||||
|  |                             .unwrap_or(true) | ||||||
|  |                     } else { | ||||||
|  |                         // This block is either old and finalized, or recent and unfinalized, so
 | ||||||
|  |                         // it's safe to fall back to the optimistic status of the finalized block.
 | ||||||
|  |                         fork_choice | ||||||
|  |                             .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) | ||||||
|  |                             .map_err(BeaconChainError::ForkChoiceError) | ||||||
|  |                             .map_err(warp_utils::reject::beacon_chain_error)? | ||||||
|  |                     }; | ||||||
|                     return Ok((*root, execution_optimistic, finalized)); |                     return Ok((*root, execution_optimistic, finalized)); | ||||||
|                 } else if let Some(_cold_state_slot) = chain |                 } else if let Some(_cold_state_slot) = chain | ||||||
|                     .store |                     .store | ||||||
|  | |||||||
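The new branch in `StateId` chooses between two fork-choice lookups depending on the state's finalization and canonicity; a condensed sketch of the decision (the helper and its parameters are hypothetical):

    // Sketch: choosing the execution-optimistic flag for a hot state summary.
    fn optimistic_status(
        slot_is_finalized: bool,
        canonical: bool,
        no_fallback_lookup: Option<bool>, // None if the block is absent from fork choice
        fallback_lookup: bool,
    ) -> bool {
        if slot_is_finalized && !canonical {
            // Permanently orphaned: the block may have been pruned from fork
            // choice, so treat a missing entry as optimistic to be safe.
            no_fallback_lookup.unwrap_or(true)
        } else {
            // Old-and-finalized or recent-and-unfinalized: safe to fall back
            // to the finalized block's optimistic status.
            fallback_lookup
        }
    }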
| @ -6,7 +6,7 @@ use beacon_chain::sync_committee_verification::{ | |||||||
| }; | }; | ||||||
| use beacon_chain::{ | use beacon_chain::{ | ||||||
|     validator_monitor::timestamp_now, BeaconChain, BeaconChainError, BeaconChainTypes, |     validator_monitor::timestamp_now, BeaconChain, BeaconChainError, BeaconChainTypes, | ||||||
|     StateSkipConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY, |     StateSkipConfig, | ||||||
| }; | }; | ||||||
| use eth2::types::{self as api_types}; | use eth2::types::{self as api_types}; | ||||||
| use lighthouse_network::PubsubMessage; | use lighthouse_network::PubsubMessage; | ||||||
| @ -85,7 +85,7 @@ fn duties_from_state_load<T: BeaconChainTypes>( | |||||||
|     let current_epoch = chain.epoch()?; |     let current_epoch = chain.epoch()?; | ||||||
|     let tolerant_current_epoch = chain |     let tolerant_current_epoch = chain | ||||||
|         .slot_clock |         .slot_clock | ||||||
|         .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) |         .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) | ||||||
|         .ok_or(BeaconChainError::UnableToReadSlot)? |         .ok_or(BeaconChainError::UnableToReadSlot)? | ||||||
|         .epoch(T::EthSpec::slots_per_epoch()); |         .epoch(T::EthSpec::slots_per_epoch()); | ||||||
| 
 | 
 | ||||||
| @ -304,7 +304,7 @@ pub fn process_signed_contribution_and_proofs<T: BeaconChainTypes>( | |||||||
|             } |             } | ||||||
|             // If we already know the contribution, don't broadcast it or attempt to
 |             // If we already know the contribution, don't broadcast it or attempt to
 | ||||||
|             // further verify it. Return success.
 |             // further verify it. Return success.
 | ||||||
|             Err(SyncVerificationError::SyncContributionAlreadyKnown(_)) => continue, |             Err(SyncVerificationError::SyncContributionSupersetKnown(_)) => continue, | ||||||
|             // If we've already seen this aggregator produce an aggregate, just
 |             // If we've already seen this aggregator produce an aggregate, just
 | ||||||
|             // skip this one.
 |             // skip this one.
 | ||||||
|             //
 |             //
 | ||||||
|  | |||||||
							
								
								
									
192 beacon_node/http_api/src/task_spawner.rs Normal file
							| @ -0,0 +1,192 @@ | |||||||
|  | use beacon_processor::{BeaconProcessorSend, BlockingOrAsync, Work, WorkEvent}; | ||||||
|  | use serde::Serialize; | ||||||
|  | use std::future::Future; | ||||||
|  | use tokio::sync::{mpsc::error::TrySendError, oneshot}; | ||||||
|  | use types::EthSpec; | ||||||
|  | use warp::reply::{Reply, Response}; | ||||||
|  | 
 | ||||||
|  | /// Maps a request to a queue in the `BeaconProcessor`.
 | ||||||
|  | #[derive(Clone, Copy)] | ||||||
|  | pub enum Priority { | ||||||
|  |     /// The highest priority.
 | ||||||
|  |     P0, | ||||||
|  |     /// The lowest priority.
 | ||||||
|  |     P1, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl Priority { | ||||||
|  |     /// Wrap `self` in a `WorkEvent` with an appropriate priority.
 | ||||||
|  |     fn work_event<E: EthSpec>(&self, process_fn: BlockingOrAsync) -> WorkEvent<E> { | ||||||
|  |         let work = match self { | ||||||
|  |             Priority::P0 => Work::ApiRequestP0(process_fn), | ||||||
|  |             Priority::P1 => Work::ApiRequestP1(process_fn), | ||||||
|  |         }; | ||||||
|  |         WorkEvent { | ||||||
|  |             drop_during_sync: false, | ||||||
|  |             work, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// Spawns tasks on the `BeaconProcessor` or directly on the tokio executor.
 | ||||||
|  | pub struct TaskSpawner<E: EthSpec> { | ||||||
|  |     /// Used to send tasks to the `BeaconProcessor`. The tokio executor will be
 | ||||||
|  |     /// used if this is `None`.
 | ||||||
|  |     beacon_processor_send: Option<BeaconProcessorSend<E>>, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// Convert a warp `Rejection` into a `Response`.
 | ||||||
|  | ///
 | ||||||
|  | /// This function should *always* be used to convert rejections into responses. This prevents warp
 | ||||||
|  | /// from trying to backtrack in strange ways. See: https://github.com/sigp/lighthouse/issues/3404
 | ||||||
|  | pub async fn convert_rejection<T: Reply>(res: Result<T, warp::Rejection>) -> Response { | ||||||
|  |     match res { | ||||||
|  |         Ok(response) => response.into_response(), | ||||||
|  |         Err(e) => match warp_utils::reject::handle_rejection(e).await { | ||||||
|  |             Ok(reply) => reply.into_response(), | ||||||
|  |             Err(_) => warp::reply::with_status( | ||||||
|  |                 warp::reply::json(&"unhandled error"), | ||||||
|  |                 eth2::StatusCode::INTERNAL_SERVER_ERROR, | ||||||
|  |             ) | ||||||
|  |             .into_response(), | ||||||
|  |         }, | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl<E: EthSpec> TaskSpawner<E> { | ||||||
|  |     pub fn new(beacon_processor_send: Option<BeaconProcessorSend<E>>) -> Self { | ||||||
|  |         Self { | ||||||
|  |             beacon_processor_send, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Executes a "blocking" (non-async) task which returns a `Response`.
 | ||||||
|  |     pub async fn blocking_response_task<F, T>(self, priority: Priority, func: F) -> Response | ||||||
|  |     where | ||||||
|  |         F: FnOnce() -> Result<T, warp::Rejection> + Send + Sync + 'static, | ||||||
|  |         T: Reply + Send + 'static, | ||||||
|  |     { | ||||||
|  |         if let Some(beacon_processor_send) = &self.beacon_processor_send { | ||||||
|  |             // Create a closure that will execute `func` and send the result to
 | ||||||
|  |             // a channel held by this thread.
 | ||||||
|  |             let (tx, rx) = oneshot::channel(); | ||||||
|  |             let process_fn = move || { | ||||||
|  |                 // Execute the function, collect the return value.
 | ||||||
|  |                 let func_result = func(); | ||||||
|  |                 // Send the result down the channel. Ignore any failures; the
 | ||||||
|  |                 // send can only fail if the receiver is dropped.
 | ||||||
|  |                 let _ = tx.send(func_result); | ||||||
|  |             }; | ||||||
|  | 
 | ||||||
|  |             // Send the function to the beacon processor for execution at some arbitrary time.
 | ||||||
|  |             let result = send_to_beacon_processor( | ||||||
|  |                 beacon_processor_send, | ||||||
|  |                 priority, | ||||||
|  |                 BlockingOrAsync::Blocking(Box::new(process_fn)), | ||||||
|  |                 rx, | ||||||
|  |             ) | ||||||
|  |             .await | ||||||
|  |             .and_then(|x| x); | ||||||
|  |             convert_rejection(result).await | ||||||
|  |         } else { | ||||||
|  |             // There is no beacon processor so spawn a task directly on the
 | ||||||
|  |             // tokio executor.
 | ||||||
|  |             convert_rejection(warp_utils::task::blocking_response_task(func).await).await | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Executes a "blocking" (non-async) task which returns a JSON-serializable
 | ||||||
|  |     /// object.
 | ||||||
|  |     pub async fn blocking_json_task<F, T>(self, priority: Priority, func: F) -> Response | ||||||
|  |     where | ||||||
|  |         F: FnOnce() -> Result<T, warp::Rejection> + Send + Sync + 'static, | ||||||
|  |         T: Serialize + Send + 'static, | ||||||
|  |     { | ||||||
|  |         let func = || func().map(|t| warp::reply::json(&t).into_response()); | ||||||
|  |         self.blocking_response_task(priority, func).await | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Executes an async task which may return a `Rejection`, which will be converted to a response.
 | ||||||
|  |     pub async fn spawn_async_with_rejection( | ||||||
|  |         self, | ||||||
|  |         priority: Priority, | ||||||
|  |         func: impl Future<Output = Result<Response, warp::Rejection>> + Send + Sync + 'static, | ||||||
|  |     ) -> Response { | ||||||
|  |         let result = self | ||||||
|  |             .spawn_async_with_rejection_no_conversion(priority, func) | ||||||
|  |             .await; | ||||||
|  |         convert_rejection(result).await | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// Same as `spawn_async_with_rejection` but returning a result with the unhandled rejection.
 | ||||||
|  |     ///
 | ||||||
|  |     /// If you call this function you MUST convert the rejection to a response and not let it
 | ||||||
|  |     /// propagate into Warp's filters. See `convert_rejection`.
 | ||||||
|  |     pub async fn spawn_async_with_rejection_no_conversion( | ||||||
|  |         self, | ||||||
|  |         priority: Priority, | ||||||
|  |         func: impl Future<Output = Result<Response, warp::Rejection>> + Send + Sync + 'static, | ||||||
|  |     ) -> Result<Response, warp::Rejection> { | ||||||
|  |         if let Some(beacon_processor_send) = &self.beacon_processor_send { | ||||||
|  |             // Create a wrapper future that will execute `func` and send the
 | ||||||
|  |             // result to a channel held by this thread.
 | ||||||
|  |             let (tx, rx) = oneshot::channel(); | ||||||
|  |             let process_fn = async move { | ||||||
|  |                 // Await the future, collect the return value.
 | ||||||
|  |                 let func_result = func.await; | ||||||
|  |                 // Send the result down the channel. Ignore any failures; the
 | ||||||
|  |                 // send can only fail if the receiver is dropped.
 | ||||||
|  |                 let _ = tx.send(func_result); | ||||||
|  |             }; | ||||||
|  | 
 | ||||||
|  |             // Send the function to the beacon processor for execution at some arbitrary time.
 | ||||||
|  |             send_to_beacon_processor( | ||||||
|  |                 beacon_processor_send, | ||||||
|  |                 priority, | ||||||
|  |                 BlockingOrAsync::Async(Box::pin(process_fn)), | ||||||
|  |                 rx, | ||||||
|  |             ) | ||||||
|  |             .await | ||||||
|  |             .and_then(|x| x) | ||||||
|  |         } else { | ||||||
|  |             // There is no beacon processor so spawn a task directly on the
 | ||||||
|  |             // tokio executor.
 | ||||||
|  |             tokio::task::spawn(func) | ||||||
|  |                 .await | ||||||
|  |                 .map_err(|_| { | ||||||
|  |                     warp_utils::reject::custom_server_error("Tokio failed to spawn task".into()) | ||||||
|  |                 }) | ||||||
|  |                 .and_then(|x| x) | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// Send a task to the beacon processor and await execution.
 | ||||||
|  | ///
 | ||||||
|  | /// If the task is not executed, return an `Err` with an error message
 | ||||||
|  | /// for the API consumer.
 | ||||||
|  | async fn send_to_beacon_processor<E: EthSpec, T>( | ||||||
|  |     beacon_processor_send: &BeaconProcessorSend<E>, | ||||||
|  |     priority: Priority, | ||||||
|  |     process_fn: BlockingOrAsync, | ||||||
|  |     rx: oneshot::Receiver<T>, | ||||||
|  | ) -> Result<T, warp::Rejection> { | ||||||
|  |     let error_message = match beacon_processor_send.try_send(priority.work_event(process_fn)) { | ||||||
|  |         Ok(()) => { | ||||||
|  |             match rx.await { | ||||||
|  |                 // The beacon processor executed the task and sent a result.
 | ||||||
|  |                 Ok(func_result) => return Ok(func_result), | ||||||
|  |                 // The beacon processor dropped the channel without sending a
 | ||||||
|  |                 // result. The beacon processor dropped this task because its
 | ||||||
|  |                 // queues are full or it's shutting down.
 | ||||||
|  |                 Err(_) => "The task did not execute. The server is overloaded or shutting down.", | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |         Err(TrySendError::Full(_)) => "The task was dropped. The server is overloaded.", | ||||||
|  |         Err(TrySendError::Closed(_)) => "The task was dropped. The server is shutting down.", | ||||||
|  |     }; | ||||||
|  | 
 | ||||||
|  |     Err(warp_utils::reject::custom_server_error( | ||||||
|  |         error_message.to_string(), | ||||||
|  |     )) | ||||||
|  | } | ||||||
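As a usage illustration for the new `TaskSpawner` (the handler below is hypothetical; `TaskSpawner`, `Priority::P1` and `blocking_json_task` are from the file above):

    // Hypothetical endpoint body: run a cheap, blocking, read-only query on
    // the beacon processor at low priority and reply with JSON.
    async fn handle_version<E: EthSpec>(task_spawner: TaskSpawner<E>) -> Response {
        task_spawner
            .blocking_json_task(Priority::P1, || Ok("Lighthouse/vX.Y.Z"))
            .await
    }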
| @ -5,16 +5,14 @@ use beacon_chain::{ | |||||||
|     }, |     }, | ||||||
|     BeaconChain, BeaconChainTypes, |     BeaconChain, BeaconChainTypes, | ||||||
| }; | }; | ||||||
|  | use beacon_processor::{BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig}; | ||||||
| use directory::DEFAULT_ROOT_DIR; | use directory::DEFAULT_ROOT_DIR; | ||||||
| use eth2::{BeaconNodeHttpClient, Timeouts}; | use eth2::{BeaconNodeHttpClient, Timeouts}; | ||||||
| use lighthouse_network::{ | use lighthouse_network::{ | ||||||
|     discv5::enr::{CombinedKey, EnrBuilder}, |     discv5::enr::{CombinedKey, EnrBuilder}, | ||||||
|     libp2p::{ |     libp2p::swarm::{ | ||||||
|         core::connection::ConnectionId, |         behaviour::{ConnectionEstablished, FromSwarm}, | ||||||
|         swarm::{ |         ConnectionId, NetworkBehaviour, | ||||||
|             behaviour::{ConnectionEstablished, FromSwarm}, |  | ||||||
|             NetworkBehaviour, |  | ||||||
|         }, |  | ||||||
|     }, |     }, | ||||||
|     rpc::methods::{MetaData, MetaDataV2}, |     rpc::methods::{MetaData, MetaDataV2}, | ||||||
|     types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, |     types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, | ||||||
| @ -25,11 +23,11 @@ use network::{NetworkReceivers, NetworkSenders}; | |||||||
| use sensitive_url::SensitiveUrl; | use sensitive_url::SensitiveUrl; | ||||||
| use slog::Logger; | use slog::Logger; | ||||||
| use std::future::Future; | use std::future::Future; | ||||||
| use std::net::{IpAddr, Ipv4Addr, SocketAddr}; | use std::net::SocketAddr; | ||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
| use std::time::Duration; | use std::time::Duration; | ||||||
| use store::MemoryStore; | use store::MemoryStore; | ||||||
| use tokio::sync::oneshot; | use task_executor::test_utils::TestRuntime; | ||||||
| use types::{ChainSpec, EthSpec}; | use types::{ChainSpec, EthSpec}; | ||||||
| 
 | 
 | ||||||
| pub const TCP_PORT: u16 = 42; | pub const TCP_PORT: u16 = 42; | ||||||
| @ -42,7 +40,6 @@ pub struct InteractiveTester<E: EthSpec> { | |||||||
|     pub harness: BeaconChainHarness<EphemeralHarnessType<E>>, |     pub harness: BeaconChainHarness<EphemeralHarnessType<E>>, | ||||||
|     pub client: BeaconNodeHttpClient, |     pub client: BeaconNodeHttpClient, | ||||||
|     pub network_rx: NetworkReceivers<E>, |     pub network_rx: NetworkReceivers<E>, | ||||||
|     _server_shutdown: oneshot::Sender<()>, |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// The result of calling `create_api_server`.
 | /// The result of calling `create_api_server`.
 | ||||||
| @ -51,7 +48,6 @@ pub struct InteractiveTester<E: EthSpec> { | |||||||
| pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> { | pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> { | ||||||
|     pub server: SFut, |     pub server: SFut, | ||||||
|     pub listening_socket: SocketAddr, |     pub listening_socket: SocketAddr, | ||||||
|     pub shutdown_tx: oneshot::Sender<()>, |  | ||||||
|     pub network_rx: NetworkReceivers<E>, |     pub network_rx: NetworkReceivers<E>, | ||||||
|     pub local_enr: Enr, |     pub local_enr: Enr, | ||||||
|     pub external_peer_id: PeerId, |     pub external_peer_id: PeerId, | ||||||
| @ -99,10 +95,14 @@ impl<E: EthSpec> InteractiveTester<E> { | |||||||
|         let ApiServer { |         let ApiServer { | ||||||
|             server, |             server, | ||||||
|             listening_socket, |             listening_socket, | ||||||
|             shutdown_tx: _server_shutdown, |  | ||||||
|             network_rx, |             network_rx, | ||||||
|             .. |             .. | ||||||
|         } = create_api_server(harness.chain.clone(), harness.logger().clone()).await; |         } = create_api_server( | ||||||
|  |             harness.chain.clone(), | ||||||
|  |             &harness.runtime, | ||||||
|  |             harness.logger().clone(), | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
| 
 | 
 | ||||||
|         tokio::spawn(server); |         tokio::spawn(server); | ||||||
| 
 | 
 | ||||||
| @ -120,25 +120,18 @@ impl<E: EthSpec> InteractiveTester<E> { | |||||||
|             harness, |             harness, | ||||||
|             client, |             client, | ||||||
|             network_rx, |             network_rx, | ||||||
|             _server_shutdown, |  | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| pub async fn create_api_server<T: BeaconChainTypes>( | pub async fn create_api_server<T: BeaconChainTypes>( | ||||||
|     chain: Arc<BeaconChain<T>>, |     chain: Arc<BeaconChain<T>>, | ||||||
|  |     test_runtime: &TestRuntime, | ||||||
|     log: Logger, |     log: Logger, | ||||||
| ) -> ApiServer<T::EthSpec, impl Future<Output = ()>> { | ) -> ApiServer<T::EthSpec, impl Future<Output = ()>> { | ||||||
|     // Get a random unused port.
 |     // Use port 0 to allocate a new unused port.
 | ||||||
|     let port = unused_port::unused_tcp4_port().unwrap(); |     let port = 0; | ||||||
|     create_api_server_on_port(chain, log, port).await |  | ||||||
| } |  | ||||||
| 
 | 
 | ||||||
| pub async fn create_api_server_on_port<T: BeaconChainTypes>( |  | ||||||
|     chain: Arc<BeaconChain<T>>, |  | ||||||
|     log: Logger, |  | ||||||
|     port: u16, |  | ||||||
| ) -> ApiServer<T::EthSpec, impl Future<Output = ()>> { |  | ||||||
|     let (network_senders, network_receivers) = NetworkSenders::new(); |     let (network_senders, network_receivers) = NetworkSenders::new(); | ||||||
| 
 | 
 | ||||||
|     // Default metadata
 |     // Default metadata
 | ||||||
| @ -151,8 +144,6 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>( | |||||||
|     let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); |     let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); | ||||||
|     let network_globals = Arc::new(NetworkGlobals::new( |     let network_globals = Arc::new(NetworkGlobals::new( | ||||||
|         enr.clone(), |         enr.clone(), | ||||||
|         Some(TCP_PORT), |  | ||||||
|         None, |  | ||||||
|         meta_data, |         meta_data, | ||||||
|         vec![], |         vec![], | ||||||
|         false, |         false, | ||||||
| @ -170,7 +161,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>( | |||||||
|         local_addr: EXTERNAL_ADDR.parse().unwrap(), |         local_addr: EXTERNAL_ADDR.parse().unwrap(), | ||||||
|         send_back_addr: EXTERNAL_ADDR.parse().unwrap(), |         send_back_addr: EXTERNAL_ADDR.parse().unwrap(), | ||||||
|     }; |     }; | ||||||
|     let connection_id = ConnectionId::new(1); |     let connection_id = ConnectionId::new_unchecked(1); | ||||||
|     pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { |     pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { | ||||||
|         peer_id, |         peer_id, | ||||||
|         connection_id, |         connection_id, | ||||||
| @ -183,36 +174,60 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>( | |||||||
|     let eth1_service = |     let eth1_service = | ||||||
|         eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); |         eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); | ||||||
| 
 | 
 | ||||||
|  |     let beacon_processor_config = BeaconProcessorConfig { | ||||||
|  |         // The number of workers must be greater than one. Tests which use the
 | ||||||
|  |         // builder workflow sometimes require an internal HTTP request in order
 | ||||||
|  |         // to fulfill an already in-flight HTTP request, therefore having only
 | ||||||
|  |         // one worker will result in a deadlock.
 | ||||||
|  |         max_workers: 2, | ||||||
|  |         ..BeaconProcessorConfig::default() | ||||||
|  |     }; | ||||||
|  |     let BeaconProcessorChannels { | ||||||
|  |         beacon_processor_tx, | ||||||
|  |         beacon_processor_rx, | ||||||
|  |         work_reprocessing_tx, | ||||||
|  |         work_reprocessing_rx, | ||||||
|  |     } = BeaconProcessorChannels::new(&beacon_processor_config); | ||||||
|  | 
 | ||||||
|  |     let beacon_processor_send = beacon_processor_tx; | ||||||
|  |     BeaconProcessor { | ||||||
|  |         network_globals: network_globals.clone(), | ||||||
|  |         executor: test_runtime.task_executor.clone(), | ||||||
|  |         current_workers: 0, | ||||||
|  |         config: beacon_processor_config, | ||||||
|  |         log: log.clone(), | ||||||
|  |     } | ||||||
|  |     .spawn_manager( | ||||||
|  |         beacon_processor_rx, | ||||||
|  |         work_reprocessing_tx, | ||||||
|  |         work_reprocessing_rx, | ||||||
|  |         None, | ||||||
|  |         chain.slot_clock.clone(), | ||||||
|  |         chain.spec.maximum_gossip_clock_disparity(), | ||||||
|  |     ) | ||||||
|  |     .unwrap(); | ||||||
|  | 
 | ||||||
|     let ctx = Arc::new(Context { |     let ctx = Arc::new(Context { | ||||||
|         config: Config { |         config: Config { | ||||||
|             enabled: true, |             enabled: true, | ||||||
|             listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), |  | ||||||
|             listen_port: port, |             listen_port: port, | ||||||
|             allow_origin: None, |  | ||||||
|             tls_config: None, |  | ||||||
|             allow_sync_stalled: false, |  | ||||||
|             data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), |             data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), | ||||||
|             spec_fork_name: None, |             ..Config::default() | ||||||
|         }, |         }, | ||||||
|         chain: Some(chain), |         chain: Some(chain), | ||||||
|         network_senders: Some(network_senders), |         network_senders: Some(network_senders), | ||||||
|         network_globals: Some(network_globals), |         network_globals: Some(network_globals), | ||||||
|  |         beacon_processor_send: Some(beacon_processor_send), | ||||||
|         eth1_service: Some(eth1_service), |         eth1_service: Some(eth1_service), | ||||||
|         sse_logging_components: None, |         sse_logging_components: None, | ||||||
|         log, |         log, | ||||||
|     }); |     }); | ||||||
| 
 | 
 | ||||||
|     let (shutdown_tx, shutdown_rx) = oneshot::channel(); |     let (listening_socket, server) = crate::serve(ctx, test_runtime.task_executor.exit()).unwrap(); | ||||||
|     let server_shutdown = async { |  | ||||||
|         // It's not really interesting why this triggered, just that it happened.
 |  | ||||||
|         let _ = shutdown_rx.await; |  | ||||||
|     }; |  | ||||||
|     let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap(); |  | ||||||
| 
 | 
 | ||||||
|     ApiServer { |     ApiServer { | ||||||
|         server, |         server, | ||||||
|         listening_socket, |         listening_socket, | ||||||
|         shutdown_tx, |  | ||||||
|         network_rx: network_receivers, |         network_rx: network_receivers, | ||||||
|         local_enr: enr, |         local_enr: enr, | ||||||
|         external_peer_id: peer_id, |         external_peer_id: peer_id, | ||||||
|  | |||||||
							
								
								
									
21 beacon_node/http_api/src/validator.rs Normal file
							| @ -0,0 +1,21 @@ | |||||||
|  | use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; | ||||||
|  | use types::*; | ||||||
|  | 
 | ||||||
|  | /// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a validator
 | ||||||
|  | /// index and then ensures that the validator exists in the given `state`.
 | ||||||
|  | pub fn pubkey_to_validator_index<T: BeaconChainTypes>( | ||||||
|  |     chain: &BeaconChain<T>, | ||||||
|  |     state: &BeaconState<T::EthSpec>, | ||||||
|  |     pubkey: &PublicKeyBytes, | ||||||
|  | ) -> Result<Option<usize>, BeaconChainError> { | ||||||
|  |     chain | ||||||
|  |         .validator_index(pubkey)? | ||||||
|  |         .filter(|&index| { | ||||||
|  |             state | ||||||
|  |                 .validators() | ||||||
|  |                 .get(index) | ||||||
|  |                 .map_or(false, |v| v.pubkey == *pubkey) | ||||||
|  |         }) | ||||||
|  |         .map(Result::Ok) | ||||||
|  |         .transpose() | ||||||
|  | } | ||||||
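A hedged usage sketch for the new helper; the surrounding rejection handling is hypothetical but follows the `warp_utils::reject` conventions used elsewhere in this diff:

    // Hypothetical call site: resolve a pubkey to an index, rejecting the
    // request if the validator is unknown in the requested state.
    let index = pubkey_to_validator_index(&chain, &state, &pubkey)
        .map_err(warp_utils::reject::beacon_chain_error)?
        .ok_or_else(|| {
            warp_utils::reject::custom_bad_request(format!("unknown validator: {pubkey:?}"))
        })?;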
							
								
								
									
1358 beacon_node/http_api/tests/broadcast_validation_tests.rs Normal file
File diff suppressed because it is too large
							| @ -326,11 +326,8 @@ async fn sync_committee_indices_across_fork() { | |||||||
| 
 | 
 | ||||||
| /// Assert that an HTTP API error has the given status code and indexed errors for the given indices.
 | /// Assert that an HTTP API error has the given status code and indexed errors for the given indices.
 | ||||||
| fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec<usize>) { | fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec<usize>) { | ||||||
|     let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { |     let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { code, failures, .. }) = error | ||||||
|         code, |     else { | ||||||
|         failures, |  | ||||||
|         .. |  | ||||||
|     }) = error else { |  | ||||||
|         panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") |         panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") | ||||||
|     }; |     }; | ||||||
|     assert_eq!(code, status_code); |     assert_eq!(code, status_code); | ||||||
|  | |||||||
| @ -2,8 +2,9 @@ | |||||||
| use beacon_chain::{ | use beacon_chain::{ | ||||||
|     chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, |     chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, | ||||||
|     test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, |     test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, | ||||||
|  |     ChainConfig, | ||||||
| }; | }; | ||||||
| use eth2::types::DepositContractData; | use eth2::types::{DepositContractData, StateId}; | ||||||
| use execution_layer::{ForkchoiceState, PayloadAttributes}; | use execution_layer::{ForkchoiceState, PayloadAttributes}; | ||||||
| use http_api::test_utils::InteractiveTester; | use http_api::test_utils::InteractiveTester; | ||||||
| use parking_lot::Mutex; | use parking_lot::Mutex; | ||||||
| @ -17,7 +18,7 @@ use std::time::Duration; | |||||||
| use tree_hash::TreeHash; | use tree_hash::TreeHash; | ||||||
| use types::{ | use types::{ | ||||||
|     Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, |     Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, | ||||||
|     MainnetEthSpec, ProposerPreparationData, Slot, |     MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| type E = MainnetEthSpec; | type E = MainnetEthSpec; | ||||||
| @ -48,6 +49,76 @@ async fn deposit_contract_custom_network() { | |||||||
|     assert_eq!(result, expected); |     assert_eq!(result, expected); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // Test that state lookups by root function correctly for states that are finalized but still
 | ||||||
|  | // present in the hot database, and have had their block pruned from fork choice.
 | ||||||
|  | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] | ||||||
|  | async fn state_by_root_pruned_from_fork_choice() { | ||||||
|  |     type E = MinimalEthSpec; | ||||||
|  | 
 | ||||||
|  |     let validator_count = 24; | ||||||
|  |     let spec = ForkName::latest().make_genesis_spec(E::default_spec()); | ||||||
|  | 
 | ||||||
|  |     let tester = InteractiveTester::<E>::new_with_initializer_and_mutator( | ||||||
|  |         Some(spec.clone()), | ||||||
|  |         validator_count, | ||||||
|  |         Some(Box::new(move |builder| { | ||||||
|  |             builder | ||||||
|  |                 .deterministic_keypairs(validator_count) | ||||||
|  |                 .fresh_ephemeral_store() | ||||||
|  |                 .chain_config(ChainConfig { | ||||||
|  |                     epochs_per_migration: 1024, | ||||||
|  |                     ..ChainConfig::default() | ||||||
|  |                 }) | ||||||
|  |         })), | ||||||
|  |         None, | ||||||
|  |     ) | ||||||
|  |     .await; | ||||||
|  | 
 | ||||||
|  |     let client = &tester.client; | ||||||
|  |     let harness = &tester.harness; | ||||||
|  | 
 | ||||||
|  |     // Create some chain depth and finalize beyond fork choice's pruning depth.
 | ||||||
|  |     let num_epochs = 8_u64; | ||||||
|  |     let num_initial = num_epochs * E::slots_per_epoch(); | ||||||
|  |     harness.advance_slot(); | ||||||
|  |     harness | ||||||
|  |         .extend_chain_with_sync( | ||||||
|  |             num_initial as usize, | ||||||
|  |             BlockStrategy::OnCanonicalHead, | ||||||
|  |             AttestationStrategy::AllValidators, | ||||||
|  |             SyncCommitteeStrategy::NoValidators, | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  | 
 | ||||||
|  |     // Should now be finalized.
 | ||||||
|  |     let finalized_epoch = harness.finalized_checkpoint().epoch; | ||||||
|  |     assert_eq!(finalized_epoch, num_epochs - 2); | ||||||
|  | 
 | ||||||
|  |     // The split slot should still be at 0.
 | ||||||
|  |     assert_eq!(harness.chain.store.get_split_slot(), 0); | ||||||
|  | 
 | ||||||
|  |     // States that are between the split and the finalized slot should be able to be looked up by
 | ||||||
|  |     // state root.
 | ||||||
|  |     for slot in 0..finalized_epoch.start_slot(E::slots_per_epoch()).as_u64() { | ||||||
|  |         let state_root = harness | ||||||
|  |             .chain | ||||||
|  |             .state_root_at_slot(Slot::new(slot)) | ||||||
|  |             .unwrap() | ||||||
|  |             .unwrap(); | ||||||
|  |         let response = client | ||||||
|  |             .get_debug_beacon_states::<E>(StateId::Root(state_root)) | ||||||
|  |             .await | ||||||
|  |             .unwrap() | ||||||
|  |             .unwrap(); | ||||||
|  | 
 | ||||||
|  |         assert!(response.finalized.unwrap()); | ||||||
|  |         assert!(!response.execution_optimistic.unwrap()); | ||||||
|  | 
 | ||||||
|  |         let mut state = response.data; | ||||||
|  |         assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
| /// Data structure for tracking fork choice updates received by the mock execution layer.
 | /// Data structure for tracking fork choice updates received by the mock execution layer.
 | ||||||
| #[derive(Debug, Default)] | #[derive(Debug, Default)] | ||||||
| struct ForkChoiceUpdates { | struct ForkChoiceUpdates { | ||||||
|  | |||||||
| @ -1,5 +1,6 @@ | |||||||
| #![cfg(not(debug_assertions))] // Tests are too slow in debug.
 | #![cfg(not(debug_assertions))] // Tests are too slow in debug.
 | ||||||
| 
 | 
 | ||||||
|  | pub mod broadcast_validation_tests; | ||||||
| pub mod fork_tests; | pub mod fork_tests; | ||||||
| pub mod interactive_tests; | pub mod interactive_tests; | ||||||
| pub mod status_tests; | pub mod status_tests; | ||||||
|  | |||||||
| @ -3,6 +3,7 @@ use beacon_chain::{ | |||||||
|     test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, |     test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, | ||||||
|     BlockError, |     BlockError, | ||||||
| }; | }; | ||||||
|  | use eth2::StatusCode; | ||||||
| use execution_layer::{PayloadStatusV1, PayloadStatusV1Status}; | use execution_layer::{PayloadStatusV1, PayloadStatusV1Status}; | ||||||
| use http_api::test_utils::InteractiveTester; | use http_api::test_utils::InteractiveTester; | ||||||
| use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot}; | use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot}; | ||||||
| @ -143,3 +144,82 @@ async fn el_error_on_new_payload() { | |||||||
|     assert_eq!(api_response.is_optimistic, Some(false)); |     assert_eq!(api_response.is_optimistic, Some(false)); | ||||||
|     assert_eq!(api_response.is_syncing, false); |     assert_eq!(api_response.is_syncing, false); | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | /// Check the `node health` endpoint when the EL is offline.
 | ||||||
|  | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] | ||||||
|  | async fn node_health_el_offline() { | ||||||
|  |     let num_blocks = E::slots_per_epoch() / 2; | ||||||
|  |     let num_validators = E::slots_per_epoch(); | ||||||
|  |     let tester = post_merge_tester(num_blocks, num_validators).await; | ||||||
|  |     let harness = &tester.harness; | ||||||
|  |     let mock_el = harness.mock_execution_layer.as_ref().unwrap(); | ||||||
|  | 
 | ||||||
|  |     // EL offline
 | ||||||
|  |     mock_el.server.set_syncing_response(Err("offline".into())); | ||||||
|  |     mock_el.el.upcheck().await; | ||||||
|  | 
 | ||||||
|  |     let status = tester.client.get_node_health().await; | ||||||
|  |     match status { | ||||||
|  |         Ok(_) => { | ||||||
|  |             panic!("should return a 503 status code"); | ||||||
|  |         } | ||||||
|  |         Err(e) => { | ||||||
|  |             assert_eq!(e.status().unwrap(), 503); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// Check the `node health` endpoint when the EL is online and synced.
 | ||||||
|  | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] | ||||||
|  | async fn node_health_el_online_and_synced() { | ||||||
|  |     let num_blocks = E::slots_per_epoch() / 2; | ||||||
|  |     let num_validators = E::slots_per_epoch(); | ||||||
|  |     let tester = post_merge_tester(num_blocks, num_validators).await; | ||||||
|  |     let harness = &tester.harness; | ||||||
|  |     let mock_el = harness.mock_execution_layer.as_ref().unwrap(); | ||||||
|  | 
 | ||||||
|  |     // EL synced
 | ||||||
|  |     mock_el.server.set_syncing_response(Ok(false)); | ||||||
|  |     mock_el.el.upcheck().await; | ||||||
|  | 
 | ||||||
|  |     let status = tester.client.get_node_health().await; | ||||||
|  |     match status { | ||||||
|  |         Ok(response) => { | ||||||
|  |             assert_eq!(response, StatusCode::OK); | ||||||
|  |         } | ||||||
|  |         Err(_) => { | ||||||
|  |             panic!("should return a 200 status code"); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// Check the `node health` endpoint when the EL is online but not synced.
 | ||||||
|  | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] | ||||||
|  | async fn node_health_el_online_and_not_synced() { | ||||||
|  |     let num_blocks = E::slots_per_epoch() / 2; | ||||||
|  |     let num_validators = E::slots_per_epoch(); | ||||||
|  |     let tester = post_merge_tester(num_blocks, num_validators).await; | ||||||
|  |     let harness = &tester.harness; | ||||||
|  |     let mock_el = harness.mock_execution_layer.as_ref().unwrap(); | ||||||
|  | 
 | ||||||
|  |     // EL not synced
 | ||||||
|  |     harness.advance_slot(); | ||||||
|  |     mock_el.server.all_payloads_syncing(true); | ||||||
|  |     harness | ||||||
|  |         .extend_chain( | ||||||
|  |             1, | ||||||
|  |             BlockStrategy::OnCanonicalHead, | ||||||
|  |             AttestationStrategy::AllValidators, | ||||||
|  |         ) | ||||||
|  |         .await; | ||||||
|  | 
 | ||||||
|  |     let status = tester.client.get_node_health().await; | ||||||
|  |     match status { | ||||||
|  |         Ok(response) => { | ||||||
|  |             assert_eq!(response, StatusCode::PARTIAL_CONTENT); | ||||||
|  |         } | ||||||
|  |         Err(_) => { | ||||||
|  |             panic!("should return a 206 status code"); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
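Taken together, the three new tests encode the health endpoint's contract: 200 (OK) when the EL is reachable and synced, 206 (Partial Content) while the node is still syncing, and 503 (Service Unavailable) when the EL is unreachable. A hypothetical helper summarizing that mapping (the function name is illustrative and not from the codebase):

    /// Map node/EL state to the HTTP status the health endpoint is expected
    /// to return, per the three tests above.
    fn expected_health_status(el_reachable: bool, synced: bool) -> u16 {
        match (el_reachable, synced) {
            (false, _) => 503,    // EL offline: node is unhealthy
            (true, false) => 206, // EL online but not synced: partial content
            (true, true) => 200,  // EL online and synced: healthy
        }
    }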
|  | |||||||