commit b805fa6279: merge with upstream

.github/workflows/docker.yml (vendored, 15 lines changed)
@@ -55,7 +55,7 @@ jobs:
      VERSION: ${{ env.VERSION }}
      VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
  build-docker-single-arch:
-    name: build-docker-${{ matrix.binary }}
+    name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }}
    runs-on: ubuntu-22.04
    strategy:
      matrix:
@@ -63,6 +63,10 @@ jobs:
          aarch64-portable,
          x86_64,
          x86_64-portable]
+        features: [
+          {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"},
+          {version_suffix: "-dev", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc,spec-minimal"}
+        ]
        include:
          - profile: maxperf

@@ -72,7 +76,9 @@ jobs:
      DOCKER_CLI_EXPERIMENTAL: enabled
      VERSION: ${{ needs.extract-version.outputs.VERSION }}
      VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
-      CROSS_FEATURES: null
+      FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
+      FEATURES: ${{ matrix.features.env }}
+      CROSS_FEATURES: ${{ matrix.features.env }}
    steps:
      - uses: actions/checkout@v3
      - name: Update Rust
@@ -83,7 +89,7 @@ jobs:
      - name: Cross build Lighthouse binary
        run: |
          cargo install cross
-          env CROSS_PROFILE=${{ matrix.profile }} make build-${{ matrix.binary }}
+          env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }}
      - name: Move cross-built binary into Docker scope (if ARM)
        if: startsWith(matrix.binary, 'aarch64')
        run: |
@@ -111,7 +117,8 @@ jobs:
          docker buildx build \
            --platform=linux/${SHORT_ARCH} \
            --file ./Dockerfile.cross . \
-            --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \
+            --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \
+            --build-arg FEATURES=${FEATURES} \
            --provenance=false \
            --push
  build-docker-multiarch:
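The new `features` matrix dimension doubles the single-arch image builds: each binary in the `binary` list is now built once with the default feature set (empty `version_suffix`) and once with `spec-minimal` added (`-dev` suffix), and that suffix reaches the published tag through `${FEATURE_SUFFIX}` in the `docker buildx build --tag` argument above.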
.github/workflows/release.yml (vendored, 10 lines changed)
@@ -134,11 +134,17 @@ jobs:

      - name: Build Lighthouse for Windows portable
        if: matrix.arch == 'x86_64-windows-portable'
-        run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }}
+        # NOTE: profile set to release until this rustc issue is fixed:
+        #
+        # https://github.com/rust-lang/rust/issues/107781
+        #
+        # tracked at: https://github.com/sigp/lighthouse/issues/3964
+        run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile release

      - name: Build Lighthouse for Windows modern
        if: matrix.arch == 'x86_64-windows'
-        run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }}
+        # NOTE: profile set to release (see above)
+        run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile release

      - name: Configure GPG and create artifacts
        if: startsWith(matrix.arch, 'x86_64-windows') != true
Cargo.lock (generated, 199 lines changed)
@@ -463,7 +463,7 @@ dependencies = [
 "http",
 "http-body",
 "hyper",
- "itoa",
+ "itoa 1.0.5",
 "matchit",
 "memchr",
 "mime",
@@ -544,7 +544,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
[[package]]
name = "beacon-api-client"
version = "0.1.0"
-source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7d5d8dad1648f771573f42585ad8080a45b05689#7d5d8dad1648f771573f42585ad8080a45b05689"
+source = "git+https://github.com/ralexstokes/beacon-api-client#53690a711e33614d59d4d44fb09762b4699e2a4e"
dependencies = [
 "ethereum-consensus",
 "http",
@@ -570,7 +570,6 @@ dependencies = [
 "eth1",
 "eth2",
 "eth2_hashing",
- "eth2_network_config",
 "eth2_ssz",
 "eth2_ssz_derive",
 "eth2_ssz_types",
@@ -582,7 +581,6 @@ dependencies = [
 "hex",
 "int_to_bytes",
 "itertools",
- "kzg",
 "lazy_static",
 "lighthouse_metrics",
 "logging",
@@ -602,8 +600,6 @@ dependencies = [
 "serde_json",
 "slasher",
 "slog",
- "slog-async",
- "slog-term",
 "sloggers",
 "slot_clock",
 "smallvec",
@@ -644,7 +640,6 @@ dependencies = [
 "node_test_rig",
 "sensitive_url",
 "serde",
- "serde_json",
 "slasher",
 "slog",
 "store",
@@ -820,6 +815,18 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3"

+[[package]]
+name = "bstr"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
+dependencies = [
+ "lazy_static",
+ "memchr",
+ "regex-automata",
+ "serde",
+]
+
[[package]]
name = "buf_redux"
version = "0.8.4"
@@ -889,15 +896,6 @@ dependencies = [
 "pkg-config",
]

-[[package]]
-name = "c-kzg"
-version = "0.1.0"
-source = "git+https://github.com/ethereum/c-kzg-4844?rev=69f6155d7524247be9d3f54ab3bfbe33a0345622#69f6155d7524247be9d3f54ab3bfbe33a0345622"
-dependencies = [
- "hex",
- "libc",
-]
-
[[package]]
name = "cached_tree_hash"
version = "0.1.0"
@@ -1360,12 +1358,13 @@ dependencies = [

[[package]]
name = "csv"
-version = "1.2.0"
+version = "1.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359"
+checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
dependencies = [
+ "bstr",
 "csv-core",
- "itoa",
+ "itoa 0.4.8",
 "ryu",
 "serde",
]
@@ -1436,9 +1435,9 @@ dependencies = [

[[package]]
name = "cxx"
-version = "1.0.90"
+version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8"
+checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9"
dependencies = [
 "cc",
 "cxxbridge-flags",
@@ -1448,9 +1447,9 @@ dependencies = [

[[package]]
name = "cxx-build"
-version = "1.0.90"
+version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38"
+checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d"
dependencies = [
 "cc",
 "codespan-reporting",
@@ -1463,15 +1462,15 @@ dependencies = [

[[package]]
name = "cxxbridge-flags"
-version = "1.0.90"
+version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03"
+checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a"

[[package]]
name = "cxxbridge-macro"
-version = "1.0.90"
+version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263"
+checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2"
dependencies = [
 "proc-macro2",
 "quote",
@@ -1830,7 +1829,7 @@ dependencies = [
 "enr",
 "fnv",
 "futures",
- "hashlink 0.7.0",
+ "hashlink",
 "hex",
 "hkdf",
 "lazy_static",
@@ -2206,8 +2205,6 @@ dependencies = [
 "enr",
 "eth2_config",
 "eth2_ssz",
- "kzg",
- "serde_json",
 "serde_yaml",
 "tempfile",
 "types",
@@ -2351,7 +2348,7 @@ dependencies = [
[[package]]
name = "ethereum-consensus"
version = "0.1.1"
-source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=a8110af76d97bf2bf27fb987a671808fcbdf1834#a8110af76d97bf2bf27fb987a671808fcbdf1834"
+source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d"
dependencies = [
 "async-stream",
 "blst",
@@ -2360,6 +2357,7 @@ dependencies = [
 "hex",
 "integer-sqrt",
 "multiaddr 0.14.0",
+ "multihash",
 "rand 0.8.5",
 "serde",
 "serde_json",
@@ -2517,7 +2515,7 @@ dependencies = [
 "lazy_static",
 "lighthouse_metrics",
 "lru 0.7.8",
- "mev-build-rs",
+ "mev-rs",
 "parking_lot 0.12.1",
 "rand 0.8.5",
 "reqwest",
@@ -2565,9 +2563,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"

[[package]]
name = "fastrand"
-version = "1.9.0"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
+checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
dependencies = [
 "instant",
]
@@ -2989,7 +2987,7 @@ dependencies = [
 "indexmap",
 "slab",
 "tokio",
- "tokio-util 0.7.7",
+ "tokio-util 0.7.4",
 "tracing",
]

@@ -3050,15 +3048,6 @@ dependencies = [
 "hashbrown 0.11.2",
]

-[[package]]
-name = "hashlink"
-version = "0.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa"
-dependencies = [
- "hashbrown 0.12.3",
-]
-
[[package]]
name = "headers"
version = "0.3.8"
@@ -3198,7 +3187,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
dependencies = [
 "bytes",
 "fnv",
- "itoa",
+ "itoa 1.0.5",
]

[[package]]
@@ -3317,7 +3306,7 @@ dependencies = [
 "http-body",
 "httparse",
 "httpdate",
- "itoa",
+ "itoa 1.0.5",
 "pin-project-lite 0.2.9",
 "socket2",
 "tokio",
@@ -3481,7 +3470,7 @@ version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f"
dependencies = [
- "parity-scale-codec 3.4.0",
+ "parity-scale-codec 3.3.0",
]

[[package]]
@@ -3608,6 +3597,12 @@ dependencies = [
 "either",
]

+[[package]]
+name = "itoa"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
+
[[package]]
name = "itoa"
version = "1.0.5"
@@ -3715,23 +3710,6 @@ dependencies = [
 "tiny-keccak",
]

-[[package]]
-name = "kzg"
-version = "0.1.0"
-dependencies = [
- "arbitrary",
- "c-kzg",
- "derivative",
- "eth2_hashing",
- "eth2_serde_utils",
- "eth2_ssz",
- "eth2_ssz_derive",
- "hex",
- "serde",
- "serde_derive",
- "tree_hash",
-]
-
[[package]]
name = "lazy_static"
version = "1.4.0"
@@ -3762,13 +3740,11 @@ dependencies = [
 "environment",
 "eth1_test_rig",
 "eth2",
- "eth2_hashing",
 "eth2_network_config",
 "eth2_ssz",
 "eth2_wallet",
 "genesis",
 "int_to_bytes",
- "kzg",
 "lighthouse_network",
 "lighthouse_version",
 "log",
@@ -4246,7 +4222,7 @@ dependencies = [
 "thiserror",
 "tinytemplate",
 "tokio",
- "tokio-util 0.7.7",
+ "tokio-util 0.7.4",
 "webrtc",
]

@@ -4333,9 +4309,9 @@ dependencies = [

[[package]]
name = "libsqlite3-sys"
-version = "0.25.2"
+version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa"
+checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d"
dependencies = [
 "cc",
 "pkg-config",
@@ -4703,18 +4679,19 @@ dependencies = [
]

[[package]]
-name = "mev-build-rs"
+name = "mev-rs"
version = "0.2.1"
-source = "git+https://github.com/ralexstokes/mev-rs?rev=6c99b0fbdc0427b1625469d2e575303ce08de5b8#6c99b0fbdc0427b1625469d2e575303ce08de5b8"
+source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889"
dependencies = [
 "async-trait",
 "axum",
 "beacon-api-client",
 "ethereum-consensus",
+ "hyper",
 "serde",
- "serde_json",
 "ssz-rs",
 "thiserror",
+ "tokio",
 "tracing",
]

@@ -4763,14 +4740,14 @@ dependencies = [

[[package]]
name = "mio"
-version = "0.8.6"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9"
+checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de"
dependencies = [
 "libc",
 "log",
 "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.45.0",
+ "windows-sys 0.42.0",
]

[[package]]
|
|||||||
"eth2_ssz",
|
"eth2_ssz",
|
||||||
"eth2_ssz_types",
|
"eth2_ssz_types",
|
||||||
"ethereum-types 0.14.1",
|
"ethereum-types 0.14.1",
|
||||||
|
"execution_layer",
|
||||||
"exit-future",
|
"exit-future",
|
||||||
"fnv",
|
"fnv",
|
||||||
"futures",
|
"futures",
|
||||||
@ -5255,9 +5233,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "once_cell"
|
name = "once_cell"
|
||||||
version = "1.17.1"
|
version = "1.17.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
|
checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "oneshot_broadcast"
|
name = "oneshot_broadcast"
|
||||||
@ -5436,9 +5414,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "parity-scale-codec"
|
name = "parity-scale-codec"
|
||||||
version = "3.4.0"
|
version = "3.3.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac"
|
checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arrayvec",
|
"arrayvec",
|
||||||
"bitvec 1.0.1",
|
"bitvec 1.0.1",
|
||||||
@ -5878,7 +5856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c"
|
checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"dtoa",
|
"dtoa",
|
||||||
"itoa",
|
"itoa 1.0.5",
|
||||||
"parking_lot 0.12.1",
|
"parking_lot 0.12.1",
|
||||||
"prometheus-client-derive-text-encode",
|
"prometheus-client-derive-text-encode",
|
||||||
]
|
]
|
||||||
@ -6080,9 +6058,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "r2d2_sqlite"
|
name = "r2d2_sqlite"
|
||||||
version = "0.21.0"
|
version = "0.18.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034"
|
checksum = "9d24607049214c5e42d3df53ac1d8a23c34cc6a5eefe3122acb2c72174719959"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"r2d2",
|
"r2d2",
|
||||||
"rusqlite",
|
"rusqlite",
|
||||||
@ -6315,7 +6293,7 @@ dependencies = [
|
|||||||
"tokio",
|
"tokio",
|
||||||
"tokio-native-tls",
|
"tokio-native-tls",
|
||||||
"tokio-rustls 0.23.4",
|
"tokio-rustls 0.23.4",
|
||||||
"tokio-util 0.7.7",
|
"tokio-util 0.7.4",
|
||||||
"tower-service",
|
"tower-service",
|
||||||
"url",
|
"url",
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
@ -6441,15 +6419,16 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rusqlite"
|
name = "rusqlite"
|
||||||
version = "0.28.0"
|
version = "0.25.4"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a"
|
checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags",
|
||||||
"fallible-iterator",
|
"fallible-iterator",
|
||||||
"fallible-streaming-iterator",
|
"fallible-streaming-iterator",
|
||||||
"hashlink 0.8.1",
|
"hashlink",
|
||||||
"libsqlite3-sys",
|
"libsqlite3-sys",
|
||||||
|
"memchr",
|
||||||
"smallvec",
|
"smallvec",
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -6600,7 +6579,7 @@ checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"derive_more",
|
"derive_more",
|
||||||
"parity-scale-codec 3.4.0",
|
"parity-scale-codec 3.3.0",
|
||||||
"scale-info-derive",
|
"scale-info-derive",
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -6813,6 +6792,16 @@ dependencies = [
|
|||||||
"serde_derive",
|
"serde_derive",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde-big-array"
|
||||||
|
version = "0.3.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62"
|
||||||
|
dependencies = [
|
||||||
|
"serde",
|
||||||
|
"serde_derive",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde_array_query"
|
name = "serde_array_query"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -6846,11 +6835,11 @@ dependencies = [

[[package]]
name = "serde_json"
-version = "1.0.93"
+version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76"
+checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a"
dependencies = [
- "itoa",
+ "itoa 1.0.5",
 "ryu",
 "serde",
]
@@ -6873,7 +6862,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
dependencies = [
 "form_urlencoded",
- "itoa",
+ "itoa 1.0.5",
 "ryu",
 "serde",
]
@@ -7010,9 +6999,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"

[[package]]
name = "signal-hook-registry"
-version = "1.4.1"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
+checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0"
dependencies = [
 "libc",
]
@@ -7318,11 +7307,10 @@ dependencies = [
[[package]]
name = "ssz-rs"
version = "0.8.0"
-source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737"
+source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1"
dependencies = [
 "bitvec 1.0.1",
 "hex",
- "lazy_static",
 "num-bigint",
 "serde",
 "sha2 0.9.9",
@@ -7333,7 +7321,7 @@ dependencies = [
[[package]]
name = "ssz-rs-derive"
version = "0.8.0"
-source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737"
+source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1"
dependencies = [
 "proc-macro2",
 "quote",
@@ -7712,11 +7700,10 @@ dependencies = [

[[package]]
name = "thread_local"
-version = "1.1.7"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
+checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180"
dependencies = [
- "cfg-if",
 "once_cell",
]

@@ -7746,7 +7733,7 @@ version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
dependencies = [
- "itoa",
+ "itoa 1.0.5",
 "libc",
 "num_threads",
 "serde",
@@ -7915,7 +7902,7 @@ dependencies = [
 "futures-core",
 "pin-project-lite 0.2.9",
 "tokio",
- "tokio-util 0.7.7",
+ "tokio-util 0.7.4",
]

[[package]]
@@ -7965,9 +7952,9 @@ dependencies = [

[[package]]
name = "tokio-util"
-version = "0.7.7"
+version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2"
+checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740"
dependencies = [
 "bytes",
 "futures-core",
@@ -8308,7 +8295,6 @@ dependencies = [
 "hex",
 "int_to_bytes",
 "itertools",
- "kzg",
 "lazy_static",
 "log",
 "maplit",
@@ -8322,6 +8308,7 @@ dependencies = [
 "rusqlite",
 "safe_arith",
 "serde",
+ "serde-big-array",
 "serde_derive",
 "serde_json",
 "serde_with",
@@ -9013,9 +9000,9 @@ dependencies = [

[[package]]
name = "webrtc-ice"
-version = "0.9.1"
+version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80"
+checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7"
dependencies = [
 "arc-swap",
 "async-trait",
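After this merge the lockfile deliberately carries two `itoa` entries: 0.4.8, pulled in by the downgraded `csv` 1.1.6 and its new `bstr` dependency, and 1.0.5 for everything else. Cargo allows semver-incompatible versions to coexist, which is why the dependency strings above are disambiguated as `itoa 0.4.8` and `itoa 1.0.5`. The `ralexstokes//...` sources with a doubled slash correspond to the `[patch]` entries added to `Cargo.toml` below.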
Cargo.toml
@@ -104,6 +104,13 @@ tree_hash_derive = { path = "consensus/tree_hash_derive" }
eth2_serde_utils = { path = "consensus/serde_utils" }
arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" }

+[patch."https://github.com/ralexstokes/mev-rs"]
+mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" }
+[patch."https://github.com/ralexstokes/ethereum-consensus"]
+ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" }
+[patch."https://github.com/ralexstokes/ssz-rs"]
+ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" }
+
[profile.maxperf]
inherits = "release"
lto = "fat"
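The doubled slash in `ralexstokes//mev-rs` (and the other two patches) looks intentional: Cargo rejects a `[patch]` whose replacement URL is identical to the source it replaces, while GitHub resolves `ralexstokes//mev-rs` and `ralexstokes/mev-rs` to the same repository. The extra slash therefore lets the workspace pin these git dependencies to fixed revisions even though the crates' own manifests reference the unpinned URLs.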
beacon_node/beacon_chain/src/attestation_rewards.rs (new file, 195 lines)
@@ -0,0 +1,195 @@
+use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
+use eth2::lighthouse::attestation_rewards::{IdealAttestationRewards, TotalAttestationRewards};
+use eth2::lighthouse::StandardAttestationRewards;
+use participation_cache::ParticipationCache;
+use safe_arith::SafeArith;
+use slog::{debug, Logger};
+use state_processing::{
+    common::altair::BaseRewardPerIncrement,
+    per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight},
+};
+use std::collections::HashMap;
+use store::consts::altair::{
+    PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX,
+    TIMELY_TARGET_FLAG_INDEX,
+};
+use types::consts::altair::WEIGHT_DENOMINATOR;
+
+use types::{Epoch, EthSpec};
+
+use eth2::types::ValidatorId;
+
+impl<T: BeaconChainTypes> BeaconChain<T> {
+    pub fn compute_attestation_rewards(
+        &self,
+        epoch: Epoch,
+        validators: Vec<ValidatorId>,
+        log: Logger,
+    ) -> Result<StandardAttestationRewards, BeaconChainError> {
+        debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len());
+
+        // Get state
+        let spec = &self.spec;
+
+        let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch());
+
+        let state_root = self
+            .state_root_at_slot(state_slot)?
+            .ok_or(BeaconChainError::NoStateForSlot(state_slot))?;
+
+        let mut state = self
+            .get_state(&state_root, Some(state_slot))?
+            .ok_or(BeaconChainError::MissingBeaconState(state_root))?;
+
+        // Calculate ideal_rewards
+        let participation_cache = ParticipationCache::new(&state, spec)?;
+
+        let previous_epoch = state.previous_epoch();
+
+        let mut ideal_rewards_hashmap = HashMap::new();
+
+        for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() {
+            let weight = get_flag_weight(flag_index)
+                .map_err(|_| BeaconChainError::AttestationRewardsError)?;
+
+            let unslashed_participating_indices = participation_cache
+                .get_unslashed_participating_indices(flag_index, previous_epoch)?;
+
+            let unslashed_participating_balance =
+                unslashed_participating_indices
+                    .total_balance()
+                    .map_err(|_| BeaconChainError::AttestationRewardsError)?;
+
+            let unslashed_participating_increments =
+                unslashed_participating_balance.safe_div(spec.effective_balance_increment)?;
+
+            let total_active_balance = participation_cache.current_epoch_total_active_balance();
+
+            let active_increments =
+                total_active_balance.safe_div(spec.effective_balance_increment)?;
+
+            let base_reward_per_increment =
+                BaseRewardPerIncrement::new(total_active_balance, spec)?;
+
+            for effective_balance_eth in 0..=32 {
+                let effective_balance =
+                    effective_balance_eth.safe_mul(spec.effective_balance_increment)?;
+                let base_reward =
+                    effective_balance_eth.safe_mul(base_reward_per_increment.as_u64())?;
+
+                let penalty = -(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)? as i64);
+
+                let reward_numerator = base_reward
+                    .safe_mul(weight)?
+                    .safe_mul(unslashed_participating_increments)?;
+
+                let ideal_reward = reward_numerator
+                    .safe_div(active_increments)?
+                    .safe_div(WEIGHT_DENOMINATOR)?;
+                if !state.is_in_inactivity_leak(previous_epoch, spec) {
+                    ideal_rewards_hashmap
+                        .insert((flag_index, effective_balance), (ideal_reward, penalty));
+                } else {
+                    ideal_rewards_hashmap.insert((flag_index, effective_balance), (0, penalty));
+                }
+            }
+        }
+
+        // Calculate total_rewards
+        let mut total_rewards: Vec<TotalAttestationRewards> = Vec::new();
+
+        let validators = if validators.is_empty() {
+            participation_cache.eligible_validator_indices().to_vec()
+        } else {
+            validators
+                .into_iter()
+                .map(|validator| match validator {
+                    ValidatorId::Index(i) => Ok(i as usize),
+                    ValidatorId::PublicKey(pubkey) => state
+                        .get_validator_index(&pubkey)?
+                        .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)),
+                })
+                .collect::<Result<Vec<_>, _>>()?
+        };
+
+        for validator_index in &validators {
+            let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?;
+            let mut head_reward = 0u64;
+            let mut target_reward = 0i64;
+            let mut source_reward = 0i64;
+
+            if eligible {
+                let effective_balance = state.get_effective_balance(*validator_index)?;
+
+                for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() {
+                    let (ideal_reward, penalty) = ideal_rewards_hashmap
+                        .get(&(flag_index, effective_balance))
+                        .ok_or(BeaconChainError::AttestationRewardsError)?;
+                    let voted_correctly = participation_cache
+                        .get_unslashed_participating_indices(flag_index, previous_epoch)
+                        .map_err(|_| BeaconChainError::AttestationRewardsError)?
+                        .contains(*validator_index)
+                        .map_err(|_| BeaconChainError::AttestationRewardsError)?;
+                    if voted_correctly {
+                        if flag_index == TIMELY_HEAD_FLAG_INDEX {
+                            head_reward += ideal_reward;
+                        } else if flag_index == TIMELY_TARGET_FLAG_INDEX {
+                            target_reward += *ideal_reward as i64;
+                        } else if flag_index == TIMELY_SOURCE_FLAG_INDEX {
+                            source_reward += *ideal_reward as i64;
+                        }
+                    } else if flag_index == TIMELY_HEAD_FLAG_INDEX {
+                        head_reward = 0;
+                    } else if flag_index == TIMELY_TARGET_FLAG_INDEX {
+                        target_reward = *penalty;
+                    } else if flag_index == TIMELY_SOURCE_FLAG_INDEX {
+                        source_reward = *penalty;
+                    }
+                }
+            }
+            total_rewards.push(TotalAttestationRewards {
+                validator_index: *validator_index as u64,
+                head: head_reward,
+                target: target_reward,
+                source: source_reward,
+            });
+        }
+
+        // Convert hashmap to vector
+        let mut ideal_rewards: Vec<IdealAttestationRewards> = ideal_rewards_hashmap
+            .iter()
+            .map(
+                |((flag_index, effective_balance), (ideal_reward, _penalty))| {
+                    (flag_index, effective_balance, ideal_reward)
+                },
+            )
+            .fold(
+                HashMap::new(),
+                |mut acc, (flag_index, &effective_balance, ideal_reward)| {
+                    let entry = acc
+                        .entry(effective_balance)
+                        .or_insert(IdealAttestationRewards {
+                            effective_balance,
+                            head: 0,
+                            target: 0,
+                            source: 0,
+                        });
+                    match *flag_index {
+                        TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward,
+                        TIMELY_TARGET_FLAG_INDEX => entry.target += ideal_reward,
+                        TIMELY_HEAD_FLAG_INDEX => entry.head += ideal_reward,
+                        _ => {}
+                    }
+                    acc
+                },
+            )
+            .into_values()
+            .collect::<Vec<IdealAttestationRewards>>();
+        ideal_rewards.sort_by(|a, b| a.effective_balance.cmp(&b.effective_balance));
+
+        Ok(StandardAttestationRewards {
+            ideal_rewards,
+            total_rewards,
+        })
+    }
+}
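For reference, the per-flag `ideal_reward` computed in the nested loop above follows the Altair rewards formula. A minimal standalone sketch of that arithmetic (illustrative names, not the Lighthouse API; the overflow-checked `safe_*` math is elided):

```rust
/// Ideal attestation reward for one participation flag (Altair).
/// `base_reward` = effective-balance increments * base_reward_per_increment.
fn ideal_flag_reward(
    base_reward: u64,
    flag_weight: u64,                        // e.g. TIMELY_TARGET_WEIGHT = 26
    unslashed_participating_increments: u64, // correct voters' balance / increment
    active_increments: u64,                  // total active balance / increment
) -> u64 {
    const WEIGHT_DENOMINATOR: u64 = 64;
    base_reward * flag_weight * unslashed_participating_increments
        / active_increments
        / WEIGHT_DENOMINATOR
}
```

During an inactivity leak the code above stores an ideal reward of zero and keeps only the penalty, matching the spec's behaviour of withholding attestation rewards while leaking.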
beacon_node/beacon_chain/src/beacon_block_reward.rs (new file, 237 lines)
@@ -0,0 +1,237 @@
+use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
+use eth2::lighthouse::StandardBlockReward;
+use operation_pool::RewardCache;
+use safe_arith::SafeArith;
+use slog::error;
+use state_processing::{
+    common::{
+        altair, get_attestation_participation_flag_indices, get_attesting_indices_from_state,
+    },
+    per_block_processing::{
+        altair::sync_committee::compute_sync_aggregate_rewards, get_slashable_indices,
+    },
+};
+use store::{
+    consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR},
+    RelativeEpoch,
+};
+use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Hash256};
+
+type BeaconBlockSubRewardValue = u64;
+
+impl<T: BeaconChainTypes> BeaconChain<T> {
+    pub fn compute_beacon_block_reward<Payload: AbstractExecPayload<T::EthSpec>>(
+        &self,
+        block: BeaconBlockRef<'_, T::EthSpec, Payload>,
+        block_root: Hash256,
+        state: &mut BeaconState<T::EthSpec>,
+    ) -> Result<StandardBlockReward, BeaconChainError> {
+        if block.slot() != state.slot() {
+            return Err(BeaconChainError::BlockRewardSlotError);
+        }
+
+        state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?;
+        state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
+
+        let proposer_index = block.proposer_index();
+
+        let sync_aggregate_reward =
+            self.compute_beacon_block_sync_aggregate_reward(block, state)?;
+
+        let proposer_slashing_reward = self
+            .compute_beacon_block_proposer_slashing_reward(block, state)
+            .map_err(|e| {
+                error!(
+                    self.log,
+                    "Error calculating proposer slashing reward";
+                    "error" => ?e
+                );
+                BeaconChainError::BlockRewardError
+            })?;
+
+        let attester_slashing_reward = self
+            .compute_beacon_block_attester_slashing_reward(block, state)
+            .map_err(|e| {
+                error!(
+                    self.log,
+                    "Error calculating attester slashing reward";
+                    "error" => ?e
+                );
+                BeaconChainError::BlockRewardError
+            })?;
+
+        let block_attestation_reward = if let BeaconState::Base(_) = state {
+            self.compute_beacon_block_attestation_reward_base(block, block_root, state)
+                .map_err(|e| {
+                    error!(
+                        self.log,
+                        "Error calculating base block attestation reward";
+                        "error" => ?e
+                    );
+                    BeaconChainError::BlockRewardAttestationError
+                })?
+        } else {
+            self.compute_beacon_block_attestation_reward_altair(block, state)
+                .map_err(|e| {
+                    error!(
+                        self.log,
+                        "Error calculating altair block attestation reward";
+                        "error" => ?e
+                    );
+                    BeaconChainError::BlockRewardAttestationError
+                })?
+        };
+
+        let total_reward = sync_aggregate_reward
+            .safe_add(proposer_slashing_reward)?
+            .safe_add(attester_slashing_reward)?
+            .safe_add(block_attestation_reward)?;
+
+        Ok(StandardBlockReward {
+            proposer_index,
+            total: total_reward,
+            attestations: block_attestation_reward,
+            sync_aggregate: sync_aggregate_reward,
+            proposer_slashings: proposer_slashing_reward,
+            attester_slashings: attester_slashing_reward,
+        })
+    }
+
+    fn compute_beacon_block_sync_aggregate_reward<Payload: AbstractExecPayload<T::EthSpec>>(
+        &self,
+        block: BeaconBlockRef<'_, T::EthSpec, Payload>,
+        state: &BeaconState<T::EthSpec>,
+    ) -> Result<BeaconBlockSubRewardValue, BeaconChainError> {
+        if let Ok(sync_aggregate) = block.body().sync_aggregate() {
+            let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec)
+                .map_err(|_| BeaconChainError::BlockRewardSyncError)?;
+            Ok(sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit)
+        } else {
+            Ok(0)
+        }
+    }
+
+    fn compute_beacon_block_proposer_slashing_reward<Payload: AbstractExecPayload<T::EthSpec>>(
+        &self,
+        block: BeaconBlockRef<'_, T::EthSpec, Payload>,
+        state: &BeaconState<T::EthSpec>,
+    ) -> Result<BeaconBlockSubRewardValue, BeaconChainError> {
+        let mut proposer_slashing_reward = 0;
+
+        let proposer_slashings = block.body().proposer_slashings();
+
+        for proposer_slashing in proposer_slashings {
+            proposer_slashing_reward.safe_add_assign(
+                state
+                    .get_validator(proposer_slashing.proposer_index() as usize)?
+                    .effective_balance
+                    .safe_div(self.spec.whistleblower_reward_quotient)?,
+            )?;
+        }
+
+        Ok(proposer_slashing_reward)
+    }
+
+    fn compute_beacon_block_attester_slashing_reward<Payload: AbstractExecPayload<T::EthSpec>>(
+        &self,
+        block: BeaconBlockRef<'_, T::EthSpec, Payload>,
+        state: &BeaconState<T::EthSpec>,
+    ) -> Result<BeaconBlockSubRewardValue, BeaconChainError> {
+        let mut attester_slashing_reward = 0;
+
+        let attester_slashings = block.body().attester_slashings();
+
+        for attester_slashing in attester_slashings {
+            for attester_index in get_slashable_indices(state, attester_slashing)? {
+                attester_slashing_reward.safe_add_assign(
+                    state
+                        .get_validator(attester_index as usize)?
+                        .effective_balance
+                        .safe_div(self.spec.whistleblower_reward_quotient)?,
+                )?;
+            }
+        }
+
+        Ok(attester_slashing_reward)
+    }
+
+    fn compute_beacon_block_attestation_reward_base<Payload: AbstractExecPayload<T::EthSpec>>(
+        &self,
+        block: BeaconBlockRef<'_, T::EthSpec, Payload>,
+        block_root: Hash256,
+        state: &BeaconState<T::EthSpec>,
+    ) -> Result<BeaconBlockSubRewardValue, BeaconChainError> {
+        // Call compute_block_reward in the base case
+        // Since base does not have sync aggregate, we only grab attesation portion of the returned
+        // value
+        let mut reward_cache = RewardCache::default();
+        let block_attestation_reward = self
+            .compute_block_reward(block, block_root, state, &mut reward_cache, true)?
+            .attestation_rewards
+            .total;
+
+        Ok(block_attestation_reward)
+    }
+
+    fn compute_beacon_block_attestation_reward_altair<Payload: AbstractExecPayload<T::EthSpec>>(
+        &self,
+        block: BeaconBlockRef<'_, T::EthSpec, Payload>,
+        state: &mut BeaconState<T::EthSpec>,
+    ) -> Result<BeaconBlockSubRewardValue, BeaconChainError> {
+        let total_active_balance = state.get_total_active_balance()?;
+        let base_reward_per_increment =
+            altair::BaseRewardPerIncrement::new(total_active_balance, &self.spec)?;
+
+        let mut total_proposer_reward = 0;
+
+        let proposer_reward_denominator = WEIGHT_DENOMINATOR
+            .safe_sub(PROPOSER_WEIGHT)?
+            .safe_mul(WEIGHT_DENOMINATOR)?
+            .safe_div(PROPOSER_WEIGHT)?;
+
+        for attestation in block.body().attestations() {
+            let data = &attestation.data;
+            let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64();
+            let participation_flag_indices = get_attestation_participation_flag_indices(
+                state,
+                data,
+                inclusion_delay,
+                &self.spec,
+            )?;
+
+            let attesting_indices = get_attesting_indices_from_state(state, attestation)?;
+
+            let mut proposer_reward_numerator = 0;
+            for index in attesting_indices {
+                let index = index as usize;
+                for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() {
+                    let epoch_participation =
+                        state.get_epoch_participation_mut(data.target.epoch)?;
+                    let validator_participation = epoch_participation
+                        .get_mut(index)
+                        .ok_or(BeaconStateError::ParticipationOutOfBounds(index))?;
+
+                    if participation_flag_indices.contains(&flag_index)
+                        && !validator_participation.has_flag(flag_index)?
+                    {
+                        validator_participation.add_flag(flag_index)?;
+                        proposer_reward_numerator.safe_add_assign(
+                            altair::get_base_reward(
+                                state,
+                                index,
+                                base_reward_per_increment,
+                                &self.spec,
+                            )?
+                            .safe_mul(weight)?,
+                        )?;
+                    }
+                }
+            }
+            total_proposer_reward.safe_add_assign(
+                proposer_reward_numerator.safe_div(proposer_reward_denominator)?,
+            )?;
+        }
+
+        Ok(total_proposer_reward)
+    }
+}
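The `proposer_reward_denominator` computed above is a spec constant in disguise: with the Altair weights it is fixed. A quick check of the arithmetic (constants as defined in the Altair spec):

```rust
const WEIGHT_DENOMINATOR: u64 = 64;
const PROPOSER_WEIGHT: u64 = 8;

/// (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR / PROPOSER_WEIGHT
/// = (64 - 8) * 64 / 8 = 448.
fn proposer_reward_denominator() -> u64 {
    (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR / PROPOSER_WEIGHT
}

fn main() {
    // Each attestation contributes proposer_reward_numerator / 448 to the proposer.
    assert_eq!(proposer_reward_denominator(), 448);
}
```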
beacon_node/beacon_chain/src/beacon_chain.rs
@@ -10,7 +10,7 @@ use crate::blob_cache::BlobCache;
use crate::blob_verification::{AsBlock, AvailableBlock, BlockWrapper};
use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::{
-    check_block_is_finalized_descendant, check_block_relevancy, get_block_root,
+    check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root,
    signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock,
    IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER,
};
@@ -1025,11 +1025,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            })?
            .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?;

-        //FIXME(sean) avoid the clone by comparing refs to headers (`as_execution_payload_header` method ?)
-        let full_payload: FullPayload<T::EthSpec> = execution_payload.clone().into();
-
        // Verify payload integrity.
-        let header_from_payload = full_payload.to_execution_payload_header();
+        let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref());
        if header_from_payload != execution_payload_header {
            for txn in execution_payload.transactions() {
                debug!(
@@ -2900,7 +2897,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        let mut fork_choice = self.canonical_head.fork_choice_write_lock();

        // Do not import a block that doesn't descend from the finalized root.
-        let signed_block = check_block_is_finalized_descendant(self, &fork_choice, signed_block)?;
+        let signed_block = check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, signed_block)?;
        let block = signed_block.message();

        // Register the new block with the fork choice service.
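The switch to `ExecutionPayloadHeader::from(execution_payload.to_ref())` resolves the FIXME it replaces: building the header from a payload reference compares headers directly instead of cloning the full payload just to derive one.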
beacon_node/beacon_chain/src/block_verification.rs
@@ -792,7 +792,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
        // Do not process a block that doesn't descend from the finalized root.
        //
        // We check this *before* we load the parent so that we can return a more detailed error.
-        let block = check_block_is_finalized_descendant(
+        let block = check_block_is_finalized_checkpoint_or_descendant(
            chain,
            &chain.canonical_head.fork_choice_write_lock(),
            block,
@@ -1647,12 +1647,12 @@ fn check_block_against_finalized_slot<T: BeaconChainTypes>(
/// ## Warning
///
/// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here.
-pub fn check_block_is_finalized_descendant<T: BeaconChainTypes, B: IntoBlockWrapper<T::EthSpec>>(
+pub fn check_block_is_finalized_checkpoint_or_descendant<T: BeaconChainTypes, B: IntoBlockWrapper<T::EthSpec>>(
    chain: &BeaconChain<T>,
    fork_choice: &BeaconForkChoice<T>,
-    block: B,
+    block: &Arc<SignedBeaconBlock<T::EthSpec>>,
) -> Result<B, BlockError<T::EthSpec>> {
-    if fork_choice.is_descendant_of_finalized(block.parent_root()) {
+    if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) {
        Ok(block)
    } else {
        // If fork choice does *not* consider the parent to be a descendant of the finalized block,
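The rename mirrors the new fork-choice predicate: judging by the names, `is_finalized_checkpoint_or_descendant` also accepts the finalized block itself as valid ancestry, where `is_descendant_of_finalized` required a strict descendant. A hypothetical sketch of the relaxed check, for illustration only (not the actual `ForkChoice` API):

```rust
/// Accept `root` when it is the finalized checkpoint itself or any
/// block descending from it (hypothetical helper for illustration).
fn is_finalized_checkpoint_or_descendant<R: PartialEq>(
    root: R,
    finalized_root: R,
    is_descendant_of_finalized: impl Fn(&R) -> bool,
) -> bool {
    root == finalized_root || is_descendant_of_finalized(&root)
}
```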
beacon_node/beacon_chain/src/errors.rs
@@ -51,7 +51,6 @@ pub enum BeaconChainError {
    },
    SlotClockDidNotStart,
    NoStateForSlot(Slot),
-    UnableToFindTargetRoot(Slot),
    BeaconStateError(BeaconStateError),
    DBInconsistent(String),
    DBError(store::Error),
@@ -157,10 +156,12 @@ pub enum BeaconChainError {
    ExecutionForkChoiceUpdateInvalid {
        status: PayloadStatus,
    },
+    BlockRewardError,
    BlockRewardSlotError,
    BlockRewardAttestationError,
    BlockRewardSyncError,
    SyncCommitteeRewardsSyncError,
+    AttestationRewardsError,
    HeadMissingFromForkChoice(Hash256),
    FinalizedBlockMissingFromForkChoice(Hash256),
    HeadBlockMissingFromForkChoice(Hash256),
beacon_node/beacon_chain/src/lib.rs
@@ -1,6 +1,8 @@
#![recursion_limit = "128"] // For lazy-static
+pub mod attestation_rewards;
pub mod attestation_verification;
mod attester_cache;
+pub mod beacon_block_reward;
mod beacon_chain;
mod beacon_fork_choice_store;
pub mod beacon_proposer_cache;
beacon_node/execution_layer/Cargo.toml
@@ -41,9 +41,9 @@ lazy_static = "1.4.0"
ethers-core = "1.0.2"
builder_client = { path = "../builder_client" }
fork_choice = { path = "../../consensus/fork_choice" }
-mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" }
-ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" }
-ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" }
+mev-rs = { git = "https://github.com/ralexstokes/mev-rs" }
+ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" }
+ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" }
tokio-stream = { version = "0.1.9", features = [ "sync" ] }
strum = "0.24.0"
keccak-hash = "0.10.0"
@@ -364,7 +364,7 @@ impl Engine {
                 Ok(result)
             }
             Err(error) => {
-                error!(
+                warn!(
                     self.log,
                     "Execution engine call failed";
                     "error" => ?error,
@@ -40,15 +40,18 @@ use tokio::{
     time::sleep,
 };
 use tokio_stream::wrappers::WatchStream;
+use tree_hash::TreeHash;
+use types::{Withdrawals};
 use types::consts::eip4844::BLOB_TX_TYPE;
 use types::transaction::{AccessTuple, BlobTransaction, EcdsaSignature, SignedBlobTransaction};
 use types::{
+    BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload,
+    ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName,
     blobs_sidecar::{Blobs, KzgCommitments},
     ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
 };
 use types::{AbstractExecPayload, BeaconStateError, ExecPayload, VersionedHash};
 use types::{
-    BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName,
     ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Transaction,
     Uint256,
 };
@@ -1816,10 +1819,10 @@ impl<T: EthSpec> ExecutionLayer<T> {
                 &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME,
                 &[metrics::FAILURE],
             );
-            error!(
+            warn!(
                 self.log(),
                 "Builder failed to reveal payload";
-                "info" => "this relay failure may cause a missed proposal",
+                "info" => "this is common behaviour for some builders and may not indicate an issue",
                 "error" => ?e,
                 "relay_response_ms" => duration.as_millis(),
                 "block_root" => ?block_root,
@@ -1870,10 +1873,9 @@ enum InvalidBuilderPayload {
         signature: Signature,
         pubkey: PublicKeyBytes,
     },
-    #[allow(dead_code)]
     WithdrawalsRoot {
-        payload: Hash256,
-        expected: Hash256,
+        payload: Option<Hash256>,
+        expected: Option<Hash256>,
     },
 }

@@ -1926,10 +1928,16 @@ impl fmt::Display for InvalidBuilderPayload {
                 signature, pubkey
             ),
             InvalidBuilderPayload::WithdrawalsRoot { payload, expected } => {
+                let opt_string = |opt_hash: &Option<Hash256>| {
+                    opt_hash
+                        .map(|hash| hash.to_string())
+                        .unwrap_or_else(|| "None".to_string())
+                };
                 write!(
                     f,
                     "payload withdrawals root was {} not {}",
-                    payload, expected
+                    opt_string(payload),
+                    opt_string(expected)
                 )
             }
         }
@@ -1960,6 +1968,13 @@ fn verify_builder_bid<T: EthSpec, Payload: AbstractExecPayload<T>>(
         );
     }

+    let expected_withdrawals_root = payload_attributes
+        .withdrawals()
+        .ok()
+        .cloned()
+        .map(|withdrawals| Withdrawals::<T>::from(withdrawals).tree_hash_root());
+    let payload_withdrawals_root = header.withdrawals_root().ok();
+
     if payload_value < profit_threshold {
         Err(Box::new(InvalidBuilderPayload::LowValue {
             profit_threshold,
@@ -1995,6 +2010,11 @@ fn verify_builder_bid<T: EthSpec, Payload: AbstractExecPayload<T>>(
             signature: bid.data.signature.clone(),
             pubkey: bid.data.message.pubkey,
         }))
+    } else if payload_withdrawals_root != expected_withdrawals_root {
+        Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot {
+            payload: payload_withdrawals_root,
+            expected: expected_withdrawals_root,
+        }))
     } else {
         Ok(())
     }
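The new check treats the withdrawals root as an `Option` on both sides, so a pre-Capella payload (no withdrawals) compares equal to a pre-Capella expectation, while a post-Capella builder advertising the wrong root is rejected. A minimal standalone sketch of that comparison follows; `Hash256` and `BidError` are simplified stand-ins, not the Lighthouse types from the diff above:

```rust
// Sketch only: simplified stand-in types, not the real Lighthouse API.
type Hash256 = [u8; 32];

#[derive(Debug, PartialEq)]
enum BidError {
    WithdrawalsRoot {
        payload: Option<Hash256>,
        expected: Option<Hash256>,
    },
}

fn check_withdrawals_root(
    payload: Option<Hash256>,
    expected: Option<Hash256>,
) -> Result<(), BidError> {
    // Pre-Capella both sides are `None`, so the check passes trivially.
    if payload != expected {
        return Err(BidError::WithdrawalsRoot { payload, expected });
    }
    Ok(())
}
```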
@@ -3,15 +3,19 @@ use crate::{Config, ExecutionLayer, PayloadAttributes};
 use async_trait::async_trait;
 use eth2::types::{BlockId, StateId, ValidatorId};
 use eth2::{BeaconNodeHttpClient, Timeouts};
-use ethereum_consensus::crypto::{SecretKey, Signature};
-use ethereum_consensus::primitives::BlsPublicKey;
 pub use ethereum_consensus::state_transition::Context;
+use ethereum_consensus::{
+    crypto::{SecretKey, Signature},
+    primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256},
+    state_transition::Error,
+};
 use fork_choice::ForkchoiceUpdateParameters;
-use mev_build_rs::{
+use mev_rs::{
+    bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix},
+    capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella},
     sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError,
     BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload,
-    ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid,
-    SignedValidatorRegistration,
+    SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration,
 };
 use parking_lot::RwLock;
 use sensitive_url::SensitiveUrl;
@@ -39,25 +43,129 @@ pub enum Operation {
     PrevRandao(Hash256),
     BlockNumber(usize),
     Timestamp(usize),
+    WithdrawalsRoot(Hash256),
 }

 impl Operation {
-    fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> {
+    fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> {
         match self {
             Operation::FeeRecipient(fee_recipient) => {
-                bid.header.fee_recipient = to_ssz_rs(&fee_recipient)?
+                *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)?
             }
-            Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64,
-            Operation::Value(value) => bid.value = to_ssz_rs(&value)?,
-            Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?,
-            Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?,
-            Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64,
-            Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64,
+            Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64,
+            Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?,
+            Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?,
+            Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?,
+            Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64,
+            Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64,
+            Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?,
         }
         Ok(())
     }
 }
+
+// contains functions we need for BuilderBids.. not sure what to call this
+pub trait BidStuff {
+    fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress;
+    fn gas_limit_mut(&mut self) -> &mut u64;
+    fn value_mut(&mut self) -> &mut U256;
+    fn parent_hash_mut(&mut self) -> &mut Hash32;
+    fn prev_randao_mut(&mut self) -> &mut Hash32;
+    fn block_number_mut(&mut self) -> &mut u64;
+    fn timestamp_mut(&mut self) -> &mut u64;
+    fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>;
+
+    fn sign_builder_message(
+        &mut self,
+        signing_key: &SecretKey,
+        context: &Context,
+    ) -> Result<BlsSignature, Error>;
+
+    fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid;
+}
+
+impl BidStuff for BuilderBid {
+    fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress {
+        match self {
+            Self::Bellatrix(bid) => &mut bid.header.fee_recipient,
+            Self::Capella(bid) => &mut bid.header.fee_recipient,
+        }
+    }
+
+    fn gas_limit_mut(&mut self) -> &mut u64 {
+        match self {
+            Self::Bellatrix(bid) => &mut bid.header.gas_limit,
+            Self::Capella(bid) => &mut bid.header.gas_limit,
+        }
+    }
+
+    fn value_mut(&mut self) -> &mut U256 {
+        match self {
+            Self::Bellatrix(bid) => &mut bid.value,
+            Self::Capella(bid) => &mut bid.value,
+        }
+    }
+
+    fn parent_hash_mut(&mut self) -> &mut Hash32 {
+        match self {
+            Self::Bellatrix(bid) => &mut bid.header.parent_hash,
+            Self::Capella(bid) => &mut bid.header.parent_hash,
+        }
+    }
+
+    fn prev_randao_mut(&mut self) -> &mut Hash32 {
+        match self {
+            Self::Bellatrix(bid) => &mut bid.header.prev_randao,
+            Self::Capella(bid) => &mut bid.header.prev_randao,
+        }
+    }
+
+    fn block_number_mut(&mut self) -> &mut u64 {
+        match self {
+            Self::Bellatrix(bid) => &mut bid.header.block_number,
+            Self::Capella(bid) => &mut bid.header.block_number,
+        }
+    }
+
+    fn timestamp_mut(&mut self) -> &mut u64 {
+        match self {
+            Self::Bellatrix(bid) => &mut bid.header.timestamp,
+            Self::Capella(bid) => &mut bid.header.timestamp,
+        }
+    }
+
+    fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> {
+        match self {
+            Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom(
+                "withdrawals_root called on bellatrix bid".to_string(),
+            )),
+            Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root),
+        }
+    }
+
+    fn sign_builder_message(
+        &mut self,
+        signing_key: &SecretKey,
+        context: &Context,
+    ) -> Result<Signature, Error> {
+        match self {
+            Self::Bellatrix(message) => sign_builder_message(message, signing_key, context),
+            Self::Capella(message) => sign_builder_message(message, signing_key, context),
+        }
+    }
+
+    fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid {
+        match self {
+            Self::Bellatrix(message) => {
+                SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature })
+            }
+            Self::Capella(message) => {
+                SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature })
+            }
+        }
+    }
+}

 pub struct TestingBuilder<E: EthSpec> {
     server: BlindedBlockProviderServer<MockBuilder<E>>,
     pub builder: MockBuilder<E>,
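The `BidStuff` trait above replaces direct field access (`bid.header.gas_limit`) with fork-agnostic mutable accessors, so the same `Operation::apply` works for Bellatrix and Capella bids. A reduced sketch of the pattern, using hypothetical stand-in types rather than the `mev-rs` ones:

```rust
// Reduced sketch of the accessor-trait pattern; `Bid`, `BellatrixBid` and
// `CapellaBid` are hypothetical stand-ins for the mev-rs types.
struct BellatrixBid { gas_limit: u64 }
struct CapellaBid { gas_limit: u64, withdrawals_root: [u8; 32] }

enum Bid {
    Bellatrix(BellatrixBid),
    Capella(CapellaBid),
}

trait BidAccess {
    fn gas_limit_mut(&mut self) -> &mut u64;
    // Bellatrix has no withdrawals root, so this accessor is fallible.
    fn withdrawals_root_mut(&mut self) -> Result<&mut [u8; 32], String>;
}

impl BidAccess for Bid {
    fn gas_limit_mut(&mut self) -> &mut u64 {
        match self {
            Bid::Bellatrix(bid) => &mut bid.gas_limit,
            Bid::Capella(bid) => &mut bid.gas_limit,
        }
    }

    fn withdrawals_root_mut(&mut self) -> Result<&mut [u8; 32], String> {
        match self {
            Bid::Bellatrix(_) => Err("withdrawals_root called on bellatrix bid".into()),
            Bid::Capella(bid) => Ok(&mut bid.withdrawals_root),
        }
    }
}

// Callers mutate through the trait and stay fork-agnostic:
fn set_gas_limit<B: BidAccess>(bid: &mut B, limit: u64) {
    *bid.gas_limit_mut() = limit;
}
```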
@@ -112,7 +220,10 @@ impl<E: EthSpec> TestingBuilder<E> {
     }

     pub async fn run(&self) {
-        self.server.run().await
+        let server = self.server.serve();
+        if let Err(err) = server.await {
+            println!("error while listening for incoming: {err}")
+        }
     }
 }

@@ -163,7 +274,7 @@ impl<E: EthSpec> MockBuilder<E> {
         *self.invalidate_signatures.write() = false;
     }

-    fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> {
+    fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> {
         let mut guard = self.operations.write();
         while let Some(op) = guard.pop() {
             op.apply(bid)?;
@@ -173,7 +284,7 @@ impl<E: EthSpec> MockBuilder<E> {
     }

 #[async_trait]
-impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> {
+impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
     async fn register_validators(
         &self,
         registrations: &mut [SignedValidatorRegistration],
@@ -201,6 +312,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
         bid_request: &BidRequest,
     ) -> Result<SignedBuilderBid, BlindedBlockProviderError> {
         let slot = Slot::new(bid_request.slot);
+        let fork = self.spec.fork_name_at_slot::<E>(slot);
         let signed_cached_data = self
             .val_registration_cache
             .read()
@@ -216,9 +328,13 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
             .map_err(convert_err)?
             .ok_or_else(|| convert_err("missing head block"))?;

-        let block = head.data.message_merge().map_err(convert_err)?;
+        let block = head.data.message();
         let head_block_root = block.tree_hash_root();
-        let head_execution_hash = block.body.execution_payload.execution_payload.block_hash;
+        let head_execution_hash = block
+            .body()
+            .execution_payload()
+            .map_err(convert_err)?
+            .block_hash();
         if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? {
             return Err(BlindedBlockProviderError::Custom(format!(
                 "head mismatch: {} {}",
|
|||||||
.map_err(convert_err)?
|
.map_err(convert_err)?
|
||||||
.ok_or_else(|| convert_err("missing finalized block"))?
|
.ok_or_else(|| convert_err("missing finalized block"))?
|
||||||
.data
|
.data
|
||||||
.message_merge()
|
.message()
|
||||||
|
.body()
|
||||||
|
.execution_payload()
|
||||||
.map_err(convert_err)?
|
.map_err(convert_err)?
|
||||||
.body
|
.block_hash();
|
||||||
.execution_payload
|
|
||||||
.execution_payload
|
|
||||||
.block_hash;
|
|
||||||
|
|
||||||
let justified_execution_hash = self
|
let justified_execution_hash = self
|
||||||
.beacon_client
|
.beacon_client
|
||||||
@ -247,12 +362,11 @@ impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> {
|
|||||||
.map_err(convert_err)?
|
.map_err(convert_err)?
|
||||||
.ok_or_else(|| convert_err("missing finalized block"))?
|
.ok_or_else(|| convert_err("missing finalized block"))?
|
||||||
.data
|
.data
|
||||||
.message_merge()
|
.message()
|
||||||
|
.body()
|
||||||
|
.execution_payload()
|
||||||
.map_err(convert_err)?
|
.map_err(convert_err)?
|
||||||
.body
|
.block_hash();
|
||||||
.execution_payload
|
|
||||||
.execution_payload
|
|
||||||
.block_hash;
|
|
||||||
|
|
||||||
let val_index = self
|
let val_index = self
|
||||||
.beacon_client
|
.beacon_client
|
||||||
@@ -288,12 +402,22 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
             .get_randao_mix(head_state.current_epoch())
             .map_err(convert_err)?;

-        // FIXME: think about proper fork here
-        let payload_attributes =
-            PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None);
+        let payload_attributes = match fork {
+            ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None),
+            // the withdrawals root is filled in by operations
+            ForkName::Capella | ForkName::Eip4844 => {
+                PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![]))
+            }
+            ForkName::Base | ForkName::Altair => {
+                return Err(BlindedBlockProviderError::Custom(format!(
+                    "Unsupported fork: {}",
+                    fork
+                )));
+            }
+        };

         self.el
-            .insert_proposer(slot, head_block_root, val_index, payload_attributes)
+            .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone())
             .await;

         let forkchoice_update_params = ForkchoiceUpdateParameters {
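The match above encodes the per-fork shape of payload attributes: no withdrawals before Capella, an empty list afterwards (the mock builder only needs the root, which an `Operation::WithdrawalsRoot` fills in later). A condensed sketch of just that dispatch, with a simplified `ForkName` and a bare `Vec` standing in for the real withdrawals type:

```rust
// Condensed sketch; simplified stand-in types, not the Lighthouse API.
#[derive(Debug)]
enum ForkName { Base, Altair, Merge, Capella, Eip4844 }

fn withdrawals_for(fork: &ForkName) -> Result<Option<Vec<u64>>, String> {
    match fork {
        // Pre-Capella payload attributes carry no withdrawals at all.
        ForkName::Merge => Ok(None),
        // Post-Capella they start empty; the root is injected later.
        ForkName::Capella | ForkName::Eip4844 => Ok(Some(vec![])),
        ForkName::Base | ForkName::Altair => Err(format!("Unsupported fork: {fork:?}")),
    }
}
```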
@@ -303,17 +427,13 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
             finalized_hash: Some(finalized_execution_hash),
         };

-        let payload_attributes =
-            PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None);
-
         let payload = self
             .el
             .get_full_payload_caching::<BlindedPayload<E>>(
                 head_execution_hash,
                 &payload_attributes,
                 forkchoice_update_params,
-                // TODO: do we need to write a test for this if this is Capella fork?
-                ForkName::Merge,
+                fork,
             )
             .await
             .map_err(convert_err)?
@@ -321,44 +441,54 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
             .to_execution_payload_header();

         let json_payload = serde_json::to_string(&payload).map_err(convert_err)?;
-        let mut header: ServerPayloadHeader =
-            serde_json::from_str(json_payload.as_str()).map_err(convert_err)?;
-
-        header.gas_limit = cached_data.gas_limit;
-
-        let mut message = BuilderBid {
-            header,
-            value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?,
-            public_key: self.builder_sk.public_key(),
-        };
+        let mut message = match fork {
+            ForkName::Capella => BuilderBid::Capella(BuilderBidCapella {
+                header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?,
+                value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?,
+                public_key: self.builder_sk.public_key(),
+            }),
+            ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix {
+                header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?,
+                value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?,
+                public_key: self.builder_sk.public_key(),
+            }),
+            ForkName::Base | ForkName::Altair | ForkName::Eip4844 => {
+                return Err(BlindedBlockProviderError::Custom(format!(
+                    "Unsupported fork: {}",
+                    fork
+                )))
+            }
+        };
+        *message.gas_limit_mut() = cached_data.gas_limit;

         self.apply_operations(&mut message)?;

         let mut signature =
-            sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?;
+            message.sign_builder_message(&self.builder_sk, self.context.as_ref())?;

         if *self.invalidate_signatures.read() {
             signature = Signature::default();
         }

-        let signed_bid = SignedBuilderBid { message, signature };
-        Ok(signed_bid)
+        Ok(message.to_signed_bid(signature))
     }

     async fn open_bid(
         &self,
         signed_block: &mut SignedBlindedBeaconBlock,
     ) -> Result<ServerPayload, BlindedBlockProviderError> {
+        let node = match signed_block {
+            SignedBlindedBeaconBlock::Bellatrix(block) => {
+                block.message.body.execution_payload_header.hash_tree_root()
+            }
+            SignedBlindedBeaconBlock::Capella(block) => {
+                block.message.body.execution_payload_header.hash_tree_root()
+            }
+        }
+        .map_err(convert_err)?;
+
         let payload = self
             .el
-            .get_payload_by_root(&from_ssz_rs(
-                &signed_block
-                    .message
-                    .body
-                    .execution_payload_header
-                    .hash_tree_root()
-                    .map_err(convert_err)?,
-            )?)
+            .get_payload_by_root(&from_ssz_rs(&node)?)
             .ok_or_else(|| convert_err("missing payload for tx root"))?;

         let json_payload = serde_json::to_string(&payload).map_err(convert_err)?;
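`open_bid` above recovers the full payload by hashing the blinded block's payload header and using that root as a cache key. A toy model of the lookup side of that contract; the names are illustrative, not the execution layer's API:

```rust
// Toy model of the root-keyed payload cache used by `open_bid`.
use std::collections::HashMap;

type Root = [u8; 32];

#[derive(Default)]
struct PayloadCache {
    // tree-hash root of the payload header -> full (serialized) payload
    by_header_root: HashMap<Root, Vec<u8>>,
}

impl PayloadCache {
    fn insert(&mut self, header_root: Root, payload: Vec<u8>) {
        self.by_header_root.insert(header_root, payload);
    }

    fn get_payload_by_root(&self, root: &Root) -> Option<&Vec<u8>> {
        self.by_header_root.get(root)
    }
}
```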
@@ -15,6 +15,7 @@ mod database;
 mod metrics;
 mod proposer_duties;
 mod publish_blocks;
+mod standard_block_rewards;
 mod state_id;
 mod sync_committee_rewards;
 mod sync_committees;
@@ -1800,6 +1801,27 @@ pub fn serve<T: BeaconChainTypes>(
         },
     );

+    let beacon_rewards_path = eth_v1
+        .and(warp::path("beacon"))
+        .and(warp::path("rewards"))
+        .and(chain_filter.clone());
+
+    // GET beacon/rewards/blocks/{block_id}
+    let get_beacon_rewards_blocks = beacon_rewards_path
+        .clone()
+        .and(warp::path("blocks"))
+        .and(block_id_or_err)
+        .and(warp::path::end())
+        .and_then(|chain: Arc<BeaconChain<T>>, block_id: BlockId| {
+            blocking_json_task(move || {
+                let (rewards, execution_optimistic) =
+                    standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?;
+                Ok(rewards)
+                    .map(api_types::GenericResponse::from)
+                    .map(|resp| resp.add_execution_optimistic(execution_optimistic))
+            })
+        });

     /*
      * beacon/rewards
      */
@@ -1809,6 +1831,58 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path("rewards"))
         .and(chain_filter.clone());

+    // POST beacon/rewards/attestations/{epoch}
+    let post_beacon_rewards_attestations = beacon_rewards_path
+        .clone()
+        .and(warp::path("attestations"))
+        .and(warp::path::param::<Epoch>())
+        .and(warp::path::end())
+        .and(warp::body::json())
+        .and(log_filter.clone())
+        .and_then(
+            |chain: Arc<BeaconChain<T>>,
+             epoch: Epoch,
+             validators: Vec<ValidatorId>,
+             log: Logger| {
+                blocking_json_task(move || {
+                    let attestation_rewards = chain
+                        .compute_attestation_rewards(epoch, validators, log)
+                        .map_err(|e| match e {
+                            BeaconChainError::MissingBeaconState(root) => {
+                                warp_utils::reject::custom_not_found(format!(
+                                    "missing state {root:?}",
+                                ))
+                            }
+                            BeaconChainError::NoStateForSlot(slot) => {
+                                warp_utils::reject::custom_not_found(format!(
+                                    "missing state at slot {slot}"
+                                ))
+                            }
+                            BeaconChainError::BeaconStateError(
+                                BeaconStateError::UnknownValidator(validator_index),
+                            ) => warp_utils::reject::custom_bad_request(format!(
+                                "validator is unknown: {validator_index}"
+                            )),
+                            BeaconChainError::ValidatorPubkeyUnknown(pubkey) => {
+                                warp_utils::reject::custom_bad_request(format!(
+                                    "validator pubkey is unknown: {pubkey:?}"
+                                ))
+                            }
+                            e => warp_utils::reject::custom_server_error(format!(
+                                "unexpected error: {:?}",
+                                e
+                            )),
+                        })?;
+                    let execution_optimistic =
+                        chain.is_optimistic_or_invalid_head().unwrap_or_default();
+
+                    Ok(attestation_rewards)
+                        .map(api_types::GenericResponse::from)
+                        .map(|resp| resp.add_execution_optimistic(execution_optimistic))
+                })
+            },
+        );

     // POST beacon/rewards/sync_committee/{block_id}
     let post_beacon_rewards_sync_committee = beacon_rewards_path
         .clone()
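Assuming the routes above hang off the standard `/eth/v1` prefix, the two new endpoints can be exercised roughly as below. This client sketch uses `reqwest`, which is not part of the diff; the URL shapes and the `["0", "1"]` validator list are illustrative assumptions:

```rust
// Illustrative client sketch for the two new rewards routes.
async fn fetch_rewards(base: &str) -> Result<(), reqwest::Error> {
    let client = reqwest::Client::new();

    // GET beacon/rewards/blocks/{block_id}
    let block_rewards = client
        .get(format!("{base}/eth/v1/beacon/rewards/blocks/head"))
        .send()
        .await?
        .text()
        .await?;

    // POST beacon/rewards/attestations/{epoch} with a JSON list of validators
    let attestation_rewards = client
        .post(format!("{base}/eth/v1/beacon/rewards/attestations/100"))
        .json(&vec!["0", "1"])
        .send()
        .await?
        .text()
        .await?;

    println!("{block_rewards}\n{attestation_rewards}");
    Ok(())
}
```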
@@ -2881,7 +2955,7 @@ pub fn serve<T: BeaconChainTypes>(
             .await
             .map(|resp| warp::reply::json(&resp))
             .map_err(|e| {
-                error!(
+                warn!(
                     log,
                     "Relay error when registering validator(s)";
                     "num_registrations" => filtered_registration_data.len(),
@@ -3517,6 +3591,7 @@ pub fn serve<T: BeaconChainTypes>(
         .or(get_beacon_pool_voluntary_exits.boxed())
         .or(get_beacon_pool_bls_to_execution_changes.boxed())
         .or(get_beacon_deposit_snapshot.boxed())
+        .or(get_beacon_rewards_blocks.boxed())
         .or(get_config_fork_schedule.boxed())
         .or(get_config_spec.boxed())
         .or(get_config_deposit_contract.boxed())
@@ -3570,6 +3645,7 @@ pub fn serve<T: BeaconChainTypes>(
         .or(post_beacon_pool_voluntary_exits.boxed())
         .or(post_beacon_pool_sync_committees.boxed())
         .or(post_beacon_pool_bls_to_execution_changes.boxed())
+        .or(post_beacon_rewards_attestations.boxed())
        .or(post_beacon_rewards_sync_committee.boxed())
         .or(post_validator_duties_attester.boxed())
         .or(post_validator_duties_sync.boxed())
beacon_node/http_api/src/standard_block_rewards.rs (new file, 27 lines)
@@ -0,0 +1,27 @@
+use crate::sync_committee_rewards::get_state_before_applying_block;
+use crate::BlockId;
+use crate::ExecutionOptimistic;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use eth2::lighthouse::StandardBlockReward;
+use std::sync::Arc;
+use warp_utils::reject::beacon_chain_error;
+//// The difference between block_rewards and beacon_block_rewards is the latter returns block
+//// reward format that satisfies beacon-api specs
+pub fn compute_beacon_block_rewards<T: BeaconChainTypes>(
+    chain: Arc<BeaconChain<T>>,
+    block_id: BlockId,
+) -> Result<(StandardBlockReward, ExecutionOptimistic), warp::Rejection> {
+    let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
+
+    let block_ref = block.message();
+
+    let block_root = block.canonical_root();
+
+    let mut state = get_state_before_applying_block(chain.clone(), &block)?;
+
+    let rewards = chain
+        .compute_beacon_block_reward(block_ref, block_root, &mut state)
+        .map_err(beacon_chain_error)?;
+
+    Ok((rewards, execution_optimistic))
+}
@@ -47,7 +47,7 @@ pub fn compute_sync_committee_rewards<T: BeaconChainTypes>(
     Ok((data, execution_optimistic))
 }

-fn get_state_before_applying_block<T: BeaconChainTypes>(
+pub fn get_state_before_applying_block<T: BeaconChainTypes>(
     chain: Arc<BeaconChain<T>>,
     block: &SignedBlindedBeaconBlock<T::EthSpec>,
 ) -> Result<BeaconState<T::EthSpec>, warp::reject::Rejection> {
@@ -1,9 +1,9 @@
-use crate::api_types::{
-    EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse,
-};
+use crate::api_types::EndpointVersion;
 use eth2::CONSENSUS_VERSION_HEADER;
 use serde::Serialize;
-use types::{ForkName, InconsistentFork};
+use types::{
+    ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork,
+};
 use warp::reply::{self, Reply, WithHeader};

 pub const V1: EndpointVersion = EndpointVersion(1);
@@ -24,6 +24,7 @@ use network::NetworkReceivers;
 use proto_array::ExecutionStatus;
 use sensitive_url::SensitiveUrl;
 use slot_clock::SlotClock;
+use state_processing::per_block_processing::get_expected_withdrawals;
 use state_processing::per_slot_processing;
 use std::convert::TryInto;
 use std::sync::Arc;
@@ -3428,6 +3429,98 @@ impl ApiTester {
         self
     }

+    pub async fn test_builder_works_post_capella(self) -> Self {
+        // Ensure builder payload is chosen
+        self.mock_builder
+            .as_ref()
+            .unwrap()
+            .builder
+            .add_operation(Operation::Value(Uint256::from(
+                DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1,
+            )));
+
+        let slot = self.chain.slot().unwrap();
+        let propose_state = self
+            .harness
+            .chain
+            .state_at_slot(slot, StateSkipConfig::WithoutStateRoots)
+            .unwrap();
+        let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap();
+        let withdrawals_root = withdrawals.tree_hash_root();
+        // Set withdrawals root for builder
+        self.mock_builder
+            .as_ref()
+            .unwrap()
+            .builder
+            .add_operation(Operation::WithdrawalsRoot(withdrawals_root));
+
+        let epoch = self.chain.epoch().unwrap();
+        let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
+
+        let payload: BlindedPayload<E> = self
+            .client
+            .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
+            .await
+            .unwrap()
+            .data
+            .body()
+            .execution_payload()
+            .unwrap()
+            .into();
+
+        // The builder's payload should've been chosen, so this cache should not be populated
+        assert!(self
+            .chain
+            .execution_layer
+            .as_ref()
+            .unwrap()
+            .get_payload_by_root(&payload.tree_hash_root())
+            .is_none());
+        self
+    }
+
+    pub async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self {
+        // Ensure builder payload *would be* chosen
+        self.mock_builder
+            .as_ref()
+            .unwrap()
+            .builder
+            .add_operation(Operation::Value(Uint256::from(
+                DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1,
+            )));
+        // Set withdrawals root to something invalid
+        self.mock_builder
+            .as_ref()
+            .unwrap()
+            .builder
+            .add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42)));
+
+        let slot = self.chain.slot().unwrap();
+        let epoch = self.chain.epoch().unwrap();
+        let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
+
+        let payload: BlindedPayload<E> = self
+            .client
+            .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
+            .await
+            .unwrap()
+            .data
+            .body()
+            .execution_payload()
+            .unwrap()
+            .into();
+
+        // The local payload should've been chosen because the builder's was invalid
+        assert!(self
+            .chain
+            .execution_layer
+            .as_ref()
+            .unwrap()
+            .get_payload_by_root(&payload.tree_hash_root())
+            .is_some());
+        self
+    }
+
     #[cfg(target_os = "linux")]
     pub async fn test_get_lighthouse_health(self) -> Self {
         self.client.get_lighthouse_health().await.unwrap();
@@ -4424,6 +4517,26 @@ async fn builder_payload_chosen_by_profit() {
         .await;
 }

+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn builder_works_post_capella() {
+    let mut config = ApiTesterConfig {
+        builder_threshold: Some(0),
+        spec: E::default_spec(),
+    };
+    config.spec.altair_fork_epoch = Some(Epoch::new(0));
+    config.spec.bellatrix_fork_epoch = Some(Epoch::new(0));
+    config.spec.capella_fork_epoch = Some(Epoch::new(0));
+
+    ApiTester::new_from_config(config)
+        .await
+        .test_post_validator_register_validator()
+        .await
+        .test_builder_works_post_capella()
+        .await
+        .test_lighthouse_rejects_invalid_withdrawals_root()
+        .await;
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn lighthouse_endpoints() {
     ApiTester::new()
@@ -1,3 +1,4 @@
+use crate::rpc::config::OutboundRateLimiterConfig;
 use crate::types::GossipKind;
 use crate::{Enr, PeerIdSerialized};
 use directory::{
@@ -133,6 +134,9 @@ pub struct Config {

     /// Whether light client protocols should be enabled.
     pub enable_light_client_server: bool,
+
+    /// Configuration for the outbound rate limiter (requests made by this node).
+    pub outbound_rate_limiter_config: Option<OutboundRateLimiterConfig>,
 }

 impl Default for Config {
@@ -211,6 +215,7 @@ impl Default for Config {
             topics: Vec::new(),
             metrics_enabled: false,
             enable_light_client_server: false,
+            outbound_rate_limiter_config: None,
         }
     }
 }
beacon_node/lighthouse_network/src/rpc/config.rs (new file, 182 lines)
@@ -0,0 +1,182 @@
+use std::{
+    fmt::{Debug, Display},
+    str::FromStr,
+    time::Duration,
+};
+
+use super::{methods, rate_limiter::Quota, Protocol};
+
+use serde_derive::{Deserialize, Serialize};
+
+/// Auxiliary struct to aid on configuration parsing.
+///
+/// A protocol's quota is specified as `protocol_name:tokens/time_in_seconds`.
+#[derive(Debug, PartialEq, Eq)]
+struct ProtocolQuota {
+    protocol: Protocol,
+    quota: Quota,
+}
+
+impl Display for ProtocolQuota {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}:{}/{}",
+            self.protocol.as_ref(),
+            self.quota.max_tokens,
+            self.quota.replenish_all_every.as_secs()
+        )
+    }
+}
+
+impl FromStr for ProtocolQuota {
+    type Err = &'static str;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let (protocol_str, quota_str) = s
+            .split_once(':')
+            .ok_or("Missing ':' from quota definition.")?;
+        let protocol = protocol_str
+            .parse()
+            .map_err(|_parse_err| "Wrong protocol representation in quota")?;
+        let (tokens_str, time_str) = quota_str
+            .split_once('/')
+            .ok_or("Quota should be defined as \"n/t\" (t in seconds). Missing '/' from quota.")?;
+        let tokens = tokens_str
+            .parse()
+            .map_err(|_| "Failed to parse tokens from quota.")?;
+        let seconds = time_str
+            .parse::<u64>()
+            .map_err(|_| "Failed to parse time in seconds from quota.")?;
+        Ok(ProtocolQuota {
+            protocol,
+            quota: Quota {
+                replenish_all_every: Duration::from_secs(seconds),
+                max_tokens: tokens,
+            },
+        })
+    }
+}
+
+/// Configurations for the rate limiter applied to outbound requests (made by the node itself).
+#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct OutboundRateLimiterConfig {
+    pub(super) ping_quota: Quota,
+    pub(super) meta_data_quota: Quota,
+    pub(super) status_quota: Quota,
+    pub(super) goodbye_quota: Quota,
+    pub(super) blocks_by_range_quota: Quota,
+    pub(super) blocks_by_root_quota: Quota,
+    pub(super) blobs_by_range_quota: Quota,
+}
+
+impl OutboundRateLimiterConfig {
+    pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10);
+    pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5);
+    pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15);
+    pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10);
+    pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota =
+        Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10);
+    pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10);
+    pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota =
+        Quota::n_every(methods::MAX_REQUEST_BLOBS_SIDECARS, 10);
+}
+
+impl Default for OutboundRateLimiterConfig {
+    fn default() -> Self {
+        OutboundRateLimiterConfig {
+            ping_quota: Self::DEFAULT_PING_QUOTA,
+            meta_data_quota: Self::DEFAULT_META_DATA_QUOTA,
+            status_quota: Self::DEFAULT_STATUS_QUOTA,
+            goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA,
+            blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA,
+            blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA,
+            blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA,
+        }
+    }
+}
+
+impl Debug for OutboundRateLimiterConfig {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        macro_rules! fmt_q {
+            ($quota:expr) => {
+                &format_args!(
+                    "{}/{}s",
+                    $quota.max_tokens,
+                    $quota.replenish_all_every.as_secs()
+                )
+            };
+        }
+
+        f.debug_struct("OutboundRateLimiterConfig")
+            .field("ping", fmt_q!(&self.ping_quota))
+            .field("metadata", fmt_q!(&self.meta_data_quota))
+            .field("status", fmt_q!(&self.status_quota))
+            .field("goodbye", fmt_q!(&self.goodbye_quota))
+            .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota))
+            .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota))
+            .field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota))
+            .finish()
+    }
+}
+
+/// Parse configurations for the outbound rate limiter. Protocols that are not specified use
+/// the default values. Protocols specified more than once use only the first given quota.
+///
+/// The expected format is a ';' separated list of [`ProtocolQuota`].
+impl FromStr for OutboundRateLimiterConfig {
+    type Err = &'static str;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut ping_quota = None;
+        let mut meta_data_quota = None;
+        let mut status_quota = None;
+        let mut goodbye_quota = None;
+        let mut blocks_by_range_quota = None;
+        let mut blocks_by_root_quota = None;
+        let mut blobs_by_range_quota = None;
+        for proto_def in s.split(';') {
+            let ProtocolQuota { protocol, quota } = proto_def.parse()?;
+            let quota = Some(quota);
+            match protocol {
+                Protocol::Status => status_quota = status_quota.or(quota),
+                Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota),
+                Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota),
+                Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota),
+                Protocol::Ping => ping_quota = ping_quota.or(quota),
+                Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota),
+                Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota),
+                Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. Quota should not be set."),
+            }
+        }
+        Ok(OutboundRateLimiterConfig {
+            ping_quota: ping_quota.unwrap_or(Self::DEFAULT_PING_QUOTA),
+            meta_data_quota: meta_data_quota.unwrap_or(Self::DEFAULT_META_DATA_QUOTA),
+            status_quota: status_quota.unwrap_or(Self::DEFAULT_STATUS_QUOTA),
+            goodbye_quota: goodbye_quota.unwrap_or(Self::DEFAULT_GOODBYE_QUOTA),
+            blocks_by_range_quota: blocks_by_range_quota
+                .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA),
+            blocks_by_root_quota: blocks_by_root_quota
+                .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA),
+            blobs_by_range_quota: blobs_by_range_quota
+                .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_quota_inverse() {
+        let quota = ProtocolQuota {
+            protocol: Protocol::Goodbye,
+            quota: Quota {
+                replenish_all_every: Duration::from_secs(10),
+                max_tokens: 8,
+            },
+        };
+        assert_eq!(quota.to_string().parse(), Ok(quota))
+    }
+}
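The `FromStr` implementation above accepts a ';'-separated list of `protocol:tokens/seconds` entries, falling back to the `DEFAULT_*` quotas for anything unspecified. For example, assuming the types from this file are in scope:

```rust
use std::str::FromStr;

fn parse_example() {
    // ping limited to 1 request per 5s, status to 3 per 10s; all other
    // protocols keep their DEFAULT_* quotas.
    let config = OutboundRateLimiterConfig::from_str("ping:1/5;status:3/10")
        .expect("valid quota string");
    println!("{config:?}");
}
```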
@@ -12,7 +12,7 @@ use libp2p::swarm::{
     PollParameters, SubstreamProtocol,
 };
 use libp2p::PeerId;
-use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr};
+use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr};
 use slog::{crit, debug, o};
 use std::marker::PhantomData;
 use std::sync::Arc;
@@ -33,12 +33,17 @@ pub use methods::{
 pub(crate) use outbound::OutboundRequest;
 pub use protocol::{max_rpc_size, Protocol, RPCError};

+use self::config::OutboundRateLimiterConfig;
+use self::self_limiter::SelfRateLimiter;
+
 pub(crate) mod codec;
+pub mod config;
 mod handler;
 pub mod methods;
 mod outbound;
 mod protocol;
 mod rate_limiter;
+mod self_limiter;

 /// Composite trait for a request id.
 pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {}
@@ -101,13 +106,18 @@ pub struct RPCMessage<Id, TSpec: EthSpec> {
     pub event: HandlerEvent<Id, TSpec>,
 }

+type BehaviourAction<Id, TSpec> =
+    NetworkBehaviourAction<RPCMessage<Id, TSpec>, RPCHandler<Id, TSpec>>;
+
 /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
 /// logic.
 pub struct RPC<Id: ReqId, TSpec: EthSpec> {
     /// Rate limiter
     limiter: RateLimiter,
+    /// Rate limiter for our own requests.
+    self_limiter: Option<SelfRateLimiter<Id, TSpec>>,
     /// Queue of events to be processed.
-    events: Vec<NetworkBehaviourAction<RPCMessage<Id, TSpec>, RPCHandler<Id, TSpec>>>,
+    events: Vec<BehaviourAction<Id, TSpec>>,
     fork_context: Arc<ForkContext>,
     enable_light_client_server: bool,
     /// Slog logger for RPC behaviour.
@@ -118,10 +128,12 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
     pub fn new(
         fork_context: Arc<ForkContext>,
         enable_light_client_server: bool,
+        outbound_rate_limiter_config: Option<OutboundRateLimiterConfig>,
         log: slog::Logger,
     ) -> Self {
         let log = log.new(o!("service" => "libp2p_rpc"));
-        let limiter = RPCRateLimiterBuilder::new()
+
+        let limiter = RateLimiter::builder()
             .n_every(Protocol::MetaData, 2, Duration::from_secs(5))
             .n_every(Protocol::Ping, 2, Duration::from_secs(10))
             .n_every(Protocol::Status, 5, Duration::from_secs(15))
@@ -141,8 +153,14 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
             )
             .build()
             .expect("Configuration parameters are valid");
+
+        let self_limiter = outbound_rate_limiter_config.map(|config| {
+            SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid")
+        });
+
         RPC {
             limiter,
+            self_limiter,
             events: Vec::new(),
             fork_context,
             enable_light_client_server,
@@ -169,12 +187,24 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
     /// Submits an RPC request.
     ///
     /// The peer must be connected for this to succeed.
-    pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, event: OutboundRequest<TSpec>) {
-        self.events.push(NetworkBehaviourAction::NotifyHandler {
-            peer_id,
-            handler: NotifyHandler::Any,
-            event: RPCSend::Request(request_id, event),
-        });
+    pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: OutboundRequest<TSpec>) {
+        let event = if let Some(self_limiter) = self.self_limiter.as_mut() {
+            match self_limiter.allows(peer_id, request_id, req) {
+                Ok(event) => event,
+                Err(_e) => {
+                    // Request is logged and queued internally in the self rate limiter.
+                    return;
+                }
+            }
+        } else {
+            NetworkBehaviourAction::NotifyHandler {
+                peer_id,
+                handler: NotifyHandler::Any,
+                event: RPCSend::Request(request_id, req),
+            }
+        };
+
+        self.events.push(event);
     }

     /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This
@@ -279,11 +309,19 @@ where
         cx: &mut Context,
         _: &mut impl PollParameters,
     ) -> Poll<NetworkBehaviourAction<Self::OutEvent, Self::ConnectionHandler>> {
-        // let the rate limiter prune
+        // let the rate limiter prune.
         let _ = self.limiter.poll_unpin(cx);
+
+        if let Some(self_limiter) = self.self_limiter.as_mut() {
+            if let Poll::Ready(event) = self_limiter.poll_ready(cx) {
+                self.events.push(event)
+            }
+        }
+
         if !self.events.is_empty() {
             return Poll::Ready(self.events.remove(0));
         }

         Poll::Pending
     }
 }
@@ -14,7 +14,7 @@ use std::io;
 use std::marker::PhantomData;
 use std::sync::Arc;
 use std::time::Duration;
-use strum::IntoStaticStr;
+use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
 use tokio_io_timeout::TimeoutStream;
 use tokio_util::{
     codec::Framed,
@@ -169,15 +169,18 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits {
 }

 /// Protocol names to be used.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumString, AsRefStr, Display)]
+#[strum(serialize_all = "snake_case")]
 pub enum Protocol {
     /// The Status protocol name.
     Status,
     /// The Goodbye protocol name.
     Goodbye,
     /// The `BlocksByRange` protocol name.
+    #[strum(serialize = "beacon_blocks_by_range")]
     BlocksByRange,
     /// The `BlocksByRoot` protocol name.
+    #[strum(serialize = "beacon_blocks_by_root")]
     BlocksByRoot,
     /// The `BlobsByRange` protocol name.
     BlobsByRange,
@@ -186,8 +189,10 @@ pub enum Protocol {
     /// The `Ping` protocol name.
     Ping,
     /// The `MetaData` protocol name.
+    #[strum(serialize = "metadata")]
     MetaData,
     /// The `LightClientBootstrap` protocol name.
+    #[strum(serialize = "light_client_bootstrap")]
     LightClientBootstrap,
 }

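With those strum derives, the snake_case names round-trip through `Display`/`FromStr` automatically, which is what lets the quota parser in `rpc/config.rs` accept protocol names as strings. A quick illustration, assuming strum 0.24 semantics:

```rust
// Assuming strum 0.24 semantics for the derives added above.
use std::str::FromStr;

fn strum_round_trip() {
    assert_eq!(Protocol::BlocksByRange.to_string(), "beacon_blocks_by_range");
    assert_eq!(Protocol::Ping.as_ref(), "ping");
    assert_eq!(Protocol::from_str("metadata"), Ok(Protocol::MetaData));
}
```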
@ -222,23 +227,6 @@ impl Protocol {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl std::fmt::Display for Protocol {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
let repr = match self {
|
|
||||||
Protocol::Status => "status",
|
|
||||||
Protocol::Goodbye => "goodbye",
|
|
||||||
Protocol::BlocksByRange => "beacon_blocks_by_range",
|
|
||||||
Protocol::BlocksByRoot => "beacon_blocks_by_root",
|
|
||||||
Protocol::BlobsByRange => "blobs_sidecars_by_range",
|
|
||||||
Protocol::BlobsByRoot => "beacon_block_and_blobs_sidecar_by_root",
|
|
||||||
Protocol::Ping => "ping",
|
|
||||||
Protocol::MetaData => "metadata",
|
|
||||||
Protocol::LightClientBootstrap => "light_client_bootstrap",
|
|
||||||
};
|
|
||||||
f.write_str(repr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::fmt::Display for Encoding {
|
impl std::fmt::Display for Encoding {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
let repr = match self {
|
let repr = match self {
|
||||||
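
For readers less familiar with strum: the derives added above are what make the hand-written `Display` impl removable, since `Display`/`AsRefStr` emit the serialized names and `EnumString` parses them back. A minimal, self-contained sketch of the mechanics (a cut-down enum, not the full `Protocol` type):

```rust
use std::str::FromStr;
use strum::{AsRefStr, Display, EnumString};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Display, EnumString, AsRefStr)]
#[strum(serialize_all = "snake_case")]
enum Protocol {
    Status,
    // A per-variant override handles names that aren't plain snake case.
    #[strum(serialize = "beacon_blocks_by_range")]
    BlocksByRange,
}

fn main() {
    // `Display` and `AsRefStr` both use the serialized names...
    assert_eq!(Protocol::Status.to_string(), "status");
    assert_eq!(Protocol::BlocksByRange.as_ref(), "beacon_blocks_by_range");
    // ...and `EnumString` supplies the reverse mapping through `FromStr`.
    assert_eq!(Protocol::from_str("status").unwrap(), Protocol::Status);
}
```
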
@ -1,6 +1,7 @@
-use crate::rpc::{InboundRequest, Protocol};
+use crate::rpc::Protocol;
 use fnv::FnvHashMap;
 use libp2p::PeerId;
+use serde_derive::{Deserialize, Serialize};
 use std::convert::TryInto;
 use std::future::Future;
 use std::hash::Hash;
@ -47,12 +48,31 @@ type Nanosecs = u64;
 /// n*`replenish_all_every`/`max_tokens` units of time since their last request.
 ///
 /// To produce hard limits, set `max_tokens` to 1.
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub struct Quota {
     /// How often are `max_tokens` fully replenished.
-    replenish_all_every: Duration,
+    pub(super) replenish_all_every: Duration,
     /// Token limit. This translates on how large can an instantaneous batch of
     /// tokens be.
-    max_tokens: u64,
+    pub(super) max_tokens: u64,
+}
+
+impl Quota {
+    /// A hard limit of one token every `seconds`.
+    pub const fn one_every(seconds: u64) -> Self {
+        Quota {
+            replenish_all_every: Duration::from_secs(seconds),
+            max_tokens: 1,
+        }
+    }
+
+    /// Allow `n` tokens to be used every `seconds`.
+    pub const fn n_every(n: u64, seconds: u64) -> Self {
+        Quota {
+            replenish_all_every: Duration::from_secs(seconds),
+            max_tokens: n,
+        }
+    }
 }

 /// Manages rate limiting of requests per peer, with differentiated rates per protocol.
@ -82,6 +102,7 @@ pub struct RPCRateLimiter {
 }

 /// Error type for non conformant requests
+#[derive(Debug)]
 pub enum RateLimitedErr {
     /// Required tokens for this request exceed the maximum
     TooLarge,
@ -90,7 +111,7 @@ pub enum RateLimitedErr {
 }

 /// User-friendly builder of a `RPCRateLimiter`
-#[derive(Default)]
+#[derive(Default, Clone)]
 pub struct RPCRateLimiterBuilder {
     /// Quota for the Goodbye protocol.
     goodbye_quota: Option<Quota>,
@ -113,13 +134,8 @@ pub struct RPCRateLimiterBuilder {
 }

 impl RPCRateLimiterBuilder {
-    /// Get an empty `RPCRateLimiterBuilder`.
-    pub fn new() -> Self {
-        Default::default()
-    }
-
     /// Set a quota for a protocol.
-    fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self {
+    pub fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self {
         let q = Some(quota);
         match protocol {
             Protocol::Ping => self.ping_quota = q,
@ -213,11 +229,40 @@ impl RPCRateLimiterBuilder {
     }
 }

+pub trait RateLimiterItem {
+    fn protocol(&self) -> Protocol;
+    fn expected_responses(&self) -> u64;
+}
+
+impl<T: EthSpec> RateLimiterItem for super::InboundRequest<T> {
+    fn protocol(&self) -> Protocol {
+        self.protocol()
+    }
+
+    fn expected_responses(&self) -> u64 {
+        self.expected_responses()
+    }
+}
+
+impl<T: EthSpec> RateLimiterItem for super::OutboundRequest<T> {
+    fn protocol(&self) -> Protocol {
+        self.protocol()
+    }
+
+    fn expected_responses(&self) -> u64 {
+        self.expected_responses()
+    }
+}
 impl RPCRateLimiter {
-    pub fn allows<T: EthSpec>(
+    /// Get a builder instance.
+    pub fn builder() -> RPCRateLimiterBuilder {
+        RPCRateLimiterBuilder::default()
+    }
+
+    pub fn allows<Item: RateLimiterItem>(
         &mut self,
         peer_id: &PeerId,
-        request: &InboundRequest<T>,
+        request: &Item,
     ) -> Result<(), RateLimitedErr> {
         let time_since_start = self.init_time.elapsed();
         let tokens = request.expected_responses().max(1);
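
To make the quota arithmetic in the doc comment concrete: with `max_tokens = n` replenished every `t` seconds, a request costing `k` tokens is admitted roughly every `k*t/n` seconds, so a 1024-token/10 s quota lets a 64-block `BlocksByRange` request through about every 0.625 s. A usage sketch of the new constructors and builder entry point (the quota values here are illustrative, not the crate's defaults):

```rust
let limiter = RPCRateLimiter::builder()
    // Hard limit: at most one Goodbye every 10 seconds.
    .set_quota(Protocol::Goodbye, Quota::one_every(10))
    // Burst limit: up to 1024 blocks at once, replenished over 10 seconds.
    .set_quota(Protocol::BlocksByRange, Quota::n_every(1024, 10))
    .build()?;
```
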
204
beacon_node/lighthouse_network/src/rpc/self_limiter.rs
Normal file
@ -0,0 +1,204 @@
use std::{
    collections::{hash_map::Entry, HashMap, VecDeque},
    task::{Context, Poll},
    time::Duration,
};

use futures::FutureExt;
use libp2p::{swarm::NotifyHandler, PeerId};
use slog::{crit, debug, Logger};
use smallvec::SmallVec;
use tokio_util::time::DelayQueue;
use types::EthSpec;

use super::{
    config::OutboundRateLimiterConfig,
    rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr},
    BehaviourAction, OutboundRequest, Protocol, RPCSend, ReqId,
};

/// A request that was rate limited or waiting on rate limited requests for the same peer and
/// protocol.
struct QueuedRequest<Id: ReqId, TSpec: EthSpec> {
    req: OutboundRequest<TSpec>,
    request_id: Id,
}

pub(crate) struct SelfRateLimiter<Id: ReqId, TSpec: EthSpec> {
    /// Requests queued for sending per peer. These requests are stored when the self rate
    /// limiter rejects them. Rate limiting is applied per peer and protocol, so the queue is
    /// keyed the same way.
    delayed_requests: HashMap<(PeerId, Protocol), VecDeque<QueuedRequest<Id, TSpec>>>,
    /// The delay required to allow a peer's outbound request per protocol.
    next_peer_request: DelayQueue<(PeerId, Protocol)>,
    /// Rate limiter for our own requests.
    limiter: RateLimiter,
    /// Requests that are ready to be sent.
    ready_requests: SmallVec<[BehaviourAction<Id, TSpec>; 3]>,
    /// Slog logger.
    log: Logger,
}

/// Error returned when the rate limiter does not accept a request.
// NOTE: this is currently not used, but might be useful for debugging.
pub enum Error {
    /// There are queued requests for this same peer and protocol.
    PendingRequests,
    /// Request was tried but rate limited.
    RateLimited,
}

impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
    /// Creates a new [`SelfRateLimiter`] based on configuration values.
    pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result<Self, &'static str> {
        debug!(log, "Using self rate limiting params"; "config" => ?config);
        // Destructure to make sure every configuration value is used.
        let OutboundRateLimiterConfig {
            ping_quota,
            meta_data_quota,
            status_quota,
            goodbye_quota,
            blocks_by_range_quota,
            blocks_by_root_quota,
            blobs_by_range_quota,
        } = config;

        let limiter = RateLimiter::builder()
            .set_quota(Protocol::Ping, ping_quota)
            .set_quota(Protocol::MetaData, meta_data_quota)
            .set_quota(Protocol::Status, status_quota)
            .set_quota(Protocol::Goodbye, goodbye_quota)
            .set_quota(Protocol::BlocksByRange, blocks_by_range_quota)
            .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota)
            .set_quota(Protocol::BlobsByRange, blobs_by_range_quota)
            // Manually set the LightClientBootstrap quota, since we use the same rate limiter for
            // inbound and outbound requests, and LightClientBootstrap is an inbound-only
            // protocol.
            .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10))
            .build()?;

        Ok(SelfRateLimiter {
            delayed_requests: Default::default(),
            next_peer_request: Default::default(),
            limiter,
            ready_requests: Default::default(),
            log,
        })
    }

    /// Checks if the rate limiter allows the request. If it's allowed, returns the
    /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is
    /// delayed until it can be sent.
    pub fn allows(
        &mut self,
        peer_id: PeerId,
        request_id: Id,
        req: OutboundRequest<TSpec>,
    ) -> Result<BehaviourAction<Id, TSpec>, Error> {
        let protocol = req.protocol();
        // First check that there are not already other requests waiting to be sent.
        if let Some(queued_requests) = self.delayed_requests.get_mut(&(peer_id, protocol)) {
            queued_requests.push_back(QueuedRequest { req, request_id });

            return Err(Error::PendingRequests);
        }
        match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) {
            Err((rate_limited_req, wait_time)) => {
                let key = (peer_id, protocol);
                self.next_peer_request.insert(key, wait_time);
                self.delayed_requests
                    .entry(key)
                    .or_default()
                    .push_back(rate_limited_req);

                Err(Error::RateLimited)
            }
            Ok(event) => Ok(event),
        }
    }

    /// Auxiliary function to deal with self rate limiting outcomes. If the rate limiter allows
    /// the request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the
    /// request should be delayed, it's returned with the duration to wait.
    fn try_send_request(
        limiter: &mut RateLimiter,
        peer_id: PeerId,
        request_id: Id,
        req: OutboundRequest<TSpec>,
        log: &Logger,
    ) -> Result<BehaviourAction<Id, TSpec>, (QueuedRequest<Id, TSpec>, Duration)> {
        match limiter.allows(&peer_id, &req) {
            Ok(()) => Ok(BehaviourAction::NotifyHandler {
                peer_id,
                handler: NotifyHandler::Any,
                event: RPCSend::Request(request_id, req),
            }),
            Err(e) => {
                let protocol = req.protocol();
                match e {
                    RateLimitedErr::TooLarge => {
                        // This should never happen with default parameters. Let's just send the
                        // request. Log a crit since this is a config issue.
                        crit!(
                            log,
                            "Self rate limiting error for a batch that will never fit. Sending request anyway. Check configuration parameters.";
                            "protocol" => %req.protocol()
                        );
                        Ok(BehaviourAction::NotifyHandler {
                            peer_id,
                            handler: NotifyHandler::Any,
                            event: RPCSend::Request(request_id, req),
                        })
                    }
                    RateLimitedErr::TooSoon(wait_time) => {
                        debug!(log, "Self rate limiting"; "protocol" => %protocol, "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id);
                        Err((QueuedRequest { req, request_id }, wait_time))
                    }
                }
            }
        }
    }

    /// When a peer and protocol are allowed to send a next request, this function checks the
    /// queued requests and attempts to mark as ready as many as the limiter allows.
    fn next_peer_request_ready(&mut self, peer_id: PeerId, protocol: Protocol) {
        if let Entry::Occupied(mut entry) = self.delayed_requests.entry((peer_id, protocol)) {
            let queued_requests = entry.get_mut();
            while let Some(QueuedRequest { req, request_id }) = queued_requests.pop_front() {
                match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log)
                {
                    Err((rate_limited_req, wait_time)) => {
                        let key = (peer_id, protocol);
                        self.next_peer_request.insert(key, wait_time);
                        queued_requests.push_back(rate_limited_req);
                        // If one fails just wait for the next window that allows sending requests.
                        return;
                    }
                    Ok(event) => self.ready_requests.push(event),
                }
            }
            if queued_requests.is_empty() {
                entry.remove();
            }
        }
    }

    pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<BehaviourAction<Id, TSpec>> {
        // First check the requests that were self rate limited, since those might add events to
        // the queue. Also do this before rate limiter pruning, to avoid removing and
        // immediately re-adding rate limiting keys.
        if let Poll::Ready(Some(Ok(expired))) = self.next_peer_request.poll_expired(cx) {
            let (peer_id, protocol) = expired.into_inner();
            self.next_peer_request_ready(peer_id, protocol);
        }
        // Prune the rate limiter.
        let _ = self.limiter.poll_unpin(cx);

        // Finally return any queued events.
        if !self.ready_requests.is_empty() {
            return Poll::Ready(self.ready_requests.remove(0));
        }

        Poll::Pending
    }
}
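
The retry scheduling above leans on `tokio_util::time::DelayQueue`. A stripped-down, self-contained sketch of the same park-then-retry pattern (hypothetical key type; note that in the tokio-util version this code expects, `poll_expired` yields `Result`-wrapped items, hence the `Some(Ok(..))` match — newer versions drop the `Result`):

```rust
use futures::future::poll_fn;
use std::time::Duration;
use tokio_util::time::DelayQueue;

#[tokio::main]
async fn main() {
    let mut next_allowed: DelayQueue<(&'static str, &'static str)> = DelayQueue::new();
    // Park a (peer, protocol) key until its rate-limit window reopens.
    next_allowed.insert(("peer-a", "status"), Duration::from_millis(50));
    // When the delay fires, the key pops back out and queued requests for it
    // can be re-attempted against the limiter.
    if let Some(Ok(expired)) = poll_fn(|cx| next_allowed.poll_expired(cx)).await {
        let (peer, protocol) = expired.into_inner();
        println!("retry window open for {}/{}", peer, protocol);
    }
}
```
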
@ -266,6 +266,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
         let eth2_rpc = RPC::new(
             ctx.fork_context.clone(),
             config.enable_light_client_server,
+            config.outbound_rate_limiter_config.clone(),
             log.clone(),
         );

@ -46,6 +46,7 @@ derivative = "2.2.0"
 delay_map = "0.1.1"
 ethereum-types = { version = "0.14.1", optional = true }
 operation_pool = { path = "../operation_pool" }
+execution_layer = { path = "../execution_layer" }

 [features]
 deterministic_long_lived_attnets = [ "ethereum-types" ]
@ -10,7 +10,7 @@ use lighthouse_network::rpc::methods::{
 use lighthouse_network::rpc::StatusMessage;
 use lighthouse_network::rpc::*;
 use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo};
-use slog::{debug, error};
+use slog::{debug, error, warn};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use task_executor::TaskExecutor;
@ -576,14 +576,26 @@ impl<T: BeaconChainTypes> Worker<T> {
                     break;
                 }
                 Err(e) => {
-                    error!(
-                        self.log,
-                        "Error fetching block for peer";
-                        "request" => ?req,
-                        "peer" => %peer_id,
-                        "block_root" => ?root,
-                        "error" => ?e
-                    );
+                    if matches!(
+                        e,
+                        BeaconChainError::ExecutionLayerErrorPayloadReconstruction(_block_hash, ref boxed_error)
+                        if matches!(**boxed_error, execution_layer::Error::EngineError(_))
+                    ) {
+                        warn!(
+                            self.log,
+                            "Error rebuilding payload for peer";
+                            "info" => "this may occur occasionally when the EE is busy",
+                            "block_root" => ?root,
+                            "error" => ?e,
+                        );
+                    } else {
+                        error!(
+                            self.log,
+                            "Error fetching block for peer";
+                            "block_root" => ?root,
+                            "error" => ?e
+                        );
+                    }

                     // send the stream terminator
                     self.send_error_response(
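
The nested `matches!` above peers through a `Box` to classify the error before choosing a log level; a self-contained sketch of that pattern with hypothetical error types:

```rust
#[derive(Debug)]
enum EngineError {
    Busy,
}

#[derive(Debug)]
enum Error {
    PayloadReconstruction(Box<EngineError>),
    Other,
}

fn is_busy_engine(error: &Error) -> bool {
    // First match the outer variant, then dereference the box to inspect the inner error.
    matches!(
        error,
        Error::PayloadReconstruction(ref boxed) if matches!(**boxed, EngineError::Busy)
    )
}

fn main() {
    assert!(is_busy_engine(&Error::PayloadReconstruction(Box::new(
        EngineError::Busy
    ))));
    assert!(!is_busy_engine(&Error::Other));
}
```
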
@ -10,7 +10,7 @@ mod reward_cache;
 mod sync_aggregate_id;

 pub use crate::bls_to_execution_changes::ReceivedPreCapella;
-pub use attestation::AttMaxCover;
+pub use attestation::{earliest_attestation_validators, AttMaxCover};
 pub use attestation_storage::{AttestationRef, SplitAttestation};
 pub use max_cover::MaxCover;
 pub use persistence::{
@ -201,6 +201,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         .help("Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses.")
         .takes_value(false),
     )
+    .arg(
+        Arg::with_name("self-limiter")
+        .long("self-limiter")
+        .help(
+            "Enables the outbound rate limiter (requests made by this node).\
+            \
+            Rate limit quotas per protocol can be set in the form of \
+            <protocol_name>:<tokens>/<time_in_seconds>. To set quotas for multiple protocols, \
+            separate them by ';'. If the self rate limiter is enabled and a protocol is not \
+            present in the configuration, the quotas used for the inbound rate limiter will be \
+            used."
+        )
+        .min_values(0)
+        .hidden(true)
+    )
     /* REST API related arguments */
     .arg(
         Arg::with_name("http")
@ -1004,6 +1004,13 @@ pub fn set_network_config(
     // Light client server config.
     config.enable_light_client_server = cli_args.is_present("light-client-server");

+    // This flag can be used with or without a value. Try to parse it first with a value; if
+    // no value is given but the flag is present, use the default params.
+    config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?;
+    if cli_args.is_present("self-limiter") && config.outbound_rate_limiter_config.is_none() {
+        config.outbound_rate_limiter_config = Some(Default::default());
+    }
+
     Ok(())
 }

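
Putting the flag and the parsing together, both accepted forms look like this (the quota string follows the `<protocol_name>:<tokens>/<time_in_seconds>` syntax from the help text; the values are illustrative):

```
# Bare flag: enable self rate limiting with the inbound limiter's default quotas.
lighthouse bn --self-limiter

# With a value: override the quotas for two protocols, separated by ';'.
lighthouse bn --self-limiter "beacon_blocks_by_range:1024/10;status:5/15"
```
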
@ -57,7 +57,7 @@ $ docker pull sigp/lighthouse:latest-modern
 Image tags follow this format:

 ```
-${version}${arch}${stability}${modernity}
+${version}${arch}${stability}${modernity}${features}
 ```

 The `version` is:
@ -81,6 +81,12 @@ The `modernity` is:
 * `-modern` for optimized builds
 * empty for a `portable` unoptimized build

+The `features` is:
+
+* `-dev` for a development build with `minimal-spec` preset enabled.
+* empty for a standard build with no custom feature enabled.
+
+
 Examples:

 * `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM)
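
As a concrete (hypothetical) instance of the extended format, a development image for the latest unstable x86_64 build would carry a tag along the lines of `latest-amd64-unstable-dev`, with `-dev` landing in the final `features` slot.
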
@ -59,14 +59,7 @@ The following fields are returned:
 - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a
   head beacon block that is in the canonical chain.

-From this data you can calculate some interesting figures:
+From this data you can calculate:

-#### Participation Rate
-
-`previous_epoch_attesting_gwei / previous_epoch_active_gwei`
-
-Expresses the ratio of validators that managed to have an attestation
-voting upon the previous epoch included in a block.
-
 #### Justification/Finalization Rate

@ -14,9 +14,8 @@ pub mod lighthouse_vc;
 pub mod mixin;
 pub mod types;

-use self::mixin::{RequestAccept, ResponseForkName, ResponseOptional};
+use self::mixin::{RequestAccept, ResponseOptional};
 use self::types::{Error as ResponseError, *};
-use ::types::map_fork_name_with;
 use futures::Stream;
 use futures_util::StreamExt;
 use lighthouse_network::PeerId;
@ -696,35 +695,7 @@ impl BeaconNodeHttpClient {
             None => return Ok(None),
         };

-        // If present, use the fork provided in the headers to decode the block. Gracefully handle
-        // missing and malformed fork names by falling back to regular deserialisation.
-        let (block, version, execution_optimistic) = match response.fork_name_from_header() {
-            Ok(Some(fork_name)) => {
-                let (data, (version, execution_optimistic)) =
-                    map_fork_name_with!(fork_name, SignedBeaconBlock, {
-                        let ExecutionOptimisticForkVersionedResponse {
-                            version,
-                            execution_optimistic,
-                            data,
-                        } = response.json().await?;
-                        (data, (version, execution_optimistic))
-                    });
-                (data, version, execution_optimistic)
-            }
-            Ok(None) | Err(_) => {
-                let ExecutionOptimisticForkVersionedResponse {
-                    version,
-                    execution_optimistic,
-                    data,
-                } = response.json().await?;
-                (data, version, execution_optimistic)
-            }
-        };
-        Ok(Some(ExecutionOptimisticForkVersionedResponse {
-            version,
-            execution_optimistic,
-            data: block,
-        }))
+        Ok(Some(response.json().await?))
     }

     /// `GET lighthouse/beacon/blobs_sidecars/{block_id}`
@ -758,35 +729,7 @@ impl BeaconNodeHttpClient {
             None => return Ok(None),
         };

-        // If present, use the fork provided in the headers to decode the block. Gracefully handle
-        // missing and malformed fork names by falling back to regular deserialisation.
-        let (block, version, execution_optimistic) = match response.fork_name_from_header() {
-            Ok(Some(fork_name)) => {
-                let (data, (version, execution_optimistic)) =
-                    map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, {
-                        let ExecutionOptimisticForkVersionedResponse {
-                            version,
-                            execution_optimistic,
-                            data,
-                        } = response.json().await?;
-                        (data, (version, execution_optimistic))
-                    });
-                (data, version, execution_optimistic)
-            }
-            Ok(None) | Err(_) => {
-                let ExecutionOptimisticForkVersionedResponse {
-                    version,
-                    execution_optimistic,
-                    data,
-                } = response.json().await?;
-                (data, version, execution_optimistic)
-            }
-        };
-        Ok(Some(ExecutionOptimisticForkVersionedResponse {
-            version,
-            execution_optimistic,
-            data: block,
-        }))
+        Ok(Some(response.json().await?))
     }

     /// `GET v1/beacon/blocks` (LEGACY)
@ -1092,6 +1035,40 @@ impl BeaconNodeHttpClient {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// `GET beacon/rewards/blocks`
|
||||||
|
pub async fn get_beacon_rewards_blocks(&self, epoch: Epoch) -> Result<(), Error> {
|
||||||
|
let mut path = self.eth_path(V1)?;
|
||||||
|
|
||||||
|
path.path_segments_mut()
|
||||||
|
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
|
||||||
|
.push("beacon")
|
||||||
|
.push("rewards")
|
||||||
|
.push("blocks");
|
||||||
|
|
||||||
|
path.query_pairs_mut()
|
||||||
|
.append_pair("epoch", &epoch.to_string());
|
||||||
|
|
||||||
|
self.get(path).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// `POST beacon/rewards/attestations`
|
||||||
|
pub async fn post_beacon_rewards_attestations(
|
||||||
|
&self,
|
||||||
|
attestations: &[ValidatorId],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let mut path = self.eth_path(V1)?;
|
||||||
|
|
||||||
|
path.path_segments_mut()
|
||||||
|
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
|
||||||
|
.push("beacon")
|
||||||
|
.push("rewards")
|
||||||
|
.push("attestations");
|
||||||
|
|
||||||
|
self.post(path, &attestations).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// `POST validator/contribution_and_proofs`
|
/// `POST validator/contribution_and_proofs`
|
||||||
pub async fn post_validator_contribution_and_proofs<T: EthSpec>(
|
pub async fn post_validator_contribution_and_proofs<T: EthSpec>(
|
||||||
&self,
|
&self,
|
||||||
@ -1,8 +1,10 @@
 //! This module contains endpoints that are non-standard and only available on Lighthouse servers.

 mod attestation_performance;
+pub mod attestation_rewards;
 mod block_packing_efficiency;
 mod block_rewards;
+mod standard_block_rewards;
 mod sync_committee_rewards;

 use crate::{
@ -23,11 +25,13 @@ use store::{AnchorInfo, Split, StoreConfig};
 pub use attestation_performance::{
     AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics,
 };
+pub use attestation_rewards::StandardAttestationRewards;
 pub use block_packing_efficiency::{
     BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation,
 };
 pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery};
 pub use lighthouse_network::{types::SyncState, PeerInfo};
+pub use standard_block_rewards::StandardBlockReward;
 pub use sync_committee_rewards::SyncCommitteeReward;

 // Define "legacy" implementations of `Option<T>` which use four bytes for encoding the union
44
common/eth2/src/lighthouse/attestation_rewards.rs
Normal file
@ -0,0 +1,44 @@
use serde::{Deserialize, Serialize};

// Details about the rewards paid for attestations
// All rewards in GWei

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct IdealAttestationRewards {
    // Validator's effective balance in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub effective_balance: u64,
    // Ideal attester's reward for head vote in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub head: u64,
    // Ideal attester's reward for target vote in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub target: u64,
    // Ideal attester's reward for source vote in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub source: u64,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct TotalAttestationRewards {
    // one entry for every validator based on their attestations in the epoch
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub validator_index: u64,
    // attester's reward for head vote in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub head: u64,
    // attester's reward for target vote in gwei
    #[serde(with = "eth2_serde_utils::quoted_i64")]
    pub target: i64,
    // attester's reward for source vote in gwei
    #[serde(with = "eth2_serde_utils::quoted_i64")]
    pub source: i64,
    // TBD attester's inclusion_delay reward in gwei (phase0 only)
    // pub inclusion_delay: u64,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct StandardAttestationRewards {
    pub ideal_rewards: Vec<IdealAttestationRewards>,
    pub total_rewards: Vec<TotalAttestationRewards>,
}
26
common/eth2/src/lighthouse/standard_block_rewards.rs
Normal file
@ -0,0 +1,26 @@
use serde::{Deserialize, Serialize};

// Details about the rewards for a single block
// All rewards in GWei
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct StandardBlockReward {
    // proposer of the block, the proposer index who receives these rewards
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub proposer_index: u64,
    // total block reward in gwei,
    // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub total: u64,
    // block reward component due to included attestations in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub attestations: u64,
    // block reward component due to included sync_aggregate in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub sync_aggregate: u64,
    // block reward component due to included proposer_slashings in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub proposer_slashings: u64,
    // block reward component due to included attester_slashings in gwei
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub attester_slashings: u64,
}
@ -8,5 +8,6 @@ pub struct SyncCommitteeReward {
     #[serde(with = "eth2_serde_utils::quoted_u64")]
     pub validator_index: u64,
     // sync committee reward in gwei for the validator
+    #[serde(with = "eth2_serde_utils::quoted_i64")]
     pub reward: i64,
 }
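
All of the reward types above quote their 64-bit fields. A self-contained sketch of what the `#[serde(with = "eth2_serde_utils::quoted_u64")]` attribute achieves on the wire (the `quoted_u64` module below is a stand-in for the real one, and the balance is an illustrative value):

```rust
use serde::Serialize;

// Stand-in for `eth2_serde_utils::quoted_u64`: serialize the integer as a
// decimal string, which is what the `#[serde(with = "...")]` attributes do.
mod quoted_u64 {
    use serde::Serializer;

    pub fn serialize<S: Serializer>(value: &u64, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&value.to_string())
    }
}

#[derive(Serialize)]
struct IdealAttestationRewards {
    #[serde(with = "quoted_u64")]
    effective_balance: u64,
}

fn main() {
    let rewards = IdealAttestationRewards {
        effective_balance: 32_000_000_000,
    };
    // Quoting keeps full-range u64/i64 values safe for JSON consumers.
    assert_eq!(
        serde_json::to_string(&rewards).unwrap(),
        r#"{"effective_balance":"32000000000"}"#
    );
}
```
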
@ -236,21 +236,6 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> {
     }
 }

-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
-pub struct ExecutionOptimisticForkVersionedResponse<T> {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub version: Option<ForkName>,
-    pub execution_optimistic: Option<bool>,
-    pub data: T,
-}
-
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
-pub struct ForkVersionedResponse<T> {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub version: Option<ForkName>,
-    pub data: T,
-}
-
 #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
 pub struct RootData {
     pub root: Hash256,
@ -270,11 +255,20 @@ pub struct FinalityCheckpointsData {
 }

 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(try_from = "&str")]
 pub enum ValidatorId {
     PublicKey(PublicKeyBytes),
     Index(u64),
 }

+impl TryFrom<&str> for ValidatorId {
+    type Error = String;
+
+    fn try_from(s: &str) -> Result<Self, Self::Error> {
+        Self::from_str(s)
+    }
+}
+
 impl FromStr for ValidatorId {
     type Err = String;

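
The `#[serde(try_from = "&str")]` attribute added above routes deserialization through the new `TryFrom` impl. A minimal sketch of the pattern with a cut-down, hypothetical enum (only an `Index` variant shown):

```rust
use serde::Deserialize;

#[derive(Debug, PartialEq, Deserialize)]
#[serde(try_from = "&str")]
enum Id {
    Index(u64),
}

impl TryFrom<&str> for Id {
    type Error = String;

    fn try_from(s: &str) -> Result<Self, Self::Error> {
        s.parse::<u64>().map(Id::Index).map_err(|e| e.to_string())
    }
}

fn main() {
    // serde first deserializes a `&str`, then converts it via `TryFrom`.
    let id: Id = serde_json::from_str("\"42\"").unwrap();
    assert_eq!(id, Id::Index(42));
}
```
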
@ -1128,6 +1122,30 @@ pub struct BlocksAndBlobs<T: EthSpec, Payload: AbstractExecPayload<T>> {
|
|||||||
pub kzg_aggregate_proof: KzgProof,
|
pub kzg_aggregate_proof: KzgProof,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
|
||||||
|
for BlocksAndBlobs<T, Payload>
|
||||||
|
{
|
||||||
|
fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
|
||||||
|
value: serde_json::value::Value,
|
||||||
|
fork_name: ForkName,
|
||||||
|
) -> Result<Self, D::Error> {
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(bound = "T: EthSpec")]
|
||||||
|
struct Helper<T: EthSpec> {
|
||||||
|
block: serde_json::Value,
|
||||||
|
blobs: Vec<Blob<T>>,
|
||||||
|
kzg_aggregate_proof: KzgProof,
|
||||||
|
}
|
||||||
|
let helper: Helper<T> = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?,
|
||||||
|
blobs: helper.blobs,
|
||||||
|
kzg_aggregate_proof: helper.kzg_aggregate_proof,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
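
The `Helper` trick above is a common two-phase pattern: deserialize the fork-independent fields eagerly while keeping the fork-dependent part as raw JSON for a second pass. A self-contained illustration (generic payload, not the Lighthouse types):

```rust
use serde::Deserialize;
use serde_json::{json, Value};

#[derive(Deserialize)]
struct Helper {
    // Left as raw JSON so it can be decoded once the variant is known.
    payload: Value,
    count: u64,
}

fn main() {
    let helper: Helper =
        serde_json::from_value(json!({ "payload": {"x": 1}, "count": 3 })).unwrap();
    // Second pass: decode the raw part using out-of-band information.
    let x = helper.payload["x"].as_u64().unwrap();
    assert_eq!((x, helper.count), (1, 3));
}
```
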
@ -722,7 +722,7 @@ where
         op: &InvalidationOperation,
     ) -> Result<(), Error<T::Error>> {
         self.proto_array
-            .process_execution_payload_invalidation(op)
+            .process_execution_payload_invalidation::<E>(op)
             .map_err(Error::FailedToProcessInvalidExecutionPayload)
     }

@ -1288,7 +1288,7 @@ where

         if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch {
             let store = &self.fc_store;
-            if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) {
+            if self.is_finalized_checkpoint_or_descendant(store.best_justified_checkpoint().root) {
                 let store = &mut self.fc_store;
                 store
                     .set_justified_checkpoint(*store.best_justified_checkpoint())
@ -1329,12 +1329,13 @@ where

     /// Returns `true` if the block is known **and** a descendant of the finalized root.
     pub fn contains_block(&self, block_root: &Hash256) -> bool {
-        self.proto_array.contains_block(block_root) && self.is_descendant_of_finalized(*block_root)
+        self.proto_array.contains_block(block_root)
+            && self.is_finalized_checkpoint_or_descendant(*block_root)
     }

     /// Returns a `ProtoBlock` if the block is known **and** a descendant of the finalized root.
     pub fn get_block(&self, block_root: &Hash256) -> Option<ProtoBlock> {
-        if self.is_descendant_of_finalized(*block_root) {
+        if self.is_finalized_checkpoint_or_descendant(*block_root) {
             self.proto_array.get_block(block_root)
         } else {
             None
@ -1343,7 +1344,7 @@ where

     /// Returns an `ExecutionStatus` if the block is known **and** a descendant of the finalized root.
     pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option<ExecutionStatus> {
-        if self.is_descendant_of_finalized(*block_root) {
+        if self.is_finalized_checkpoint_or_descendant(*block_root) {
             self.proto_array.get_block_execution_status(block_root)
         } else {
             None
@ -1378,10 +1379,10 @@ where
         })
     }

-    /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it.
-    pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool {
+    /// Return `true` if `block_root` is equal to the finalized checkpoint, or a known descendant of it.
+    pub fn is_finalized_checkpoint_or_descendant(&self, block_root: Hash256) -> bool {
         self.proto_array
-            .is_descendant(self.fc_store.finalized_checkpoint().root, block_root)
+            .is_finalized_checkpoint_or_descendant::<E>(block_root)
     }

     /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid.
@ -273,7 +273,7 @@ impl ForkChoiceTestDefinition {
                 }
             };
             fork_choice
-                .process_execution_payload_invalidation(&op)
+                .process_execution_payload_invalidation::<MainnetEthSpec>(&op)
                 .unwrap()
         }
         Operation::AssertWeight { block_root, weight } => assert_eq!(
@ -451,7 +451,7 @@ impl ProtoArray {
     /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`.
     ///
     /// See the documentation of `InvalidationOperation` for usage.
-    pub fn propagate_execution_payload_invalidation(
+    pub fn propagate_execution_payload_invalidation<E: EthSpec>(
         &mut self,
         op: &InvalidationOperation,
     ) -> Result<(), Error> {
@ -482,7 +482,7 @@ impl ProtoArray {
         let latest_valid_ancestor_is_descendant =
             latest_valid_ancestor_root.map_or(false, |ancestor_root| {
                 self.is_descendant(ancestor_root, head_block_root)
-                    && self.is_descendant(self.finalized_checkpoint.root, ancestor_root)
+                    && self.is_finalized_checkpoint_or_descendant::<E>(ancestor_root)
             });

         // Collect all *ancestors* which were declared invalid since they reside between the
@ -977,6 +977,12 @@ impl ProtoArray {
     /// ## Notes
     ///
     /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`.
+    ///
+    /// ## Warning
+    ///
+    /// Do not use this function to check if a block is a descendant of the
+    /// finalized checkpoint. Use `Self::is_finalized_checkpoint_or_descendant`
+    /// instead.
     pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool {
         self.indices
             .get(&ancestor_root)
@ -990,6 +996,70 @@ impl ProtoArray {
             .unwrap_or(false)
     }

+    /// Returns `true` if `root` is equal to or a descendant of
+    /// `self.finalized_checkpoint`.
+    ///
+    /// Notably, this function is checking ancestry of the finalized
+    /// *checkpoint*, not the finalized *block*.
+    pub fn is_finalized_checkpoint_or_descendant<E: EthSpec>(&self, root: Hash256) -> bool {
+        let finalized_root = self.finalized_checkpoint.root;
+        let finalized_slot = self
+            .finalized_checkpoint
+            .epoch
+            .start_slot(E::slots_per_epoch());
+
+        let mut node = if let Some(node) = self
+            .indices
+            .get(&root)
+            .and_then(|index| self.nodes.get(*index))
+        {
+            node
+        } else {
+            // An unknown root is not a finalized descendant. This line can only
+            // be reached if the user supplies a root that is not known to fork
+            // choice.
+            return false;
+        };
+
+        // The finalized and justified checkpoints represent a list of known
+        // ancestors of `node` that are likely to coincide with the store's
+        // finalized checkpoint.
+        //
+        // Run this check once, outside of the loop rather than inside the loop.
+        // If the conditions don't match for this node then they're unlikely to
+        // start matching for its ancestors.
+        for checkpoint in &[
+            node.finalized_checkpoint,
+            node.justified_checkpoint,
+            node.unrealized_finalized_checkpoint,
+            node.unrealized_justified_checkpoint,
+        ] {
+            if checkpoint.map_or(false, |cp| cp == self.finalized_checkpoint) {
+                return true;
+            }
+        }
+
+        loop {
+            // If `node` is less than or equal to the finalized slot then `node`
+            // must be the finalized block.
+            if node.slot <= finalized_slot {
+                return node.root == finalized_root;
+            }
+
+            // Since `node` is from a higher slot than the finalized checkpoint,
+            // replace `node` with the parent of `node`.
+            if let Some(parent) = node.parent.and_then(|index| self.nodes.get(index)) {
+                node = parent
+            } else {
+                // If `node` is not the finalized block and its parent does not
+                // exist in fork choice, then the parent must have been pruned.
+                // Proto-array only prunes blocks prior to the finalized block,
+                // so this means the parent conflicts with finality.
+                return false;
+            };
+        }
+    }
+
     /// Returns the first *beacon block root* which contains an execution payload with the given
     /// `block_hash`, if any.
     pub fn execution_block_hash_to_beacon_block_root(
@ -358,12 +358,12 @@ impl ProtoArrayForkChoice {
     }

     /// See `ProtoArray::propagate_execution_payload_invalidation` for documentation.
-    pub fn process_execution_payload_invalidation(
+    pub fn process_execution_payload_invalidation<E: EthSpec>(
         &mut self,
         op: &InvalidationOperation,
     ) -> Result<(), String> {
         self.proto_array
-            .propagate_execution_payload_invalidation(op)
+            .propagate_execution_payload_invalidation::<E>(op)
             .map_err(|e| format!("Failed to process invalid payload: {:?}", e))
     }

@ -748,6 +748,15 @@ impl ProtoArrayForkChoice {
             .is_descendant(ancestor_root, descendant_root)
     }

+    /// See `ProtoArray` documentation.
+    pub fn is_finalized_checkpoint_or_descendant<E: EthSpec>(
+        &self,
+        descendant_root: Hash256,
+    ) -> bool {
+        self.proto_array
+            .is_finalized_checkpoint_or_descendant::<E>(descendant_root)
+    }
+
     pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> {
         if validator_index < self.votes.0.len() {
             let vote = &self.votes.0[validator_index];
@ -928,6 +937,10 @@ mod test_compute_deltas {
             epoch: genesis_epoch,
             root: finalized_root,
         };
+        let junk_checkpoint = Checkpoint {
+            epoch: Epoch::new(42),
+            root: Hash256::repeat_byte(42),
+        };

         let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
             genesis_slot,
@ -973,8 +986,10 @@ mod test_compute_deltas {
             target_root: finalized_root,
             current_epoch_shuffling_id: junk_shuffling_id.clone(),
             next_epoch_shuffling_id: junk_shuffling_id,
-            justified_checkpoint: genesis_checkpoint,
-            finalized_checkpoint: genesis_checkpoint,
+            // Use the junk checkpoint for the next two values to prevent
+            // the loop-shortcutting mechanism from triggering.
+            justified_checkpoint: junk_checkpoint,
+            finalized_checkpoint: junk_checkpoint,
             execution_status,
             unrealized_justified_checkpoint: None,
             unrealized_finalized_checkpoint: None,
@ -993,6 +1008,11 @@ mod test_compute_deltas {
         assert!(!fc.is_descendant(finalized_root, not_finalized_desc));
         assert!(!fc.is_descendant(finalized_root, unknown));

+        assert!(fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_root));
+        assert!(fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_desc));
+        assert!(!fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(not_finalized_desc));
+        assert!(!fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(unknown));
+
         assert!(!fc.is_descendant(finalized_desc, not_finalized_desc));
         assert!(fc.is_descendant(finalized_desc, finalized_desc));
         assert!(!fc.is_descendant(finalized_desc, finalized_root));
@ -1004,6 +1024,171 @@ mod test_compute_deltas {
|
|||||||
assert!(!fc.is_descendant(not_finalized_desc, unknown));
|
assert!(!fc.is_descendant(not_finalized_desc, unknown));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// This test covers an interesting case where a block can be a descendant
|
||||||
|
/// of the finalized *block*, but not a descenant of the finalized
|
||||||
|
/// *checkpoint*.
|
||||||
|
///
|
||||||
|
/// ## Example
|
||||||
|
///
|
||||||
|
/// Consider this block tree which has three blocks (`A`, `B` and `C`):
|
||||||
|
///
|
||||||
|
/// ```ignore
|
||||||
|
/// [A] <--- [-] <--- [B]
|
||||||
|
/// |
|
||||||
|
/// |--[C]
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// - `A` (slot 31) is the common descendant.
|
||||||
|
/// - `B` (slot 33) descends from `A`, but there is a single skip slot
|
||||||
|
/// between it and `A`.
|
||||||
|
/// - `C` (slot 32) descends from `A` and conflicts with `B`.
|
||||||
|
///
|
||||||
|
/// Imagine that the `B` chain is finalized at epoch 1. This means that the
|
||||||
|
/// finalized checkpoint points to the skipped slot at 32. The root of the
|
||||||
|
/// finalized checkpoint is `A`.
|
||||||
|
///
|
||||||
|
/// In this scenario, the block `C` has the finalized root (`A`) as an
|
||||||
|
/// ancestor whilst simultaneously conflicting with the finalized
|
||||||
|
/// checkpoint.
|
||||||
|
///
|
||||||
|
/// This means that to ensure a block does not conflict with finality we
|
||||||
|
/// must check to ensure that it's an ancestor of the finalized
|
||||||
|
/// *checkpoint*, not just the finalized *block*.
|
||||||
|
#[test]
|
||||||
|
fn finalized_descendant_edge_case() {
|
||||||
|
let get_block_root = Hash256::from_low_u64_be;
|
||||||
|
let genesis_slot = Slot::new(0);
|
||||||
|
let junk_state_root = Hash256::zero();
|
||||||
|
let junk_shuffling_id =
|
||||||
|
AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
|
||||||
|
let execution_status = ExecutionStatus::irrelevant();
|
||||||
|
|
||||||
|
let genesis_checkpoint = Checkpoint {
|
||||||
|
epoch: Epoch::new(0),
|
||||||
|
root: get_block_root(0),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
|
||||||
|
genesis_slot,
|
||||||
|
junk_state_root,
|
||||||
|
genesis_checkpoint,
|
||||||
|
genesis_checkpoint,
|
||||||
|
junk_shuffling_id.clone(),
|
||||||
|
junk_shuffling_id.clone(),
|
||||||
|
execution_status,
|
||||||
|
CountUnrealizedFull::default(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
struct TestBlock {
|
||||||
|
slot: u64,
|
||||||
|
root: u64,
|
||||||
|
parent_root: u64,
+    }
+
+    let insert_block = |fc: &mut ProtoArrayForkChoice, block: TestBlock| {
+        fc.proto_array
+            .on_block::<MainnetEthSpec>(
+                Block {
+                    slot: Slot::from(block.slot),
+                    root: get_block_root(block.root),
+                    parent_root: Some(get_block_root(block.parent_root)),
+                    state_root: Hash256::zero(),
+                    target_root: Hash256::zero(),
+                    current_epoch_shuffling_id: junk_shuffling_id.clone(),
+                    next_epoch_shuffling_id: junk_shuffling_id.clone(),
+                    justified_checkpoint: Checkpoint {
+                        epoch: Epoch::new(0),
+                        root: get_block_root(0),
+                    },
+                    finalized_checkpoint: genesis_checkpoint,
+                    execution_status,
+                    unrealized_justified_checkpoint: Some(genesis_checkpoint),
+                    unrealized_finalized_checkpoint: Some(genesis_checkpoint),
+                },
+                Slot::from(block.slot),
+            )
+            .unwrap();
+    };
+
+    /*
+     * Start of the interesting part of the tests.
+     */
+
+    // Produce the 0th epoch of blocks. They should all form a chain from
+    // the genesis block.
+    for i in 1..MainnetEthSpec::slots_per_epoch() {
+        insert_block(
+            &mut fc,
+            TestBlock {
+                slot: i,
+                root: i,
+                parent_root: i - 1,
+            },
+        )
+    }
+
+    let last_slot_of_epoch_0 = MainnetEthSpec::slots_per_epoch() - 1;
+
+    // Produce a block that descends from the last block of epoch 0.
+    //
+    // This block will be non-canonical.
+    let non_canonical_slot = last_slot_of_epoch_0 + 1;
+    insert_block(
+        &mut fc,
+        TestBlock {
+            slot: non_canonical_slot,
+            root: non_canonical_slot,
+            parent_root: non_canonical_slot - 1,
+        },
+    );
+
+    // Produce a block that descends from the last block of the 0th epoch
+    // and skips the 1st slot of the 1st epoch.
+    //
+    // This block will be canonical.
+    let canonical_slot = last_slot_of_epoch_0 + 2;
+    insert_block(
+        &mut fc,
+        TestBlock {
+            slot: canonical_slot,
+            root: canonical_slot,
+            parent_root: non_canonical_slot - 1,
+        },
+    );
+
+    let finalized_root = get_block_root(last_slot_of_epoch_0);
+
+    // Set the finalized checkpoint to finalize the first slot of epoch 1 on
+    // the canonical chain.
+    fc.proto_array.finalized_checkpoint = Checkpoint {
+        root: finalized_root,
+        epoch: Epoch::new(1),
+    };
+
+    assert!(
+        fc.proto_array
+            .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_root),
+        "the finalized checkpoint is the finalized checkpoint"
+    );
+
+    assert!(
+        fc.proto_array
+            .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(get_block_root(
+                canonical_slot
+            )),
+        "the canonical block is a descendant of the finalized checkpoint"
+    );
+    assert!(
+        !fc.proto_array
+            .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(get_block_root(
+                non_canonical_slot
+            )),
+        "although the non-canonical block is a descendant of the finalized block, \
+         it's not a descendant of the finalized checkpoint"
+    );
+}
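To make the topology concrete: with `MainnetEthSpec` (32 slots per epoch) the test builds blocks 1 through 31 in epoch 0, then gives block 31 two competing children at slots 32 and 33 before finalizing `(block 31, epoch 1)`. A sketch of the resulting tree (our commentary, not part of the diff):

// genesis -- 1 -- 2 -- ... -- 31            (epoch 0)
//                              |-- 32       non-canonical child
//                              `-- 33       canonical child (slot 32 skipped)
//
// The checkpoint (block 31, epoch 1) means "block 31 as seen at the first
// slot of epoch 1", i.e. a chain in which slot 32 is empty. Block 33 agrees
// with that view, while block 32 fills slot 32 on a conflicting branch: it
// descends from the finalized *block* but not from the finalized *checkpoint*.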
 #[test]
 fn zero_hash() {
     let validator_count: usize = 16;
@@ -12,4 +12,4 @@ pub mod u64_hex_be;
 pub mod u8_hex;

 pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex};
-pub use quoted_int::{quoted_u256, quoted_u32, quoted_u64, quoted_u8};
+pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8};
@@ -11,7 +11,7 @@ use std::convert::TryFrom;
 use std::marker::PhantomData;

 macro_rules! define_mod {
-    ($int: ty, $visit_fn: ident) => {
+    ($int: ty) => {
         /// Serde support for deserializing quoted integers.
         ///
         /// Configurable so that quotes are either required or optional.
@@ -140,19 +140,25 @@ macro_rules! define_mod {
 pub mod quoted_u8 {
     use super::*;

-    define_mod!(u8, visit_u8);
+    define_mod!(u8);
 }

 pub mod quoted_u32 {
     use super::*;

-    define_mod!(u32, visit_u32);
+    define_mod!(u32);
 }

 pub mod quoted_u64 {
     use super::*;

-    define_mod!(u64, visit_u64);
+    define_mod!(u64);
+}
+
+pub mod quoted_i64 {
+    use super::*;
+
+    define_mod!(i64);
 }

 pub mod quoted_u256 {
@@ -216,4 +222,26 @@ mod test {
     fn u256_without_quotes() {
         serde_json::from_str::<WrappedU256>("1").unwrap_err();
     }
+
+    #[derive(Debug, PartialEq, Serialize, Deserialize)]
+    #[serde(transparent)]
+    struct WrappedI64(#[serde(with = "quoted_i64")] i64);
+
+    #[test]
+    fn negative_i64_with_quotes() {
+        assert_eq!(
+            serde_json::from_str::<WrappedI64>("\"-200\"").unwrap().0,
+            -200
+        );
+        assert_eq!(
+            serde_json::to_string(&WrappedI64(-12_500)).unwrap(),
+            "\"-12500\""
+        );
+    }
+
+    // It would be OK if this worked, but we don't need it to (i64s should always be quoted).
+    #[test]
+    fn negative_i64_without_quotes() {
+        serde_json::from_str::<WrappedI64>("-200").unwrap_err();
+    }
 }
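Usage of the new module mirrors the existing unsigned helpers: annotate an `i64` field and it round-trips as a quoted decimal string. A minimal sketch (the struct here is hypothetical, not part of the diff):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Example {
    // Hypothetical field; the attribute is the point.
    #[serde(with = "eth2_serde_utils::quoted_i64")]
    balance_delta: i64,
}

// serde_json::to_string(&Example { balance_delta: -200 })
// yields {"balance_delta":"-200"}.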
@@ -348,8 +348,7 @@ where
         &mut self,
         block: &'a SignedBeaconBlock<T, Payload>,
     ) -> Result<()> {
-        // FIXME(capella): to improve performance we might want to decompress the withdrawal pubkeys
-        // in parallel.
+        // To improve performance we might want to decompress the withdrawal pubkeys in parallel.
         if let Ok(bls_to_execution_changes) = block.message().body().bls_to_execution_changes() {
             for bls_to_execution_change in bls_to_execution_changes {
                 self.sets.push(bls_execution_change_signature_set(
@@ -37,10 +37,9 @@ pub fn verify_bls_to_execution_change<T: EthSpec>(
         Invalid::NonBlsWithdrawalCredentials
     );

+    // Re-hashing the pubkey isn't necessary during block replay, so we may want to skip that in
+    // future.
     let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized());
-
-    // FIXME: Should this check be put inside the verify_signatures.is_true() condition?
-    // I believe that's used for fuzzing so this is a Mehdi question..
     verify!(
         validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..),
         Invalid::WithdrawalCredentialsMismatch
@@ -685,6 +685,24 @@ impl<E: EthSpec> From<BeaconBlock<E, FullPayload<E>>>
     }
 }

+impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
+    for BeaconBlock<T, Payload>
+{
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        Ok(map_fork_name!(
+            fork_name,
+            Self,
+            serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!(
+                "BeaconBlock failed to deserialize: {:?}",
+                e
+            )))?
+        ))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -278,7 +278,7 @@ impl<E: EthSpec> From<BeaconBlockBodyMerge<E, FullPayload<E>>>
             voluntary_exits,
             sync_aggregate,
             execution_payload: BlindedPayloadMerge {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: From::from(&execution_payload),
             },
         },
         Some(execution_payload),
@@ -319,7 +319,7 @@ impl<E: EthSpec> From<BeaconBlockBodyCapella<E, FullPayload<E>>>
             voluntary_exits,
             sync_aggregate,
             execution_payload: BlindedPayloadCapella {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: From::from(&execution_payload),
             },
             bls_to_execution_changes,
         },
@@ -362,7 +362,7 @@ impl<E: EthSpec> From<BeaconBlockBodyEip4844<E, FullPayload<E>>>
             voluntary_exits,
             sync_aggregate,
             execution_payload: BlindedPayloadEip4844 {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: From::from(&execution_payload),
             },
             bls_to_execution_changes,
             blob_kzg_commitments,
@@ -413,7 +413,7 @@ impl<E: EthSpec> BeaconBlockBodyMerge<E, FullPayload<E>> {
             voluntary_exits: voluntary_exits.clone(),
             sync_aggregate: sync_aggregate.clone(),
             execution_payload: BlindedPayloadMerge {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: execution_payload.into(),
             },
         }
     }
@@ -446,7 +446,7 @@ impl<E: EthSpec> BeaconBlockBodyCapella<E, FullPayload<E>> {
             voluntary_exits: voluntary_exits.clone(),
             sync_aggregate: sync_aggregate.clone(),
             execution_payload: BlindedPayloadCapella {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: execution_payload.into(),
             },
             bls_to_execution_changes: bls_to_execution_changes.clone(),
         }
@@ -481,7 +481,7 @@ impl<E: EthSpec> BeaconBlockBodyEip4844<E, FullPayload<E>> {
             voluntary_exits: voluntary_exits.clone(),
             sync_aggregate: sync_aggregate.clone(),
             execution_payload: BlindedPayloadEip4844 {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: execution_payload.into(),
             },
             bls_to_execution_changes: bls_to_execution_changes.clone(),
             blob_kzg_commitments: blob_kzg_commitments.clone(),
@@ -301,8 +301,10 @@ where

     // Capella
     #[superstruct(only(Capella, Eip4844), partial_getter(copy))]
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
     pub next_withdrawal_index: u64,
     #[superstruct(only(Capella, Eip4844), partial_getter(copy))]
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
     pub next_withdrawal_validator_index: u64,
     // Deep history valid from Capella onwards.
     #[superstruct(only(Capella, Eip4844))]
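The two `quoted_u64` attributes make the new Capella fields serialize as quoted decimal strings, like the other `u64` fields in the state. This matters because bare JSON numbers above 2^53 get rounded by JavaScript-style parsers. A hypothetical sketch of the effect:

use serde::Serialize;

#[derive(Serialize)]
struct Demo {
    // Hypothetical mirror of `next_withdrawal_index` above.
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    next_withdrawal_index: u64,
}

// Serializes as {"next_withdrawal_index":"18446744073709551615"}: the string
// form survives JSON parsers that would round a bare 64-bit number.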
@@ -1853,3 +1855,19 @@ impl<T: EthSpec> CompareFields for BeaconState<T> {
         }
     }
 }
+
+impl<T: EthSpec> ForkVersionDeserialize for BeaconState<T> {
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        Ok(map_fork_name!(
+            fork_name,
+            Self,
+            serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!(
+                "BeaconState failed to deserialize: {:?}",
+                e
+            )))?
+        ))
+    }
+}
@@ -1,6 +1,6 @@
 use crate::{
-    AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot,
-    Uint256,
+    AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, ForkName,
+    ForkVersionDeserialize, SignedRoot, Uint256,
 };
 use bls::PublicKeyBytes;
 use bls::Signature;
@@ -34,6 +34,60 @@ pub struct SignedBuilderBid<E: EthSpec, Payload: AbstractExecPayload<E>> {
     pub signature: Signature,
 }

+impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
+    for BuilderBid<T, Payload>
+{
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        let convert_err = |_| {
+            serde::de::Error::custom(
+                "BuilderBid failed to deserialize: unable to convert payload header to payload",
+            )
+        };
+
+        #[derive(Deserialize)]
+        struct Helper {
+            header: serde_json::Value,
+            #[serde(with = "eth2_serde_utils::quoted_u256")]
+            value: Uint256,
+            pubkey: PublicKeyBytes,
+        }
+        let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
+        let payload_header =
+            ExecutionPayloadHeader::deserialize_by_fork::<'de, D>(helper.header, fork_name)?;
+
+        Ok(Self {
+            header: Payload::try_from(payload_header).map_err(convert_err)?,
+            value: helper.value,
+            pubkey: helper.pubkey,
+            _phantom_data: Default::default(),
+        })
+    }
+}
+
+impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
+    for SignedBuilderBid<T, Payload>
+{
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        #[derive(Deserialize)]
+        struct Helper {
+            pub message: serde_json::Value,
+            pub signature: Signature,
+        }
+        let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
+
+        Ok(Self {
+            message: BuilderBid::deserialize_by_fork::<'de, D>(helper.message, fork_name)?,
+            signature: helper.signature,
+        })
+    }
+}
+
 struct BlindedPayloadAsHeader<E>(PhantomData<E>);

 impl<E: EthSpec, Payload: ExecPayload<E>> SerializeAs<Payload> for BlindedPayloadAsHeader<E> {
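Both impls use the same two-step shape: deserialize the fork-agnostic fields directly, keep the fork-dependent field as a raw `serde_json::Value`, and dispatch it once the fork is known. A self-contained restatement of the pattern (names hypothetical):

use serde::Deserialize;
use serde_json::Value;

#[derive(Deserialize)]
struct Envelope {
    signature: String, // fork-agnostic: parsed immediately
    message: Value,    // fork-dependent: parsing deferred
}

fn split(raw: Value) -> Result<(String, Value), serde_json::Error> {
    let helper: Envelope = serde_json::from_value(raw)?;
    // `helper.message` is handed to a fork-aware parser afterwards.
    Ok((helper.signature, helper.message))
}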
@@ -35,7 +35,9 @@ pub type Withdrawals<T> = VariableList<Withdrawal, <T as EthSpec>::MaxWithdrawal
         arbitrary(bound = "T: EthSpec")
     ),
     cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"),
-    partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant")
+    partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"),
+    map_into(FullPayload, BlindedPayload),
+    map_ref_into(ExecutionPayloadHeader)
 )]
 #[derive(
     Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary,
@@ -146,3 +148,26 @@ impl<T: EthSpec> ExecutionPayload<T> {
             + (T::max_withdrawals_per_payload() * <Withdrawal as Encode>::ssz_fixed_len())
     }
 }
+
+impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayload<T> {
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        let convert_err = |e| {
+            serde::de::Error::custom(format!("ExecutionPayload failed to deserialize: {:?}", e))
+        };
+
+        Ok(match fork_name {
+            ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?),
+            ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?),
+            ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?),
+            ForkName::Base | ForkName::Altair => {
+                return Err(serde::de::Error::custom(format!(
+                    "ExecutionPayload failed to deserialize: unsupported fork '{}'",
+                    fork_name
+                )));
+            }
+        })
+    }
+}
@@ -159,40 +159,40 @@ impl<T: EthSpec> ExecutionPayloadHeaderCapella<T> {
     }
 }

-impl<T: EthSpec> From<ExecutionPayloadMerge<T>> for ExecutionPayloadHeaderMerge<T> {
-    fn from(payload: ExecutionPayloadMerge<T>) -> Self {
+impl<'a, T: EthSpec> From<&'a ExecutionPayloadMerge<T>> for ExecutionPayloadHeaderMerge<T> {
+    fn from(payload: &'a ExecutionPayloadMerge<T>) -> Self {
         Self {
             parent_hash: payload.parent_hash,
             fee_recipient: payload.fee_recipient,
             state_root: payload.state_root,
             receipts_root: payload.receipts_root,
-            logs_bloom: payload.logs_bloom,
+            logs_bloom: payload.logs_bloom.clone(),
             prev_randao: payload.prev_randao,
             block_number: payload.block_number,
             gas_limit: payload.gas_limit,
             gas_used: payload.gas_used,
             timestamp: payload.timestamp,
-            extra_data: payload.extra_data,
+            extra_data: payload.extra_data.clone(),
             base_fee_per_gas: payload.base_fee_per_gas,
             block_hash: payload.block_hash,
             transactions_root: payload.transactions.tree_hash_root(),
         }
     }
 }
-impl<T: EthSpec> From<ExecutionPayloadCapella<T>> for ExecutionPayloadHeaderCapella<T> {
-    fn from(payload: ExecutionPayloadCapella<T>) -> Self {
+impl<'a, T: EthSpec> From<&'a ExecutionPayloadCapella<T>> for ExecutionPayloadHeaderCapella<T> {
+    fn from(payload: &'a ExecutionPayloadCapella<T>) -> Self {
         Self {
             parent_hash: payload.parent_hash,
             fee_recipient: payload.fee_recipient,
             state_root: payload.state_root,
             receipts_root: payload.receipts_root,
-            logs_bloom: payload.logs_bloom,
+            logs_bloom: payload.logs_bloom.clone(),
             prev_randao: payload.prev_randao,
             block_number: payload.block_number,
             gas_limit: payload.gas_limit,
             gas_used: payload.gas_used,
             timestamp: payload.timestamp,
-            extra_data: payload.extra_data,
+            extra_data: payload.extra_data.clone(),
             base_fee_per_gas: payload.base_fee_per_gas,
             block_hash: payload.block_hash,
             transactions_root: payload.transactions.tree_hash_root(),
@@ -200,20 +200,21 @@ impl<T: EthSpec> From<ExecutionPayloadCapella<T>> for ExecutionPayloadHeaderCape
         }
     }
 }
-impl<T: EthSpec> From<ExecutionPayloadEip4844<T>> for ExecutionPayloadHeaderEip4844<T> {
-    fn from(payload: ExecutionPayloadEip4844<T>) -> Self {
+
+impl<'a, T: EthSpec> From<&'a ExecutionPayloadEip4844<T>> for ExecutionPayloadHeaderEip4844<T> {
+    fn from(payload: &'a ExecutionPayloadEip4844<T>) -> Self {
         Self {
             parent_hash: payload.parent_hash,
             fee_recipient: payload.fee_recipient,
             state_root: payload.state_root,
             receipts_root: payload.receipts_root,
-            logs_bloom: payload.logs_bloom,
+            logs_bloom: payload.logs_bloom.clone(),
             prev_randao: payload.prev_randao,
             block_number: payload.block_number,
             gas_limit: payload.gas_limit,
             gas_used: payload.gas_used,
             timestamp: payload.timestamp,
-            extra_data: payload.extra_data,
+            extra_data: payload.extra_data.clone(),
             base_fee_per_gas: payload.base_fee_per_gas,
             excess_data_gas: payload.excess_data_gas,
             block_hash: payload.block_hash,
@@ -223,31 +224,33 @@ impl<T: EthSpec> From<ExecutionPayloadEip4844<T>> for ExecutionPayloadHeaderEip4
     }
 }

-impl<T: EthSpec> From<ExecutionPayloadMerge<T>> for ExecutionPayloadHeader<T> {
-    fn from(payload: ExecutionPayloadMerge<T>) -> Self {
-        Self::Merge(ExecutionPayloadHeaderMerge::from(payload))
+// These impls are required to work around an inelegance in `to_execution_payload_header`.
+// They only clone headers so they should be relatively cheap.
+impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge<T> {
+    fn from(payload: &'a Self) -> Self {
+        payload.clone()
     }
 }

-impl<T: EthSpec> From<ExecutionPayloadCapella<T>> for ExecutionPayloadHeader<T> {
-    fn from(payload: ExecutionPayloadCapella<T>) -> Self {
-        Self::Capella(ExecutionPayloadHeaderCapella::from(payload))
+impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderCapella<T> {
+    fn from(payload: &'a Self) -> Self {
+        payload.clone()
     }
 }

-impl<T: EthSpec> From<ExecutionPayloadEip4844<T>> for ExecutionPayloadHeader<T> {
-    fn from(payload: ExecutionPayloadEip4844<T>) -> Self {
-        Self::Eip4844(ExecutionPayloadHeaderEip4844::from(payload))
+impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderEip4844<T> {
+    fn from(payload: &'a Self) -> Self {
+        payload.clone()
     }
 }

-impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionPayloadHeader<T> {
-    fn from(payload: ExecutionPayload<T>) -> Self {
-        match payload {
-            ExecutionPayload::Merge(payload) => Self::from(payload),
-            ExecutionPayload::Capella(payload) => Self::from(payload),
-            ExecutionPayload::Eip4844(payload) => Self::from(payload),
-        }
+impl<'a, T: EthSpec> From<ExecutionPayloadRef<'a, T>> for ExecutionPayloadHeader<T> {
+    fn from(payload: ExecutionPayloadRef<'a, T>) -> Self {
+        map_execution_payload_ref_into_execution_payload_header!(
+            &'a _,
+            payload,
+            |inner, cons| cons(inner.into())
+        )
     }
 }

@@ -282,3 +285,29 @@ impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for ExecutionPayloadHeaderEi
         }
     }
 }
+
+impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayloadHeader<T> {
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        let convert_err = |e| {
+            serde::de::Error::custom(format!(
+                "ExecutionPayloadHeader failed to deserialize: {:?}",
+                e
+            ))
+        };
+
+        Ok(match fork_name {
+            ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?),
+            ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?),
+            ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?),
+            ForkName::Base | ForkName::Altair => {
+                return Err(serde::de::Error::custom(format!(
+                    "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'",
+                    fork_name
+                )));
+            }
+        })
+    }
+}
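The switch from by-value to by-reference `From` impls means deriving a header no longer consumes the payload: scalar fields are copied, only `logs_bloom` and `extra_data` are cloned, and the transaction list collapses to its `tree_hash_root()`. A minimal sketch of the new calling pattern (types from this crate, payload contents defaulted for brevity):

use types::{ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, MainnetEthSpec};

fn demo() {
    let payload = ExecutionPayloadMerge::<MainnetEthSpec>::default();
    // Borrow-based conversion: `payload` stays usable afterwards.
    let header = ExecutionPayloadHeaderMerge::from(&payload);
    assert_eq!(header.block_hash, payload.block_hash);
}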
consensus/types/src/fork_versioned_response.rs (new file, 138 lines)
@@ -0,0 +1,138 @@
+use crate::ForkName;
+use serde::de::DeserializeOwned;
+use serde::{Deserialize, Deserializer, Serialize};
+use serde_json::value::Value;
+use std::sync::Arc;
+
+// Deserialize is only implemented for types that implement ForkVersionDeserialize.
+#[derive(Debug, PartialEq, Clone, Serialize)]
+pub struct ExecutionOptimisticForkVersionedResponse<T> {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub version: Option<ForkName>,
+    pub execution_optimistic: Option<bool>,
+    pub data: T,
+}
+
+impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticForkVersionedResponse<F>
+where
+    F: ForkVersionDeserialize,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        struct Helper {
+            version: Option<ForkName>,
+            execution_optimistic: Option<bool>,
+            data: serde_json::Value,
+        }
+
+        let helper = Helper::deserialize(deserializer)?;
+        let data = match helper.version {
+            Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?,
+            None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?,
+        };
+
+        Ok(ExecutionOptimisticForkVersionedResponse {
+            version: helper.version,
+            execution_optimistic: helper.execution_optimistic,
+            data,
+        })
+    }
+}
+
+pub trait ForkVersionDeserialize: Sized + DeserializeOwned {
+    fn deserialize_by_fork<'de, D: Deserializer<'de>>(
+        value: Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error>;
+}
+
+// Deserialize is only implemented for types that implement ForkVersionDeserialize.
+#[derive(Debug, PartialEq, Clone, Serialize)]
+pub struct ForkVersionedResponse<T> {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub version: Option<ForkName>,
+    pub data: T,
+}
+
+impl<'de, F> serde::Deserialize<'de> for ForkVersionedResponse<F>
+where
+    F: ForkVersionDeserialize,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        struct Helper {
+            version: Option<ForkName>,
+            data: serde_json::Value,
+        }
+
+        let helper = Helper::deserialize(deserializer)?;
+        let data = match helper.version {
+            Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?,
+            None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?,
+        };
+
+        Ok(ForkVersionedResponse {
+            version: helper.version,
+            data,
+        })
+    }
+}
+
+impl<F: ForkVersionDeserialize> ForkVersionDeserialize for Arc<F> {
+    fn deserialize_by_fork<'de, D: Deserializer<'de>>(
+        value: Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        Ok(Arc::new(F::deserialize_by_fork::<'de, D>(
+            value, fork_name,
+        )?))
+    }
+}
+
+#[cfg(test)]
+mod fork_version_response_tests {
+    use crate::{
+        ExecutionPayload, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, MainnetEthSpec,
+    };
+    use serde_json::json;
+
+    #[test]
+    fn fork_versioned_response_deserialize_correct_fork() {
+        type E = MainnetEthSpec;
+
+        let response_json =
+            serde_json::to_string(&json!(ForkVersionedResponse::<ExecutionPayload<E>> {
+                version: Some(ForkName::Merge),
+                data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()),
+            }))
+            .unwrap();
+
+        let result: Result<ForkVersionedResponse<ExecutionPayload<E>>, _> =
+            serde_json::from_str(&response_json);
+
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn fork_versioned_response_deserialize_incorrect_fork() {
+        type E = MainnetEthSpec;
+
+        let response_json =
+            serde_json::to_string(&json!(ForkVersionedResponse::<ExecutionPayload<E>> {
+                version: Some(ForkName::Capella),
+                data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()),
+            }))
+            .unwrap();
+
+        let result: Result<ForkVersionedResponse<ExecutionPayload<E>>, _> =
+            serde_json::from_str(&response_json);
+
+        assert!(result.is_err());
+    }
+}
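Putting the pieces together: any type implementing `ForkVersionDeserialize` can now be pulled out of the beacon API's versioned envelope, with the top-level `version` field selecting how `data` is interpreted. A sketch (the JSON source is assumed to come from an API response):

use types::{ExecutionPayload, ForkVersionedResponse, MainnetEthSpec};

fn parse_payload_response(
    json: &str,
) -> Result<ForkVersionedResponse<ExecutionPayload<MainnetEthSpec>>, serde_json::Error> {
    // `version` routes `data` to the matching fork variant; as the tests above
    // show, a body that doesn't match the declared version is an error.
    serde_json::from_str(json)
}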
@@ -46,6 +46,7 @@ pub mod execution_payload_header;
 pub mod fork;
 pub mod fork_data;
 pub mod fork_name;
+pub mod fork_versioned_response;
 pub mod free_attestation;
 pub mod graffiti;
 pub mod historical_batch;
@@ -150,6 +151,9 @@ pub use crate::fork::Fork;
 pub use crate::fork_context::ForkContext;
 pub use crate::fork_data::ForkData;
 pub use crate::fork_name::{ForkName, InconsistentFork};
+pub use crate::fork_versioned_response::{
+    ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse,
+};
 pub use crate::free_attestation::FreeAttestation;
 pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN};
 pub use crate::historical_batch::HistoricalBatch;
@@ -4,6 +4,7 @@ use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
+use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::fmt::Debug;
 use std::hash::Hash;
@@ -90,15 +91,15 @@ pub trait AbstractExecPayload<T: EthSpec>:

     type Merge: OwnedExecPayload<T>
         + Into<Self>
-        + From<ExecutionPayloadMerge<T>>
+        + for<'a> From<Cow<'a, ExecutionPayloadMerge<T>>>
         + TryFrom<ExecutionPayloadHeaderMerge<T>>;
     type Capella: OwnedExecPayload<T>
         + Into<Self>
-        + From<ExecutionPayloadCapella<T>>
+        + for<'a> From<Cow<'a, ExecutionPayloadCapella<T>>>
         + TryFrom<ExecutionPayloadHeaderCapella<T>>;
     type Eip4844: OwnedExecPayload<T>
         + Into<Self>
-        + From<ExecutionPayloadEip4844<T>>
+        + for<'a> From<Cow<'a, ExecutionPayloadEip4844<T>>>
         + TryFrom<ExecutionPayloadHeaderEip4844<T>>;

     fn default_at_fork(fork_name: ForkName) -> Result<Self, Error>;
@@ -150,31 +151,21 @@ pub struct FullPayload<T: EthSpec> {

 impl<T: EthSpec> From<FullPayload<T>> for ExecutionPayload<T> {
     fn from(full_payload: FullPayload<T>) -> Self {
-        match full_payload {
-            FullPayload::Merge(payload) => ExecutionPayload::Merge(payload.execution_payload),
-            FullPayload::Capella(payload) => ExecutionPayload::Capella(payload.execution_payload),
-            FullPayload::Eip4844(payload) => ExecutionPayload::Eip4844(payload.execution_payload),
-        }
+        map_full_payload_into_execution_payload!(full_payload, move |payload, cons| {
+            cons(payload.execution_payload)
+        })
     }
 }

 impl<'a, T: EthSpec> From<FullPayloadRef<'a, T>> for ExecutionPayload<T> {
     fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self {
-        match full_payload_ref {
-            FullPayloadRef::Merge(payload) => {
-                ExecutionPayload::Merge(payload.execution_payload.clone())
-            }
-            FullPayloadRef::Capella(payload) => {
-                ExecutionPayload::Capella(payload.execution_payload.clone())
-            }
-            FullPayloadRef::Eip4844(payload) => {
-                ExecutionPayload::Eip4844(payload.execution_payload.clone())
-            }
-        }
+        map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| {
+            cons(payload);
+            payload.execution_payload.clone().into()
+        })
     }
 }

-// FIXME: can this be implemented as Deref or Clone somehow?
 impl<'a, T: EthSpec> From<FullPayloadRef<'a, T>> for FullPayload<T> {
     fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self {
         map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| {
@@ -189,11 +180,12 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> {
         BlockType::Full
     }

-    fn to_execution_payload_header(&self) -> ExecutionPayloadHeader<T> {
-        let payload = map_full_payload_into_execution_payload!(self.clone(), |inner, cons| {
-            cons(inner.execution_payload)
-        });
-        ExecutionPayloadHeader::from(payload)
+    fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader<T> {
+        map_full_payload_ref!(&'a _, self.to_ref(), move |inner, cons| {
+            cons(inner);
+            let exec_payload_ref: ExecutionPayloadRef<'a, T> = From::from(&inner.execution_payload);
+            ExecutionPayloadHeader::from(exec_payload_ref)
+        })
     }

     fn parent_hash<'a>(&'a self) -> ExecutionBlockHash {
@@ -404,17 +396,9 @@ impl<T: EthSpec> AbstractExecPayload<T> for FullPayload<T> {

 impl<T: EthSpec> From<ExecutionPayload<T>> for FullPayload<T> {
     fn from(execution_payload: ExecutionPayload<T>) -> Self {
-        match execution_payload {
-            ExecutionPayload::Merge(execution_payload) => {
-                Self::Merge(FullPayloadMerge { execution_payload })
-            }
-            ExecutionPayload::Capella(execution_payload) => {
-                Self::Capella(FullPayloadCapella { execution_payload })
-            }
-            ExecutionPayload::Eip4844(execution_payload) => {
-                Self::Eip4844(FullPayloadEip4844 { execution_payload })
-            }
-        }
+        map_execution_payload_into_full_payload!(execution_payload, |inner, cons| {
+            cons(inner.into())
+        })
     }
 }

@@ -666,6 +650,7 @@ macro_rules! impl_exec_payload_common {
         $wrapped_field:ident, // execution_payload_header | execution_payload
         $fork_variant:ident, // Merge | Merge
         $block_type_variant:ident, // Blinded | Full
+        $is_default_with_empty_roots:block,
         $f:block,
         $g:block) => {
         impl<T: EthSpec> ExecPayload<T> for $wrapper_type<T> {
@@ -675,7 +660,7 @@ macro_rules! impl_exec_payload_common {

             fn to_execution_payload_header(&self) -> ExecutionPayloadHeader<T> {
                 ExecutionPayloadHeader::$fork_variant($wrapped_type_header::from(
-                    self.$wrapped_field.clone(),
+                    &self.$wrapped_field,
                 ))
             }

@@ -712,15 +697,8 @@ macro_rules! impl_exec_payload_common {
             }

             fn is_default_with_empty_roots(&self) -> bool {
-                // FIXME: is there a better way than ignoring this lint?
-                // This is necessary because the first invocation of this macro might expand to:
-                // self.execution_payload_header == ExecutionPayloadHeaderMerge::from(ExecutionPayloadMerge::default())
-                // but the second invocation might expand to:
-                // self.execution_payload == ExecutionPayloadMerge::from(ExecutionPayloadMerge::default())
-                #[allow(clippy::cmp_owned)]
-                {
-                    self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default())
-                }
+                let f = $is_default_with_empty_roots;
+                f(self)
             }

             fn transactions(&self) -> Option<&Transactions<T>> {
@@ -755,6 +733,12 @@ macro_rules! impl_exec_payload_for_fork {
             execution_payload_header,
             $fork_variant, // Merge
             Blinded,
+            {
+                |wrapper: &$wrapper_type_header<T>| {
+                    wrapper.execution_payload_header
+                        == $wrapped_type_header::from(&$wrapped_type_full::default())
+                }
+            },
             { |_| { None } },
             {
                 let c: for<'a> fn(&'a $wrapper_type_header<T>) -> Result<Hash256, Error> =
@@ -788,7 +772,7 @@ macro_rules! impl_exec_payload_for_fork {
             fn default() -> Self {
                 Self {
                     execution_payload_header: $wrapped_type_header::from(
-                        $wrapped_type_full::default(),
+                        &$wrapped_type_full::default(),
                     ),
                 }
             }
@@ -806,11 +790,11 @@ macro_rules! impl_exec_payload_for_fork {
             }
         }

-        // FIXME(sproul): consider adding references to these From impls
-        impl<T: EthSpec> From<$wrapped_type_full<T>> for $wrapper_type_header<T> {
-            fn from(execution_payload: $wrapped_type_full<T>) -> Self {
+        // BlindedPayload* from CoW reference to ExecutionPayload* (hopefully just a reference).
+        impl<'a, T: EthSpec> From<Cow<'a, $wrapped_type_full<T>>> for $wrapper_type_header<T> {
+            fn from(execution_payload: Cow<'a, $wrapped_type_full<T>>) -> Self {
                 Self {
-                    execution_payload_header: $wrapped_type_header::from(execution_payload),
+                    execution_payload_header: $wrapped_type_header::from(&*execution_payload),
                 }
             }
         }
@@ -825,6 +809,11 @@ macro_rules! impl_exec_payload_for_fork {
             execution_payload,
             $fork_variant, // Merge
             Full,
+            {
+                |wrapper: &$wrapper_type_full<T>| {
+                    wrapper.execution_payload == $wrapped_type_full::default()
+                }
+            },
             {
                 let c: for<'a> fn(&'a $wrapper_type_full<T>) -> Option<&'a Transactions<T>> =
                     |payload: &$wrapper_type_full<T>| Some(&payload.execution_payload.transactions);
@@ -848,6 +837,15 @@ macro_rules! impl_exec_payload_for_fork {
             }
         }

+        // FullPayload* from CoW reference to ExecutionPayload* (hopefully already owned).
+        impl<'a, T: EthSpec> From<Cow<'a, $wrapped_type_full<T>>> for $wrapper_type_full<T> {
+            fn from(execution_payload: Cow<'a, $wrapped_type_full<T>>) -> Self {
+                Self {
+                    execution_payload: $wrapped_type_full::from(execution_payload.into_owned()),
+                }
+            }
+        }
+
         impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for $wrapper_type_full<T> {
             type Error = Error;
             fn try_from(_: ExecutionPayloadHeader<T>) -> Result<Self, Self::Error> {
@@ -915,11 +913,12 @@ impl<T: EthSpec> AbstractExecPayload<T> for BlindedPayload<T> {

 impl<T: EthSpec> From<ExecutionPayload<T>> for BlindedPayload<T> {
     fn from(payload: ExecutionPayload<T>) -> Self {
-        match payload {
-            ExecutionPayload::Merge(payload) => BlindedPayload::Merge(payload.into()),
-            ExecutionPayload::Capella(payload) => BlindedPayload::Capella(payload.into()),
-            ExecutionPayload::Eip4844(payload) => BlindedPayload::Eip4844(payload.into()),
-        }
+        // This implementation is a bit wasteful in that it discards the payload body.
+        // Required by the top-level constraint on AbstractExecPayload but could maybe be loosened
+        // in future.
+        map_execution_payload_into_blinded_payload!(payload, |inner, cons| cons(From::from(
+            Cow::Owned(inner)
+        )))
     }
 }
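The net effect of the new `Cow` bounds: producing a blinded payload only needs to borrow the execution payload (the header is computed from the reference), while producing a full payload can take ownership without a copy, since `into_owned()` on a `Cow::Owned` is a move. A sketch, assuming the per-fork payload types exported by this crate:

use std::borrow::Cow;
use types::{BlindedPayloadMerge, ExecutionPayloadMerge, FullPayloadMerge, MainnetEthSpec};

fn demo(payload: ExecutionPayloadMerge<MainnetEthSpec>) {
    // Blinded: borrow; the transactions are reduced to a root, `payload` is kept.
    let blinded = BlindedPayloadMerge::from(Cow::Borrowed(&payload));
    // Full: move; no clone of the (potentially large) transaction list.
    let full = FullPayloadMerge::from(Cow::Owned(payload));
    let _ = (blinded, full);
}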
@@ -522,6 +522,24 @@ impl<E: EthSpec> SignedBeaconBlock<E> {
     }
 }

+impl<E: EthSpec, Payload: AbstractExecPayload<E>> ForkVersionDeserialize
+    for SignedBeaconBlock<E, Payload>
+{
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        Ok(map_fork_name!(
+            fork_name,
+            Self,
+            serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!(
+                "SignedBeaconBlock failed to deserialize: {:?}",
+                e
+            )))?
+        ))
+    }
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
@@ -1079,6 +1079,19 @@ fn http_port_flag() {
         .with_config(|config| assert_eq!(config.http_api.listen_port, port1));
 }
 #[test]
+fn empty_self_limiter_flag() {
+    // Test that an empty self-limiter flag is accepted, using the default rate limiting
+    // configuration.
+    CommandLineTest::new()
+        .flag("self-limiter", None)
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.network.outbound_rate_limiter_config,
+                Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default())
+            )
+        });
+}
+#[test]
 fn http_allow_origin_flag() {
     CommandLineTest::new()
         .flag("http-allow-origin", Some("127.0.0.99"))
@@ -231,6 +231,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 address of this server (e.g., http://localhost:5064).")
                 .takes_value(true),
         )
+        .arg(
+            Arg::with_name("enable-high-validator-count-metrics")
+                .long("enable-high-validator-count-metrics")
+                .help("Enable per-validator metrics for > 64 validators. \
+                    Note: per-validator metrics are automatically enabled for <= 64 validators. \
+                    Enabling this flag for higher validator counts will lead to a higher volume \
+                    of prometheus metrics being collected.")
+                .takes_value(false),
+        )
         /*
          * Explorer metrics
         */
@@ -53,6 +53,11 @@ pub struct Config {
     /// If true, enable functionality that monitors the network for attestations or proposals from
     /// any of the validators managed by this client before starting up.
     pub enable_doppelganger_protection: bool,
+    /// If true, then we publish validator-specific metrics (e.g. next attestation duty slot)
+    /// for all our managed validators.
+    /// Note: validator-specific metrics are published for low validator counts (<= 64) even
+    /// without this flag.
+    pub enable_high_validator_count_metrics: bool,
     /// Enable use of the blinded block endpoints during proposals.
     pub builder_proposals: bool,
     /// Overrides the timestamp field in builder api ValidatorRegistrationV1
@@ -99,6 +104,7 @@ impl Default for Config {
             http_metrics: <_>::default(),
             monitoring_api: None,
             enable_doppelganger_protection: false,
+            enable_high_validator_count_metrics: false,
             beacon_nodes_tls_certs: None,
             block_delay: None,
             builder_proposals: false,
@@ -273,6 +279,10 @@ impl Config {
             config.http_metrics.enabled = true;
         }

+        if cli_args.is_present("enable-high-validator-count-metrics") {
+            config.enable_high_validator_count_metrics = true;
+        }
+
         if let Some(address) = cli_args.value_of("metrics-address") {
             config.http_metrics.listen_addr = address
                 .parse::<IpAddr>()
@@ -9,6 +9,7 @@
 mod sync;

 use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced};
+use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY};
 use crate::{
     block_service::BlockServiceNotification,
     http_metrics::metrics,
|
|||||||
/// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch.
|
/// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch.
|
||||||
const HISTORICAL_DUTIES_EPOCHS: u64 = 2;
|
const HISTORICAL_DUTIES_EPOCHS: u64 = 2;
|
||||||
|
|
||||||
|
/// Minimum number of validators for which we auto-enable per-validator metrics.
|
||||||
|
/// For validators greater than this value, we need to manually set the `enable-per-validator-metrics`
|
||||||
|
/// flag in the cli to enable collection of per validator metrics.
|
||||||
|
const VALIDATOR_METRICS_MIN_COUNT: usize = 64;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
UnableToReadSlotClock,
|
UnableToReadSlotClock,
|
||||||
@@ -121,6 +127,7 @@ pub struct DutiesService<T, E: EthSpec> {
     /// This functionality is a little redundant since most BNs will likely reject duties when they
     /// aren't synced, but we keep it around for an emergency.
     pub require_synced: RequireSynced,
+    pub enable_high_validator_count_metrics: bool,
     pub context: RuntimeContext<E>,
     pub spec: ChainSpec,
 }
@@ -220,6 +227,12 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
             .cloned()
             .collect()
     }
+
+    /// Returns `true` if we should collect per-validator metrics and `false` otherwise.
+    pub fn per_validator_metrics(&self) -> bool {
+        self.enable_high_validator_count_metrics
+            || self.total_validator_count() <= VALIDATOR_METRICS_MIN_COUNT
+    }
 }

 /// Start the service that periodically polls the beacon node for validator duties. This will start
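In other words, per-validator metrics are always collected for small validator sets and are opt-in above the threshold. A self-contained restatement of the rule:

const VALIDATOR_METRICS_MIN_COUNT: usize = 64;

fn should_collect(flag_enabled: bool, validator_count: usize) -> bool {
    flag_enabled || validator_count <= VALIDATOR_METRICS_MIN_COUNT
}

fn main() {
    assert!(should_collect(false, 64)); // small sets: always on
    assert!(!should_collect(false, 65)); // large sets: off by default
    assert!(should_collect(true, 10_000)); // opt in via --enable-high-validator-count-metrics
}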
@@ -501,6 +514,7 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>(
         current_epoch,
         &local_indices,
         &local_pubkeys,
+        current_slot,
     )
     .await
     {
@@ -520,9 +534,14 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>(
     );

     // Download the duties and update the duties for the next epoch.
-    if let Err(e) =
-        poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys)
-            .await
+    if let Err(e) = poll_beacon_attesters_for_epoch(
+        duties_service,
+        next_epoch,
+        &local_indices,
+        &local_pubkeys,
+        current_slot,
+    )
+    .await
     {
         error!(
             log,
@@ -619,6 +638,7 @@ async fn poll_beacon_attesters_for_epoch<T: SlotClock + 'static, E: EthSpec>(
     epoch: Epoch,
     local_indices: &[u64],
     local_pubkeys: &HashSet<PublicKeyBytes>,
+    current_slot: Slot,
 ) -> Result<(), Error> {
     let log = duties_service.context.log();

@@ -671,6 +691,35 @@ async fn poll_beacon_attesters_for_epoch<T: SlotClock + 'static, E: EthSpec>(
         .data
         .into_iter()
        .filter(|duty| {
+            if duties_service.per_validator_metrics() {
+                let validator_index = duty.validator_index;
+                let duty_slot = duty.slot;
+                if let Some(existing_slot_gauge) =
+                    get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()])
+                {
+                    let existing_slot = Slot::new(existing_slot_gauge.get() as u64);
+                    let existing_epoch = existing_slot.epoch(E::slots_per_epoch());
+
+                    // The first condition ensures that we switch to the next epoch's duty slot
+                    // once the current epoch's duty slot passes.
+                    // The second condition ensures that next-epoch duties don't override
+                    // current-epoch duties.
+                    if existing_slot < current_slot
+                        || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch
+                            && duty_slot > current_slot
+                            && duty_slot != existing_slot)
+                    {
+                        existing_slot_gauge.set(duty_slot.as_u64() as i64);
+                    }
+                } else {
+                    set_int_gauge(
+                        &ATTESTATION_DUTY,
+                        &[&validator_index.to_string()],
+                        duty_slot.as_u64() as i64,
+                    );
+                }
+            }
+
             local_pubkeys.contains(&duty.pubkey) && {
                 // Only update the duties if either is true:
                 //
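The update condition is subtle, so a worked example may help (numbers hypothetical, 32 slots per epoch): if the gauge holds a duty at slot 40 and `current_slot` is 45, the stored duty has passed and the gauge advances; if the gauge holds a pending duty at slot 50 (epoch 1) and a next-epoch duty at slot 70 (epoch 2) arrives, neither arm fires, so the pending duty is preserved. A self-contained restatement:

const SLOTS_PER_EPOCH: u64 = 32;

fn epoch(slot: u64) -> u64 {
    slot / SLOTS_PER_EPOCH
}

// Mirrors the filter above: should the per-validator duty gauge move to `duty_slot`?
fn should_update(existing_slot: u64, duty_slot: u64, current_slot: u64) -> bool {
    existing_slot < current_slot
        || (epoch(duty_slot) <= epoch(existing_slot)
            && duty_slot > current_slot
            && duty_slot != existing_slot)
}

fn main() {
    // Stored duty already passed: switch to the newly downloaded one.
    assert!(should_update(40, 70, 45));
    // Next-epoch duty must not clobber a still-pending current-epoch duty.
    assert!(!should_update(50, 70, 45));
}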
@@ -177,6 +177,12 @@ lazy_static::lazy_static! {
         "Duration to obtain a signature",
         &["type"]
     );
+
+    pub static ref ATTESTATION_DUTY: Result<IntGaugeVec> = try_create_int_gauge_vec(
+        "vc_attestation_duty_slot",
+        "Attestation duty slot for all managed validators",
+        &["validator"]
+    );
 }

 pub fn gather_prometheus_metrics<T: EthSpec>(
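With this gauge in place, each managed validator gets its own labelled series. Assuming a standard Prometheus scrape of the validator client's metrics endpoint, the upcoming duty slot for a single validator can then be read with a query along the lines of `vc_attestation_duty_slot{validator="123"}` (index label illustrative).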
@@ -422,6 +422,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
         },
         spec: context.eth2_config.spec.clone(),
         context: duties_context,
+        enable_high_validator_count_metrics: config.enable_high_validator_count_metrics,
     });

     // Update the metrics server.