diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 27f5dabed..eb927338b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -55,7 +55,7 @@ jobs: VERSION: ${{ env.VERSION }} VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} build-docker-single-arch: - name: build-docker-${{ matrix.binary }} + name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }} runs-on: ubuntu-22.04 strategy: matrix: @@ -63,6 +63,10 @@ jobs: aarch64-portable, x86_64, x86_64-portable] + features: [ + {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"}, + {version_suffix: "-dev", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc,spec-minimal"} + ] include: - profile: maxperf @@ -72,7 +76,9 @@ jobs: DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} - CROSS_FEATURES: null + FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} + FEATURES: ${{ matrix.features.env }} + CROSS_FEATURES: ${{ matrix.features.env }} steps: - uses: actions/checkout@v3 - name: Update Rust @@ -83,7 +89,7 @@ jobs: - name: Cross build Lighthouse binary run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-${{ matrix.binary }} + env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }} - name: Move cross-built binary into Docker scope (if ARM) if: startsWith(matrix.binary, 'aarch64') run: | @@ -111,7 +117,8 @@ jobs: docker buildx build \ --platform=linux/${SHORT_ARCH} \ --file ./Dockerfile.cross . \ - --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \ + --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \ + --build-arg FEATURES=${FEATURES} \ --provenance=false \ --push build-docker-multiarch: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8ca6ab0f9..2e63b4d6c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -134,11 +134,17 @@ jobs: - name: Build Lighthouse for Windows portable if: matrix.arch == 'x86_64-windows-portable' - run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} + # NOTE: profile set to release until this rustc issue is fixed: + # + # https://github.com/rust-lang/rust/issues/107781 + # + # tracked at: https://github.com/sigp/lighthouse/issues/3964 + run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile release - name: Build Lighthouse for Windows modern if: matrix.arch == 'x86_64-windows' - run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} + # NOTE: profile set to release (see above) + run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile release - name: Configure GPG and create artifacts if: startsWith(matrix.arch, 'x86_64-windows') != true diff --git a/Cargo.lock b/Cargo.lock index 8ca3e9b68..1d8d88f38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -463,7 +463,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa", + "itoa 1.0.5", "matchit", "memchr", "mime", @@ -544,7 +544,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "beacon-api-client" version = "0.1.0" -source = 
"git+https://github.com/ralexstokes/beacon-api-client?rev=7d5d8dad1648f771573f42585ad8080a45b05689#7d5d8dad1648f771573f42585ad8080a45b05689" +source = "git+https://github.com/ralexstokes/beacon-api-client#53690a711e33614d59d4d44fb09762b4699e2a4e" dependencies = [ "ethereum-consensus", "http", @@ -570,7 +570,6 @@ dependencies = [ "eth1", "eth2", "eth2_hashing", - "eth2_network_config", "eth2_ssz", "eth2_ssz_derive", "eth2_ssz_types", @@ -582,7 +581,6 @@ dependencies = [ "hex", "int_to_bytes", "itertools", - "kzg", "lazy_static", "lighthouse_metrics", "logging", @@ -602,8 +600,6 @@ dependencies = [ "serde_json", "slasher", "slog", - "slog-async", - "slog-term", "sloggers", "slot_clock", "smallvec", @@ -644,7 +640,6 @@ dependencies = [ "node_test_rig", "sensitive_url", "serde", - "serde_json", "slasher", "slog", "store", @@ -820,6 +815,18 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +[[package]] +name = "bstr" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "buf_redux" version = "0.8.4" @@ -889,15 +896,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "c-kzg" -version = "0.1.0" -source = "git+https://github.com/ethereum/c-kzg-4844?rev=69f6155d7524247be9d3f54ab3bfbe33a0345622#69f6155d7524247be9d3f54ab3bfbe33a0345622" -dependencies = [ - "hex", - "libc", -] - [[package]] name = "cached_tree_hash" version = "0.1.0" @@ -1360,12 +1358,13 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.0" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" +checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ + "bstr", "csv-core", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] @@ -1436,9 +1435,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.90" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" +checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9" dependencies = [ "cc", "cxxbridge-flags", @@ -1448,9 +1447,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.90" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" +checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d" dependencies = [ "cc", "codespan-reporting", @@ -1463,15 +1462,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.90" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" +checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a" [[package]] name = "cxxbridge-macro" -version = "1.0.90" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" +checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2" dependencies = [ "proc-macro2", "quote", @@ -1830,7 +1829,7 @@ dependencies = 
[ "enr", "fnv", "futures", - "hashlink 0.7.0", + "hashlink", "hex", "hkdf", "lazy_static", @@ -2206,8 +2205,6 @@ dependencies = [ "enr", "eth2_config", "eth2_ssz", - "kzg", - "serde_json", "serde_yaml", "tempfile", "types", @@ -2351,7 +2348,7 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=a8110af76d97bf2bf27fb987a671808fcbdf1834#a8110af76d97bf2bf27fb987a671808fcbdf1834" +source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" dependencies = [ "async-stream", "blst", @@ -2360,6 +2357,7 @@ dependencies = [ "hex", "integer-sqrt", "multiaddr 0.14.0", + "multihash", "rand 0.8.5", "serde", "serde_json", @@ -2517,7 +2515,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lru 0.7.8", - "mev-build-rs", + "mev-rs", "parking_lot 0.12.1", "rand 0.8.5", "reqwest", @@ -2565,9 +2563,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.9.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -2989,7 +2987,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.4", "tracing", ] @@ -3050,15 +3048,6 @@ dependencies = [ "hashbrown 0.11.2", ] -[[package]] -name = "hashlink" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" -dependencies = [ - "hashbrown 0.12.3", -] - [[package]] name = "headers" version = "0.3.8" @@ -3198,7 +3187,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 1.0.5", ] [[package]] @@ -3317,7 +3306,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 1.0.5", "pin-project-lite 0.2.9", "socket2", "tokio", @@ -3481,7 +3470,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.4.0", + "parity-scale-codec 3.3.0", ] [[package]] @@ -3608,6 +3597,12 @@ dependencies = [ "either", ] +[[package]] +name = "itoa" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + [[package]] name = "itoa" version = "1.0.5" @@ -3715,23 +3710,6 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "kzg" -version = "0.1.0" -dependencies = [ - "arbitrary", - "c-kzg", - "derivative", - "eth2_hashing", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_derive", - "hex", - "serde", - "serde_derive", - "tree_hash", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -3762,13 +3740,11 @@ dependencies = [ "environment", "eth1_test_rig", "eth2", - "eth2_hashing", "eth2_network_config", "eth2_ssz", "eth2_wallet", "genesis", "int_to_bytes", - "kzg", "lighthouse_network", "lighthouse_version", "log", @@ -4246,7 +4222,7 @@ dependencies = [ "thiserror", "tinytemplate", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.4", "webrtc", ] @@ -4333,9 +4309,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = 
"0.25.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" dependencies = [ "cc", "pkg-config", @@ -4703,18 +4679,19 @@ dependencies = [ ] [[package]] -name = "mev-build-rs" +name = "mev-rs" version = "0.2.1" -source = "git+https://github.com/ralexstokes/mev-rs?rev=6c99b0fbdc0427b1625469d2e575303ce08de5b8#6c99b0fbdc0427b1625469d2e575303ce08de5b8" +source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889" dependencies = [ "async-trait", "axum", "beacon-api-client", "ethereum-consensus", + "hyper", "serde", - "serde_json", "ssz-rs", "thiserror", + "tokio", "tracing", ] @@ -4763,14 +4740,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.42.0", ] [[package]] @@ -5021,6 +4998,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_types", "ethereum-types 0.14.1", + "execution_layer", "exit-future", "fnv", "futures", @@ -5255,9 +5233,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" [[package]] name = "oneshot_broadcast" @@ -5436,9 +5414,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.4.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" +checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -5878,7 +5856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa", + "itoa 1.0.5", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -6080,9 +6058,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.21.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" +checksum = "9d24607049214c5e42d3df53ac1d8a23c34cc6a5eefe3122acb2c72174719959" dependencies = [ "r2d2", "rusqlite", @@ -6315,7 +6293,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.23.4", - "tokio-util 0.7.7", + "tokio-util 0.7.4", "tower-service", "url", "wasm-bindgen", @@ -6441,15 +6419,16 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.28.0" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.1", + "hashlink", "libsqlite3-sys", + "memchr", "smallvec", ] @@ -6600,7 +6579,7 @@ 
checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.4.0", + "parity-scale-codec 3.3.0", "scale-info-derive", ] @@ -6813,6 +6792,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62" +dependencies = [ + "serde", + "serde_derive", +] + [[package]] name = "serde_array_query" version = "0.1.0" @@ -6846,11 +6835,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" dependencies = [ - "itoa", + "itoa 1.0.5", "ryu", "serde", ] @@ -6873,7 +6862,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa", + "itoa 1.0.5", "ryu", "serde", ] @@ -7010,9 +6999,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] @@ -7318,11 +7307,10 @@ dependencies = [ [[package]] name = "ssz-rs" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" dependencies = [ "bitvec 1.0.1", "hex", - "lazy_static", "num-bigint", "serde", "sha2 0.9.9", @@ -7333,7 +7321,7 @@ dependencies = [ [[package]] name = "ssz-rs-derive" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" dependencies = [ "proc-macro2", "quote", @@ -7712,11 +7700,10 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ - "cfg-if", "once_cell", ] @@ -7746,7 +7733,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ - "itoa", + "itoa 1.0.5", "libc", "num_threads", "serde", @@ -7915,7 +7902,7 @@ dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.4", ] [[package]] @@ -7965,9 +7952,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = 
"0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -8308,7 +8295,6 @@ dependencies = [ "hex", "int_to_bytes", "itertools", - "kzg", "lazy_static", "log", "maplit", @@ -8322,6 +8308,7 @@ dependencies = [ "rusqlite", "safe_arith", "serde", + "serde-big-array", "serde_derive", "serde_json", "serde_with", @@ -9013,9 +9000,9 @@ dependencies = [ [[package]] name = "webrtc-ice" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" +checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7" dependencies = [ "arc-swap", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 581c056a0..37440a60b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,6 +104,13 @@ tree_hash_derive = { path = "consensus/tree_hash_derive" } eth2_serde_utils = { path = "consensus/serde_utils" } arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" } +[patch."https://github.com/ralexstokes/mev-rs"] +mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } +[patch."https://github.com/ralexstokes/ethereum-consensus"] +ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" } +[patch."https://github.com/ralexstokes/ssz-rs"] +ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" } + [profile.maxperf] inherits = "release" lto = "fat" diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs new file mode 100644 index 000000000..a4a661197 --- /dev/null +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -0,0 +1,195 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::attestation_rewards::{IdealAttestationRewards, TotalAttestationRewards}; +use eth2::lighthouse::StandardAttestationRewards; +use participation_cache::ParticipationCache; +use safe_arith::SafeArith; +use slog::{debug, Logger}; +use state_processing::{ + common::altair::BaseRewardPerIncrement, + per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight}, +}; +use std::collections::HashMap; +use store::consts::altair::{ + PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, + TIMELY_TARGET_FLAG_INDEX, +}; +use types::consts::altair::WEIGHT_DENOMINATOR; + +use types::{Epoch, EthSpec}; + +use eth2::types::ValidatorId; + +impl BeaconChain { + pub fn compute_attestation_rewards( + &self, + epoch: Epoch, + validators: Vec, + log: Logger, + ) -> Result { + debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); + + // Get state + let spec = &self.spec; + + let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch()); + + let state_root = self + .state_root_at_slot(state_slot)? + .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; + + let mut state = self + .get_state(&state_root, Some(state_slot))? 
+ .ok_or(BeaconChainError::MissingBeaconState(state_root))?; + + // Calculate ideal_rewards + let participation_cache = ParticipationCache::new(&state, spec)?; + + let previous_epoch = state.previous_epoch(); + + let mut ideal_rewards_hashmap = HashMap::new(); + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + let weight = get_flag_weight(flag_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + + let unslashed_participating_indices = participation_cache + .get_unslashed_participating_indices(flag_index, previous_epoch)?; + + let unslashed_participating_balance = + unslashed_participating_indices + .total_balance() + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + + let unslashed_participating_increments = + unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; + + let total_active_balance = participation_cache.current_epoch_total_active_balance(); + + let active_increments = + total_active_balance.safe_div(spec.effective_balance_increment)?; + + let base_reward_per_increment = + BaseRewardPerIncrement::new(total_active_balance, spec)?; + + for effective_balance_eth in 0..=32 { + let effective_balance = + effective_balance_eth.safe_mul(spec.effective_balance_increment)?; + let base_reward = + effective_balance_eth.safe_mul(base_reward_per_increment.as_u64())?; + + let penalty = -(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)? as i64); + + let reward_numerator = base_reward + .safe_mul(weight)? + .safe_mul(unslashed_participating_increments)?; + + let ideal_reward = reward_numerator + .safe_div(active_increments)? + .safe_div(WEIGHT_DENOMINATOR)?; + if !state.is_in_inactivity_leak(previous_epoch, spec) { + ideal_rewards_hashmap + .insert((flag_index, effective_balance), (ideal_reward, penalty)); + } else { + ideal_rewards_hashmap.insert((flag_index, effective_balance), (0, penalty)); + } + } + } + + // Calculate total_rewards + let mut total_rewards: Vec = Vec::new(); + + let validators = if validators.is_empty() { + participation_cache.eligible_validator_indices().to_vec() + } else { + validators + .into_iter() + .map(|validator| match validator { + ValidatorId::Index(i) => Ok(i as usize), + ValidatorId::PublicKey(pubkey) => state + .get_validator_index(&pubkey)? + .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), + }) + .collect::, _>>()? + }; + + for validator_index in &validators { + let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?; + let mut head_reward = 0u64; + let mut target_reward = 0i64; + let mut source_reward = 0i64; + + if eligible { + let effective_balance = state.get_effective_balance(*validator_index)?; + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + let (ideal_reward, penalty) = ideal_rewards_hashmap + .get(&(flag_index, effective_balance)) + .ok_or(BeaconChainError::AttestationRewardsError)?; + let voted_correctly = participation_cache + .get_unslashed_participating_indices(flag_index, previous_epoch) + .map_err(|_| BeaconChainError::AttestationRewardsError)? 
+ .contains(*validator_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + if voted_correctly { + if flag_index == TIMELY_HEAD_FLAG_INDEX { + head_reward += ideal_reward; + } else if flag_index == TIMELY_TARGET_FLAG_INDEX { + target_reward += *ideal_reward as i64; + } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { + source_reward += *ideal_reward as i64; + } + } else if flag_index == TIMELY_HEAD_FLAG_INDEX { + head_reward = 0; + } else if flag_index == TIMELY_TARGET_FLAG_INDEX { + target_reward = *penalty; + } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { + source_reward = *penalty; + } + } + } + total_rewards.push(TotalAttestationRewards { + validator_index: *validator_index as u64, + head: head_reward, + target: target_reward, + source: source_reward, + }); + } + + // Convert hashmap to vector + let mut ideal_rewards: Vec = ideal_rewards_hashmap + .iter() + .map( + |((flag_index, effective_balance), (ideal_reward, _penalty))| { + (flag_index, effective_balance, ideal_reward) + }, + ) + .fold( + HashMap::new(), + |mut acc, (flag_index, &effective_balance, ideal_reward)| { + let entry = acc + .entry(effective_balance) + .or_insert(IdealAttestationRewards { + effective_balance, + head: 0, + target: 0, + source: 0, + }); + match *flag_index { + TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward, + TIMELY_TARGET_FLAG_INDEX => entry.target += ideal_reward, + TIMELY_HEAD_FLAG_INDEX => entry.head += ideal_reward, + _ => {} + } + acc + }, + ) + .into_values() + .collect::>(); + ideal_rewards.sort_by(|a, b| a.effective_balance.cmp(&b.effective_balance)); + + Ok(StandardAttestationRewards { + ideal_rewards, + total_rewards, + }) + } +} diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs new file mode 100644 index 000000000..786402c99 --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -0,0 +1,237 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::StandardBlockReward; +use operation_pool::RewardCache; +use safe_arith::SafeArith; +use slog::error; +use state_processing::{ + common::{ + altair, get_attestation_participation_flag_indices, get_attesting_indices_from_state, + }, + per_block_processing::{ + altair::sync_committee::compute_sync_aggregate_rewards, get_slashable_indices, + }, +}; +use store::{ + consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, + RelativeEpoch, +}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Hash256}; + +type BeaconBlockSubRewardValue = u64; + +impl BeaconChain { + pub fn compute_beacon_block_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &mut BeaconState, + ) -> Result { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + + let proposer_index = block.proposer_index(); + + let sync_aggregate_reward = + self.compute_beacon_block_sync_aggregate_reward(block, state)?; + + let proposer_slashing_reward = self + .compute_beacon_block_proposer_slashing_reward(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating proposer slashing reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardError + })?; + + let attester_slashing_reward = self + .compute_beacon_block_attester_slashing_reward(block, state) + 
.map_err(|e| { + error!( + self.log, + "Error calculating attester slashing reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardError + })?; + + let block_attestation_reward = if let BeaconState::Base(_) = state { + self.compute_beacon_block_attestation_reward_base(block, block_root, state) + .map_err(|e| { + error!( + self.log, + "Error calculating base block attestation reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardAttestationError + })? + } else { + self.compute_beacon_block_attestation_reward_altair(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating altair block attestation reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardAttestationError + })? + }; + + let total_reward = sync_aggregate_reward + .safe_add(proposer_slashing_reward)? + .safe_add(attester_slashing_reward)? + .safe_add(block_attestation_reward)?; + + Ok(StandardBlockReward { + proposer_index, + total: total_reward, + attestations: block_attestation_reward, + sync_aggregate: sync_aggregate_reward, + proposer_slashings: proposer_slashing_reward, + attester_slashings: attester_slashing_reward, + }) + } + + fn compute_beacon_block_sync_aggregate_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) + .map_err(|_| BeaconChainError::BlockRewardSyncError)?; + Ok(sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit) + } else { + Ok(0) + } + } + + fn compute_beacon_block_proposer_slashing_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + let mut proposer_slashing_reward = 0; + + let proposer_slashings = block.body().proposer_slashings(); + + for proposer_slashing in proposer_slashings { + proposer_slashing_reward.safe_add_assign( + state + .get_validator(proposer_slashing.proposer_index() as usize)? + .effective_balance + .safe_div(self.spec.whistleblower_reward_quotient)?, + )?; + } + + Ok(proposer_slashing_reward) + } + + fn compute_beacon_block_attester_slashing_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + let mut attester_slashing_reward = 0; + + let attester_slashings = block.body().attester_slashings(); + + for attester_slashing in attester_slashings { + for attester_index in get_slashable_indices(state, attester_slashing)? { + attester_slashing_reward.safe_add_assign( + state + .get_validator(attester_index as usize)? + .effective_balance + .safe_div(self.spec.whistleblower_reward_quotient)?, + )?; + } + } + + Ok(attester_slashing_reward) + } + + fn compute_beacon_block_attestation_reward_base>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + // Call compute_block_reward in the base case + // Since base does not have a sync aggregate, we only grab the attestation portion of the returned + // value + let mut reward_cache = RewardCache::default(); + let block_attestation_reward = self + .compute_block_reward(block, block_root, state, &mut reward_cache, true)?
+ .attestation_rewards + .total; + + Ok(block_attestation_reward) + } + + fn compute_beacon_block_attestation_reward_altair>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &mut BeaconState, + ) -> Result { + let total_active_balance = state.get_total_active_balance()?; + let base_reward_per_increment = + altair::BaseRewardPerIncrement::new(total_active_balance, &self.spec)?; + + let mut total_proposer_reward = 0; + + let proposer_reward_denominator = WEIGHT_DENOMINATOR + .safe_sub(PROPOSER_WEIGHT)? + .safe_mul(WEIGHT_DENOMINATOR)? + .safe_div(PROPOSER_WEIGHT)?; + + for attestation in block.body().attestations() { + let data = &attestation.data; + let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); + let participation_flag_indices = get_attestation_participation_flag_indices( + state, + data, + inclusion_delay, + &self.spec, + )?; + + let attesting_indices = get_attesting_indices_from_state(state, attestation)?; + + let mut proposer_reward_numerator = 0; + for index in attesting_indices { + let index = index as usize; + for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { + let epoch_participation = + state.get_epoch_participation_mut(data.target.epoch)?; + let validator_participation = epoch_participation + .get_mut(index) + .ok_or(BeaconStateError::ParticipationOutOfBounds(index))?; + + if participation_flag_indices.contains(&flag_index) + && !validator_participation.has_flag(flag_index)? + { + validator_participation.add_flag(flag_index)?; + proposer_reward_numerator.safe_add_assign( + altair::get_base_reward( + state, + index, + base_reward_per_increment, + &self.spec, + )? + .safe_mul(weight)?, + )?; + } + } + } + total_proposer_reward.safe_add_assign( + proposer_reward_numerator.safe_div(proposer_reward_denominator)?, + )?; + } + + Ok(total_proposer_reward) + } +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ff2992e0b..0dd02d896 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -10,7 +10,7 @@ use crate::blob_cache::BlobCache; use crate::blob_verification::{AsBlock, AvailableBlock, BlockWrapper}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ - check_block_is_finalized_descendant, check_block_relevancy, get_block_root, + check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root, signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; @@ -1025,11 +1025,8 @@ impl BeaconChain { })? .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; - //FIXME(sean) avoid the clone by comparing refs to headers (`as_execution_payload_header` method ?) - let full_payload: FullPayload = execution_payload.clone().into(); - // Verify payload integrity. - let header_from_payload = full_payload.to_execution_payload_header(); + let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref()); if header_from_payload != execution_payload_header { for txn in execution_payload.transactions() { debug!( @@ -2900,7 +2897,7 @@ impl BeaconChain { let mut fork_choice = self.canonical_head.fork_choice_write_lock(); // Do not import a block that doesn't descend from the finalized root. 
- let signed_block = check_block_is_finalized_descendant(self, &fork_choice, signed_block)?; + let signed_block = check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, signed_block)?; let block = signed_block.message(); // Register the new block with the fork choice service. diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index cd178317f..dee1d9686 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -792,7 +792,7 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - let block = check_block_is_finalized_descendant( + let block = check_block_is_finalized_checkpoint_or_descendant( chain, &chain.canonical_head.fork_choice_write_lock(), block, @@ -1647,12 +1647,12 @@ fn check_block_against_finalized_slot( /// ## Warning /// /// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. -pub fn check_block_is_finalized_descendant>( +pub fn check_block_is_finalized_checkpoint_or_descendant>( chain: &BeaconChain, fork_choice: &BeaconForkChoice, - block: B, + block: &Arc>, ) -> Result> { - if fork_choice.is_descendant_of_finalized(block.parent_root()) { + if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { Ok(block) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 28e6bf7a6..3b4c46249 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -51,7 +51,6 @@ pub enum BeaconChainError { }, SlotClockDidNotStart, NoStateForSlot(Slot), - UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), DBError(store::Error), @@ -157,10 +156,12 @@ pub enum BeaconChainError { ExecutionForkChoiceUpdateInvalid { status: PayloadStatus, }, + BlockRewardError, BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, SyncCommitteeRewardsSyncError, + AttestationRewardsError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), HeadBlockMissingFromForkChoice(Hash256), diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 86a5fded5..9be5f13f3 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,6 +1,8 @@ #![recursion_limit = "128"] // For lazy-static +pub mod attestation_rewards; pub mod attestation_verification; mod attester_cache; +pub mod beacon_block_reward; mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 5b016b464..1b687a8b6 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -41,9 +41,9 @@ lazy_static = "1.4.0" ethers-core = "1.0.2" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" } -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" } -ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" }
+mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } +ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 5532fbb34..fe4058af0 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -364,7 +364,7 @@ impl Engine { Ok(result) } Err(error) => { - error!( + warn!( self.log, "Execution engine call failed"; "error" => ?error, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2018059cb..f400da002 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -40,15 +40,18 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; +use tree_hash::TreeHash; +use types::{Withdrawals}; use types::consts::eip4844::BLOB_TX_TYPE; use types::transaction::{AccessTuple, BlobTransaction, EcdsaSignature, SignedBlobTransaction}; use types::{ + BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, + ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, blobs_sidecar::{Blobs, KzgCommitments}, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, }; use types::{AbstractExecPayload, BeaconStateError, ExecPayload, VersionedHash}; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Transaction, Uint256, }; @@ -1816,10 +1819,10 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, &[metrics::FAILURE], ); - error!( + warn!( self.log(), "Builder failed to reveal payload"; - "info" => "this relay failure may cause a missed proposal", + "info" => "this is common behaviour for some builders and may not indicate an issue", "error" => ?e, "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, @@ -1870,10 +1873,9 @@ enum InvalidBuilderPayload { signature: Signature, pubkey: PublicKeyBytes, }, - #[allow(dead_code)] WithdrawalsRoot { - payload: Hash256, - expected: Hash256, + payload: Option, + expected: Option, }, } @@ -1926,10 +1928,16 @@ impl fmt::Display for InvalidBuilderPayload { signature, pubkey ), InvalidBuilderPayload::WithdrawalsRoot { payload, expected } => { + let opt_string = |opt_hash: &Option| { + opt_hash + .map(|hash| hash.to_string()) + .unwrap_or_else(|| "None".to_string()) + }; write!( f, "payload withdrawals root was {} not {}", - payload, expected + opt_string(payload), + opt_string(expected) ) } } @@ -1960,6 +1968,13 @@ fn verify_builder_bid>( ); } + let expected_withdrawals_root = payload_attributes + .withdrawals() + .ok() + .cloned() + .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); + let payload_withdrawals_root = header.withdrawals_root().ok(); + if payload_value < profit_threshold { Err(Box::new(InvalidBuilderPayload::LowValue { profit_threshold, @@ -1995,6 +2010,11 @@ fn verify_builder_bid>( signature: bid.data.signature.clone(), pubkey: bid.data.message.pubkey, })) + } else if payload_withdrawals_root != expected_withdrawals_root { + Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot { + payload: payload_withdrawals_root, + expected: expected_withdrawals_root, + })) } 
else { Ok(()) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 40a0c41af..199726501 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -3,15 +3,19 @@ use crate::{Config, ExecutionLayer, PayloadAttributes}; use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; -use ethereum_consensus::crypto::{SecretKey, Signature}; -use ethereum_consensus::primitives::BlsPublicKey; pub use ethereum_consensus::state_transition::Context; +use ethereum_consensus::{ + crypto::{SecretKey, Signature}, + primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256}, + state_transition::Error, +}; use fork_choice::ForkchoiceUpdateParameters; -use mev_build_rs::{ +use mev_rs::{ + bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, + capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, - ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, - SignedValidatorRegistration, + SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -39,25 +43,129 @@ pub enum Operation { PrevRandao(Hash256), BlockNumber(usize), Timestamp(usize), + WithdrawalsRoot(Hash256), } impl Operation { - fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + fn apply(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { match self { Operation::FeeRecipient(fee_recipient) => { - bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? + *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? } - Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, - Operation::Value(value) => bid.value = to_ssz_rs(&value)?, - Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, - Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, - Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, - Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, + Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64, + Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?, + Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?, + Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?, + Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64, + Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64, + Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?, } Ok(()) } } +// Helper trait exposing the mutable BuilderBid accessors shared by the Bellatrix and Capella variants. +pub trait BidStuff { + fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress; + fn gas_limit_mut(&mut self) -> &mut u64; + fn value_mut(&mut self) -> &mut U256; + fn parent_hash_mut(&mut self) -> &mut Hash32; + fn prev_randao_mut(&mut self) -> &mut Hash32; + fn block_number_mut(&mut self) -> &mut u64; + fn timestamp_mut(&mut self) -> &mut u64; + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; + + fn sign_builder_message( + &mut self, + signing_key: &SecretKey, + context: &Context, + ) -> Result; + + fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid; +} + +impl BidStuff for BuilderBid { + fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress { + match self { + Self::Bellatrix(bid) => &mut bid.header.fee_recipient, + Self::Capella(bid) => &mut bid.header.fee_recipient, + } + } + + fn gas_limit_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.gas_limit, + Self::Capella(bid) => &mut bid.header.gas_limit, + } + } + + fn value_mut(&mut self) -> &mut U256 { + match self { + Self::Bellatrix(bid) => &mut bid.value, + Self::Capella(bid) => &mut bid.value, + } + } + + fn parent_hash_mut(&mut self) -> &mut Hash32 { + match self { + Self::Bellatrix(bid) => &mut bid.header.parent_hash, + Self::Capella(bid) => &mut bid.header.parent_hash, + } + } + + fn prev_randao_mut(&mut self) -> &mut Hash32 { + match self { + Self::Bellatrix(bid) => &mut bid.header.prev_randao, + Self::Capella(bid) => &mut bid.header.prev_randao, + } + } + + fn block_number_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.block_number, + Self::Capella(bid) => &mut bid.header.block_number, + } + } + + fn timestamp_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.timestamp, + Self::Capella(bid) => &mut bid.header.timestamp, + } + } + + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { + match self { + Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( + "withdrawals_root called on bellatrix bid".to_string(), + )), + Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), + } + } + + fn sign_builder_message( + &mut self, + signing_key: &SecretKey, + context: &Context, + ) -> Result { + match self { + Self::Bellatrix(message) => sign_builder_message(message, signing_key, context), + Self::Capella(message) => sign_builder_message(message, signing_key, context), + } + } + + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { + match self { + Self::Bellatrix(message) => { + SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature }) + } + Self::Capella(message) => { + SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature }) + } + } + } +} + pub struct TestingBuilder { server: BlindedBlockProviderServer>, pub builder: MockBuilder, } @@ -112,7 +220,10 @@ impl TestingBuilder { } pub async fn run(&self) { - self.server.run().await + let server = self.server.serve(); + if let Err(err) = server.await { + println!("error while listening for incoming: {err}") + } } } @@ -163,7 +274,7 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + fn apply_operations(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { op.apply(bid)?; } Ok(()) } }
#[async_trait] -impl mev_build_rs::BlindedBlockProvider for MockBuilder { +impl mev_rs::BlindedBlockProvider for MockBuilder { async fn register_validators( &self, registrations: &mut [SignedValidatorRegistration], @@ -201,6 +312,7 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { bid_request: &BidRequest, ) -> Result { let slot = Slot::new(bid_request.slot); + let fork = self.spec.fork_name_at_slot::(slot); let signed_cached_data = self .val_registration_cache .read() @@ -216,9 +328,13 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing head block"))?; - let block = head.data.message_merge().map_err(convert_err)?; + let block = head.data.message(); let head_block_root = block.tree_hash_root(); - let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; + let head_execution_hash = block + .body() + .execution_payload() + .map_err(convert_err)? + .block_hash(); if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { return Err(BlindedBlockProviderError::Custom(format!( "head mismatch: {} {}", @@ -233,12 +349,11 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing finalized block"))? .data - .message_merge() + .message() + .body() + .execution_payload() .map_err(convert_err)? - .body - .execution_payload - .execution_payload - .block_hash; + .block_hash(); let justified_execution_hash = self .beacon_client @@ -247,12 +362,11 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing finalized block"))? .data - .message_merge() + .message() + .body() + .execution_payload() .map_err(convert_err)? - .body - .execution_payload - .execution_payload - .block_hash; + .block_hash(); let val_index = self .beacon_client @@ -288,12 +402,22 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .get_randao_mix(head_state.current_epoch()) .map_err(convert_err)?; - // FIXME: think about proper fork here - let payload_attributes = - PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None); + let payload_attributes = match fork { + ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None), + // the withdrawals root is filled in by operations + ForkName::Capella | ForkName::Eip4844 => { + PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) + } + ForkName::Base | ForkName::Altair => { + return Err(BlindedBlockProviderError::Custom(format!( + "Unsupported fork: {}", + fork + ))); + } + }; self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes) + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) .await; let forkchoice_update_params = ForkchoiceUpdateParameters { @@ -303,17 +427,13 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { finalized_hash: Some(finalized_execution_hash), }; - let payload_attributes = - PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None); - let payload = self .el .get_full_payload_caching::>( head_execution_hash, &payload_attributes, forkchoice_update_params, - // TODO: do we need to write a test for this if this is Capella fork? - ForkName::Merge, + fork, ) .await .map_err(convert_err)? 
@@ -321,44 +441,54 @@ .to_execution_payload_header(); let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - let mut header: ServerPayloadHeader = - serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; - - header.gas_limit = cached_data.gas_limit; - - let mut message = BuilderBid { - header, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), + let mut message = match fork { + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, + public_key: self.builder_sk.public_key(), + }), + ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix { + header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, + public_key: self.builder_sk.public_key(), + }), + ForkName::Base | ForkName::Altair | ForkName::Eip4844 => { + return Err(BlindedBlockProviderError::Custom(format!( + "Unsupported fork: {}", + fork + ))) + } }; + *message.gas_limit_mut() = cached_data.gas_limit; self.apply_operations(&mut message)?; - let mut signature = - sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; + let mut signature = + message.sign_builder_message(&self.builder_sk, self.context.as_ref())?; if *self.invalidate_signatures.read() { signature = Signature::default(); } - let signed_bid = SignedBuilderBid { message, signature }; - Ok(signed_bid) + Ok(message.to_signed_bid(signature)) } async fn open_bid( &self, signed_block: &mut SignedBlindedBeaconBlock, ) -> Result { + let node = match signed_block { + SignedBlindedBeaconBlock::Bellatrix(block) => { + block.message.body.execution_payload_header.hash_tree_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload_header.hash_tree_root() + } + } + .map_err(convert_err)?; + let payload = self .el - .get_payload_by_root(&from_ssz_rs( - &signed_block - .message - .body - .execution_payload_header - .hash_tree_root() - .map_err(convert_err)?, - )?) + .get_payload_by_root(&from_ssz_rs(&node)?)
.ok_or_else(|| convert_err("missing payload for tx root"))?; let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 71e8a7242..ec3d01085 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -15,6 +15,7 @@ mod database; mod metrics; mod proposer_duties; mod publish_blocks; +mod standard_block_rewards; mod state_id; mod sync_committee_rewards; mod sync_committees; @@ -1800,6 +1801,27 @@ pub fn serve( }, ); + let beacon_rewards_path = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(chain_filter.clone()); + + // GET beacon/rewards/blocks/{block_id} + let get_beacon_rewards_blocks = beacon_rewards_path + .clone() + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(warp::path::end()) + .and_then(|chain: Arc>, block_id: BlockId| { + blocking_json_task(move || { + let (rewards, execution_optimistic) = + standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; + Ok(rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }); + /* * beacon/rewards */ @@ -1809,6 +1831,58 @@ pub fn serve( .and(warp::path("rewards")) .and(chain_filter.clone()); + // POST beacon/rewards/attestations/{epoch} + let post_beacon_rewards_attestations = beacon_rewards_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + epoch: Epoch, + validators: Vec, + log: Logger| { + blocking_json_task(move || { + let attestation_rewards = chain + .compute_attestation_rewards(epoch, validators, log) + .map_err(|e| match e { + BeaconChainError::MissingBeaconState(root) => { + warp_utils::reject::custom_not_found(format!( + "missing state {root:?}", + )) + } + BeaconChainError::NoStateForSlot(slot) => { + warp_utils::reject::custom_not_found(format!( + "missing state at slot {slot}" + )) + } + BeaconChainError::BeaconStateError( + BeaconStateError::UnknownValidator(validator_index), + ) => warp_utils::reject::custom_bad_request(format!( + "validator is unknown: {validator_index}" + )), + BeaconChainError::ValidatorPubkeyUnknown(pubkey) => { + warp_utils::reject::custom_bad_request(format!( + "validator pubkey is unknown: {pubkey:?}" + )) + } + e => warp_utils::reject::custom_server_error(format!( + "unexpected error: {:?}", + e + )), + })?; + let execution_optimistic = + chain.is_optimistic_or_invalid_head().unwrap_or_default(); + + Ok(attestation_rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }, + ); + // POST beacon/rewards/sync_committee/{block_id} let post_beacon_rewards_sync_committee = beacon_rewards_path .clone() @@ -2881,7 +2955,7 @@ pub fn serve( .await .map(|resp| warp::reply::json(&resp)) .map_err(|e| { - error!( + warn!( log, "Relay error when registering validator(s)"; "num_registrations" => filtered_registration_data.len(), @@ -3517,6 +3591,7 @@ pub fn serve( .or(get_beacon_pool_voluntary_exits.boxed()) .or(get_beacon_pool_bls_to_execution_changes.boxed()) .or(get_beacon_deposit_snapshot.boxed()) + .or(get_beacon_rewards_blocks.boxed()) .or(get_config_fork_schedule.boxed()) .or(get_config_spec.boxed()) .or(get_config_deposit_contract.boxed()) @@ -3570,6 +3645,7 @@ pub fn serve( .or(post_beacon_pool_voluntary_exits.boxed()) .or(post_beacon_pool_sync_committees.boxed()) 
.or(post_beacon_pool_bls_to_execution_changes.boxed()) + .or(post_beacon_rewards_attestations.boxed()) .or(post_beacon_rewards_sync_committee.boxed()) .or(post_validator_duties_attester.boxed()) .or(post_validator_duties_sync.boxed()) diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs new file mode 100644 index 000000000..b3c90d08a --- /dev/null +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -0,0 +1,27 @@ +use crate::sync_committee_rewards::get_state_before_applying_block; +use crate::BlockId; +use crate::ExecutionOptimistic; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::lighthouse::StandardBlockReward; +use std::sync::Arc; +use warp_utils::reject::beacon_chain_error; +/// The difference between `block_rewards` and `beacon_block_rewards` is that the latter returns +/// the block rewards in a format that satisfies the beacon-api spec. +pub fn compute_beacon_block_rewards<T: BeaconChainTypes>( + chain: Arc<BeaconChain<T>>, + block_id: BlockId, +) -> Result<(StandardBlockReward, ExecutionOptimistic), warp::Rejection> { + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + let block_ref = block.message(); + + let block_root = block.canonical_root(); + + let mut state = get_state_before_applying_block(chain.clone(), &block)?; + + let rewards = chain + .compute_beacon_block_reward(block_ref, block_root, &mut state) + .map_err(beacon_chain_error)?; + + Ok((rewards, execution_optimistic)) +} diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index ae369115d..cefa98db4 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -47,7 +47,7 @@ pub fn compute_sync_committee_rewards( Ok((data, execution_optimistic)) } -fn get_state_before_applying_block<T: BeaconChainTypes>( +pub fn get_state_before_applying_block<T: BeaconChainTypes>( chain: Arc<BeaconChain<T>>, block: &SignedBlindedBeaconBlock<T::EthSpec>, ) -> Result<BeaconState<T::EthSpec>, warp::reject::Rejection> { diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 87ba3a466..30f475e68 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,9 +1,9 @@ -use crate::api_types::{ - EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse, -}; +use crate::api_types::EndpointVersion; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; -use types::{ForkName, InconsistentFork}; +use types::{ + ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork, +}; use warp::reply::{self, Reply, WithHeader}; pub const V1: EndpointVersion = EndpointVersion(1); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 43099c7a9..6424d73eb 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -24,6 +24,7 @@ use network::NetworkReceivers; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; +use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; @@ -3428,6 +3429,98 @@ impl ApiTester { self } + pub async fn test_builder_works_post_capella(self) -> Self { + // Ensure builder payload is chosen + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let propose_state = self
.harness + .chain + .state_at_slot(slot, StateSkipConfig::WithoutStateRoots) + .unwrap(); + let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap(); + let withdrawals_root = withdrawals.tree_hash_root(); + // Set withdrawals root for builder + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::WithdrawalsRoot(withdrawals_root)); + + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The builder's payload should've been chosen, so this cache should not be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self { + // Ensure builder payload *would be* chosen + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + // Set withdrawals root to something invalid + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen because the builder's was invalid + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -4424,6 +4517,26 @@ async fn builder_payload_chosen_by_profit() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_works_post_capella() { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_post_validator_register_validator() + .await + .test_builder_works_post_capella() + .await + .test_lighthouse_rejects_invalid_withdrawals_root() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 32712e32a..e40384332 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -1,3 +1,4 @@ +use crate::rpc::config::OutboundRateLimiterConfig; use crate::types::GossipKind; use crate::{Enr, PeerIdSerialized}; use directory::{ @@ -133,6 +134,9 @@ pub struct Config { /// Whether light client protocols should be enabled. pub enable_light_client_server: bool, + + /// Configuration for the outbound rate limiter (requests made by this node). 
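+ /// This stays `None` unless the (currently hidden) `--self-limiter` CLI flag is supplied; a `None` value disables self rate limiting entirely.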
+ pub outbound_rate_limiter_config: Option, } impl Default for Config { @@ -211,6 +215,7 @@ impl Default for Config { topics: Vec::new(), metrics_enabled: false, enable_light_client_server: false, + outbound_rate_limiter_config: None, } } } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs new file mode 100644 index 000000000..e89d45850 --- /dev/null +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -0,0 +1,182 @@ +use std::{ + fmt::{Debug, Display}, + str::FromStr, + time::Duration, +}; + +use super::{methods, rate_limiter::Quota, Protocol}; + +use serde_derive::{Deserialize, Serialize}; + +/// Auxiliary struct to aid on configuration parsing. +/// +/// A protocol's quota is specified as `protocol_name:tokens/time_in_seconds`. +#[derive(Debug, PartialEq, Eq)] +struct ProtocolQuota { + protocol: Protocol, + quota: Quota, +} + +impl Display for ProtocolQuota { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}:{}/{}", + self.protocol.as_ref(), + self.quota.max_tokens, + self.quota.replenish_all_every.as_secs() + ) + } +} + +impl FromStr for ProtocolQuota { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let (protocol_str, quota_str) = s + .split_once(':') + .ok_or("Missing ':' from quota definition.")?; + let protocol = protocol_str + .parse() + .map_err(|_parse_err| "Wrong protocol representation in quota")?; + let (tokens_str, time_str) = quota_str + .split_once('/') + .ok_or("Quota should be defined as \"n/t\" (t in seconds). Missing '/' from quota.")?; + let tokens = tokens_str + .parse() + .map_err(|_| "Failed to parse tokens from quota.")?; + let seconds = time_str + .parse::() + .map_err(|_| "Failed to parse time in seconds from quota.")?; + Ok(ProtocolQuota { + protocol, + quota: Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: tokens, + }, + }) + } +} + +/// Configurations for the rate limiter applied to outbound requests (made by the node itself). 
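+/// Parsed from a ';'-separated list of `protocol_name:tokens/time_in_seconds` entries (see the `FromStr` impl below); protocols left unspecified fall back to the `DEFAULT_*` quotas.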
+#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OutboundRateLimiterConfig { + pub(super) ping_quota: Quota, + pub(super) meta_data_quota: Quota, + pub(super) status_quota: Quota, + pub(super) goodbye_quota: Quota, + pub(super) blocks_by_range_quota: Quota, + pub(super) blocks_by_root_quota: Quota, + pub(super) blobs_by_range_quota: Quota, +} + +impl OutboundRateLimiterConfig { + pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10); + pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); + pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); + pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = + Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); + pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = + Quota::n_every(methods::MAX_REQUEST_BLOBS_SIDECARS, 10); +} + +impl Default for OutboundRateLimiterConfig { + fn default() -> Self { + OutboundRateLimiterConfig { + ping_quota: Self::DEFAULT_PING_QUOTA, + meta_data_quota: Self::DEFAULT_META_DATA_QUOTA, + status_quota: Self::DEFAULT_STATUS_QUOTA, + goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, + blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, + blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, + } + } +} + +impl Debug for OutboundRateLimiterConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + macro_rules! fmt_q { + ($quota:expr) => { + &format_args!( + "{}/{}s", + $quota.max_tokens, + $quota.replenish_all_every.as_secs() + ) + }; + } + + f.debug_struct("OutboundRateLimiterConfig") + .field("ping", fmt_q!(&self.ping_quota)) + .field("metadata", fmt_q!(&self.meta_data_quota)) + .field("status", fmt_q!(&self.status_quota)) + .field("goodbye", fmt_q!(&self.goodbye_quota)) + .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) + .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota)) + .finish() + } +} + +/// Parse configurations for the outbound rate limiter. Protocols that are not specified use +/// the default values. Protocol specified more than once use only the first given Quota. +/// +/// The expected format is a ';' separated list of [`ProtocolQuota`]. +impl FromStr for OutboundRateLimiterConfig { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let mut ping_quota = None; + let mut meta_data_quota = None; + let mut status_quota = None; + let mut goodbye_quota = None; + let mut blocks_by_range_quota = None; + let mut blocks_by_root_quota = None; + let mut blobs_by_range_quota = None; + for proto_def in s.split(';') { + let ProtocolQuota { protocol, quota } = proto_def.parse()?; + let quota = Some(quota); + match protocol { + Protocol::Status => status_quota = status_quota.or(quota), + Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota), + Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota), + Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), + Protocol::Ping => ping_quota = ping_quota.or(quota), + Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), + Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota), + Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. 
Quota should not be set."), + } + } + Ok(OutboundRateLimiterConfig { + ping_quota: ping_quota.unwrap_or(Self::DEFAULT_PING_QUOTA), + meta_data_quota: meta_data_quota.unwrap_or(Self::DEFAULT_META_DATA_QUOTA), + status_quota: status_quota.unwrap_or(Self::DEFAULT_STATUS_QUOTA), + goodbye_quota: goodbye_quota.unwrap_or(Self::DEFAULT_GOODBYE_QUOTA), + blocks_by_range_quota: blocks_by_range_quota + .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), + blocks_by_root_quota: blocks_by_root_quota + .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + blobs_by_range_quota: blobs_by_range_quota + .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_quota_inverse() { + let quota = ProtocolQuota { + protocol: Protocol::Goodbye, + quota: Quota { + replenish_all_every: Duration::from_secs(10), + max_tokens: 8, + }, + }; + assert_eq!(quota.to_string().parse(), Ok(quota)) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 9d2aeb0eb..8727f7df7 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -12,7 +12,7 @@ use libp2p::swarm::{ PollParameters, SubstreamProtocol, }; use libp2p::PeerId; -use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr}; +use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o}; use std::marker::PhantomData; use std::sync::Arc; @@ -33,12 +33,17 @@ pub use methods::{ pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; +use self::config::OutboundRateLimiterConfig; +use self::self_limiter::SelfRateLimiter; + pub(crate) mod codec; +pub mod config; mod handler; pub mod methods; mod outbound; mod protocol; mod rate_limiter; +mod self_limiter; /// Composite trait for a request id. pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -101,13 +106,18 @@ pub struct RPCMessage { pub event: HandlerEvent, } +type BehaviourAction = + NetworkBehaviourAction, RPCHandler>; + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. pub struct RPC { /// Rate limiter limiter: RateLimiter, + /// Rate limiter for our own requests. + self_limiter: Option>, /// Queue of events to be processed. - events: Vec, RPCHandler>>, + events: Vec>, fork_context: Arc, enable_light_client_server: bool, /// Slog logger for RPC behaviour. @@ -118,10 +128,12 @@ impl RPC { pub fn new( fork_context: Arc, enable_light_client_server: bool, + outbound_rate_limiter_config: Option, log: slog::Logger, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); - let limiter = RPCRateLimiterBuilder::new() + + let limiter = RateLimiter::builder() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) .n_every(Protocol::Ping, 2, Duration::from_secs(10)) .n_every(Protocol::Status, 5, Duration::from_secs(15)) @@ -141,8 +153,14 @@ impl RPC { ) .build() .expect("Configuration parameters are valid"); + + let self_limiter = outbound_rate_limiter_config.map(|config| { + SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid") + }); + RPC { limiter, + self_limiter, events: Vec::new(), fork_context, enable_light_client_server, @@ -169,12 +187,24 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. 
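+ /// If the self rate limiter is enabled, the request may instead be queued internally and sent later from `poll` once its quota replenishes.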
- pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, event: OutboundRequest) { - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: RPCSend::Request(request_id, event), - }); + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: OutboundRequest) { + let event = if let Some(self_limiter) = self.self_limiter.as_mut() { + match self_limiter.allows(peer_id, request_id, req) { + Ok(event) => event, + Err(_e) => { + // Request is logged and queued internally in the self rate limiter. + return; + } + } + } else { + NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + } + }; + + self.events.push(event); } /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This @@ -279,11 +309,19 @@ where cx: &mut Context, _: &mut impl PollParameters, ) -> Poll> { - // let the rate limiter prune + // let the rate limiter prune. let _ = self.limiter.poll_unpin(cx); + + if let Some(self_limiter) = self.self_limiter.as_mut() { + if let Poll::Ready(event) = self_limiter.poll_ready(cx) { + self.events.push(event) + } + } + if !self.events.is_empty() { return Poll::Ready(self.events.remove(0)); } + Poll::Pending } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index ea48251d6..1e0c26489 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -14,7 +14,7 @@ use std::io; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; -use strum::IntoStaticStr; +use strum::{AsRefStr, Display, EnumString, IntoStaticStr}; use tokio_io_timeout::TimeoutStream; use tokio_util::{ codec::Framed, @@ -169,15 +169,18 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { } /// Protocol names to be used. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumString, AsRefStr, Display)] +#[strum(serialize_all = "snake_case")] pub enum Protocol { /// The Status protocol name. Status, /// The Goodbye protocol name. Goodbye, /// The `BlocksByRange` protocol name. + #[strum(serialize = "beacon_blocks_by_range")] BlocksByRange, /// The `BlocksByRoot` protocol name. + #[strum(serialize = "beacon_blocks_by_root")] BlocksByRoot, /// The `BlobsByRange` protocol name. BlobsByRange, @@ -186,8 +189,10 @@ pub enum Protocol { /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. + #[strum(serialize = "metadata")] MetaData, /// The `LightClientBootstrap` protocol name. 
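+ /// (The `strum` attributes give each protocol the same wire name the removed `Display` impl below produced.)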
+ #[strum(serialize = "light_client_bootstrap")] LightClientBootstrap, } @@ -222,23 +227,6 @@ impl Protocol { } } -impl std::fmt::Display for Protocol { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let repr = match self { - Protocol::Status => "status", - Protocol::Goodbye => "goodbye", - Protocol::BlocksByRange => "beacon_blocks_by_range", - Protocol::BlocksByRoot => "beacon_blocks_by_root", - Protocol::BlobsByRange => "blobs_sidecars_by_range", - Protocol::BlobsByRoot => "beacon_block_and_blobs_sidecar_by_root", - Protocol::Ping => "ping", - Protocol::MetaData => "metadata", - Protocol::LightClientBootstrap => "light_client_bootstrap", - }; - f.write_str(repr) - } -} - impl std::fmt::Display for Encoding { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index d021afcea..83fc56247 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -1,6 +1,7 @@ -use crate::rpc::{InboundRequest, Protocol}; +use crate::rpc::Protocol; use fnv::FnvHashMap; use libp2p::PeerId; +use serde_derive::{Deserialize, Serialize}; use std::convert::TryInto; use std::future::Future; use std::hash::Hash; @@ -47,12 +48,31 @@ type Nanosecs = u64; /// n*`replenish_all_every`/`max_tokens` units of time since their last request. /// /// To produce hard limits, set `max_tokens` to 1. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Quota { /// How often are `max_tokens` fully replenished. - replenish_all_every: Duration, + pub(super) replenish_all_every: Duration, /// Token limit. This translates on how large can an instantaneous batch of /// tokens be. - max_tokens: u64, + pub(super) max_tokens: u64, +} + +impl Quota { + /// A hard limit of one token every `seconds`. + pub const fn one_every(seconds: u64) -> Self { + Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: 1, + } + } + + /// Allow `n` tokens to be used every `seconds`. + pub const fn n_every(n: u64, seconds: u64) -> Self { + Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: n, + } + } } /// Manages rate limiting of requests per peer, with differentiated rates per protocol. @@ -82,6 +102,7 @@ pub struct RPCRateLimiter { } /// Error type for non conformant requests +#[derive(Debug)] pub enum RateLimitedErr { /// Required tokens for this request exceed the maximum TooLarge, @@ -90,7 +111,7 @@ pub enum RateLimitedErr { } /// User-friendly builder of a `RPCRateLimiter` -#[derive(Default)] +#[derive(Default, Clone)] pub struct RPCRateLimiterBuilder { /// Quota for the Goodbye protocol. goodbye_quota: Option<Quota>, @@ -113,13 +134,8 @@ pub struct RPCRateLimiterBuilder { } impl RPCRateLimiterBuilder { - /// Get an empty `RPCRateLimiterBuilder`. - pub fn new() -> Self { - Default::default() - } - /// Set a quota for a protocol.
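+ /// (Made `pub` so the self rate limiter can build its own limiter from an `OutboundRateLimiterConfig`.)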
- fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self { + pub fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self { let q = Some(quota); match protocol { Protocol::Ping => self.ping_quota = q, @@ -213,11 +229,40 @@ impl RPCRateLimiterBuilder { } } +pub trait RateLimiterItem { + fn protocol(&self) -> Protocol; + fn expected_responses(&self) -> u64; +} + +impl<T: EthSpec> RateLimiterItem for super::InboundRequest<T> { + fn protocol(&self) -> Protocol { + self.protocol() + } + + fn expected_responses(&self) -> u64 { + self.expected_responses() + } +} + +impl<T: EthSpec> RateLimiterItem for super::OutboundRequest<T> { + fn protocol(&self) -> Protocol { + self.protocol() + } + + fn expected_responses(&self) -> u64 { + self.expected_responses() + } +} impl RPCRateLimiter { - pub fn allows<T: EthSpec>( + /// Get a builder instance. + pub fn builder() -> RPCRateLimiterBuilder { + RPCRateLimiterBuilder::default() + } + + pub fn allows<Item: RateLimiterItem>( &mut self, peer_id: &PeerId, - request: &InboundRequest<T>, + request: &Item, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); let tokens = request.expected_responses().max(1); diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs new file mode 100644 index 000000000..61e9b46a9 --- /dev/null +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -0,0 +1,204 @@ +use std::{ + collections::{hash_map::Entry, HashMap, VecDeque}, + task::{Context, Poll}, + time::Duration, +}; + +use futures::FutureExt; +use libp2p::{swarm::NotifyHandler, PeerId}; +use slog::{crit, debug, Logger}; +use smallvec::SmallVec; +use tokio_util::time::DelayQueue; +use types::EthSpec; + +use super::{ + config::OutboundRateLimiterConfig, + rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}, + BehaviourAction, OutboundRequest, Protocol, RPCSend, ReqId, +}; + +/// A request that was rate limited or waiting on rate limited requests for the same peer and +/// protocol. +struct QueuedRequest<Id: ReqId, TSpec: EthSpec> { + req: OutboundRequest<TSpec>, + request_id: Id, +} + +pub(crate) struct SelfRateLimiter<Id: ReqId, TSpec: EthSpec> { + /// Requests queued for sending per peer. These requests are stored when the self rate + /// limiter rejects them. Rate limiting is applied on a per-peer and per-protocol basis, + /// so the requests are keyed in the same way. + delayed_requests: HashMap<(PeerId, Protocol), VecDeque<QueuedRequest<Id, TSpec>>>, + /// The delay required to allow a peer's outbound request per protocol. + next_peer_request: DelayQueue<(PeerId, Protocol)>, + /// Rate limiter for our own requests. + limiter: RateLimiter, + /// Requests that are ready to be sent. + ready_requests: SmallVec<[BehaviourAction<Id, TSpec>; 3]>, + /// Slog logger. + log: Logger, +} + +/// Error returned when the rate limiter does not accept a request. // NOTE: this is currently not used, but might be useful for debugging. +pub enum Error { + /// There are queued requests for this same peer and protocol. + PendingRequests, + /// Request was tried but rate limited. + RateLimited, +} + +impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> { + /// Creates a new [`SelfRateLimiter`] based on configuration values. + pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result<Self, &'static str> { + debug!(log, "Using self rate limiting params"; "config" => ?config); + // Destructure to make sure every configuration value is used.
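+ // (A quota added to the config but not wired into the builder below then becomes a compile-time error rather than a silently ignored value.)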
+ let OutboundRateLimiterConfig { + ping_quota, + meta_data_quota, + status_quota, + goodbye_quota, + blocks_by_range_quota, + blocks_by_root_quota, + blobs_by_range_quota, + } = config; + + let limiter = RateLimiter::builder() + .set_quota(Protocol::Ping, ping_quota) + .set_quota(Protocol::MetaData, meta_data_quota) + .set_quota(Protocol::Status, status_quota) + .set_quota(Protocol::Goodbye, goodbye_quota) + .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) + .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) + // Manually set the LightClientBootstrap quota, since we use the same rate limiter for + // inbound and outbound requests, and LightClientBootstrap is an inbound-only + // protocol. + .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) + .build()?; + + Ok(SelfRateLimiter { + delayed_requests: Default::default(), + next_peer_request: Default::default(), + limiter, + ready_requests: Default::default(), + log, + }) + } + + /// Checks if the rate limiter allows the request. If it's allowed, returns the + /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed + /// until it can be sent. + pub fn allows( + &mut self, + peer_id: PeerId, + request_id: Id, + req: OutboundRequest<TSpec>, + ) -> Result<BehaviourAction<Id, TSpec>, Error> { + let protocol = req.protocol(); + // First check that there are not already other requests waiting to be sent. + if let Some(queued_requests) = self.delayed_requests.get_mut(&(peer_id, protocol)) { + queued_requests.push_back(QueuedRequest { req, request_id }); + + return Err(Error::PendingRequests); + } + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) { + Err((rate_limited_req, wait_time)) => { + let key = (peer_id, protocol); + self.next_peer_request.insert(key, wait_time); + self.delayed_requests + .entry(key) + .or_default() + .push_back(rate_limited_req); + + Err(Error::RateLimited) + } + Ok(event) => Ok(event), + } + } + + /// Auxiliary function to deal with self rate limiting outcomes. If the rate limiter allows the + /// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request + /// should be delayed, it's returned with the duration to wait. + fn try_send_request( + limiter: &mut RateLimiter, + peer_id: PeerId, + request_id: Id, + req: OutboundRequest<TSpec>, + log: &Logger, + ) -> Result<BehaviourAction<Id, TSpec>, (QueuedRequest<Id, TSpec>, Duration)> { + match limiter.allows(&peer_id, &req) { + Ok(()) => Ok(BehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + }), + Err(e) => { + let protocol = req.protocol(); + match e { + RateLimitedErr::TooLarge => { + // This should never happen with default parameters. Let's just send the request. + // Log a crit since this is a config issue. + crit!( + log, + "Self rate limiting error for a batch that will never fit. Sending request anyway.
Check configuration parameters."; + "protocol" => %req.protocol() + ); + Ok(BehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + }) + } + RateLimitedErr::TooSoon(wait_time) => { + debug!(log, "Self rate limiting"; "protocol" => %protocol, "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id); + Err((QueuedRequest { req, request_id }, wait_time)) + } + } + } + } + } + + /// When a peer and protocol are allowed to send their next request, this function checks the + /// queued requests and marks as ready as many as the limiter allows. + fn next_peer_request_ready(&mut self, peer_id: PeerId, protocol: Protocol) { + if let Entry::Occupied(mut entry) = self.delayed_requests.entry((peer_id, protocol)) { + let queued_requests = entry.get_mut(); + while let Some(QueuedRequest { req, request_id }) = queued_requests.pop_front() { + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) + { + Err((rate_limited_req, wait_time)) => { + let key = (peer_id, protocol); + self.next_peer_request.insert(key, wait_time); + queued_requests.push_back(rate_limited_req); + // If one fails just wait for the next window that allows sending requests. + return; + } + Ok(event) => self.ready_requests.push(event), + } + } + if queued_requests.is_empty() { + entry.remove(); + } + } + } + + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<BehaviourAction<Id, TSpec>> { + // First check the requests that were self rate limited, since those might add events to + // the queue. Also do this before rate limiter pruning to avoid removing and + // immediately re-adding rate limiting keys. + if let Poll::Ready(Some(Ok(expired))) = self.next_peer_request.poll_expired(cx) { + let (peer_id, protocol) = expired.into_inner(); + self.next_peer_request_ready(peer_id, protocol); + } + // Prune the rate limiter. + let _ = self.limiter.poll_unpin(cx); + + // Finally return any queued events.
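+ // Events are returned one per poll; any remaining ready requests are yielded on subsequent polls.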
+ if !self.ready_requests.is_empty() { + return Poll::Ready(self.ready_requests.remove(0)); + } + + Poll::Pending + } +} diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e7f8d8945..9b0728d8d 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -266,6 +266,7 @@ impl Network { let eth2_rpc = RPC::new( ctx.fork_context.clone(), config.enable_light_client_server, + config.outbound_rate_limiter_config.clone(), log.clone(), ); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 5ce331169..95d8a294c 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -46,6 +46,7 @@ derivative = "2.2.0" delay_map = "0.1.1" ethereum-types = { version = "0.14.1", optional = true } operation_pool = { path = "../operation_pool" } +execution_layer = { path = "../execution_layer" } [features] deterministic_long_lived_attnets = [ "ethereum-types" ] diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 01b7cb43b..cefb1c77e 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -10,7 +10,7 @@ use lighthouse_network::rpc::methods::{ use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; -use slog::{debug, error}; +use slog::{debug, error, warn}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; @@ -576,14 +576,26 @@ impl Worker { break; } Err(e) => { - error!( - self.log, - "Error fetching block for peer"; - "request" => ?req, - "peer" => %peer_id, - "block_root" => ?root, - "error" => ?e - ); + if matches!( + e, + BeaconChainError::ExecutionLayerErrorPayloadReconstruction(_block_hash, ref boxed_error) + if matches!(**boxed_error, execution_layer::Error::EngineError(_)) + ) { + warn!( + self.log, + "Error rebuilding payload for peer"; + "info" => "this may occur occasionally when the EE is busy", + "block_root" => ?root, + "error" => ?e, + ); + } else { + error!( + self.log, + "Error fetching block for peer"; + "block_root" => ?root, + "error" => ?e + ); + } // send the stream terminator self.send_error_response( diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index d401deb89..c5be4f0a6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -10,7 +10,7 @@ mod reward_cache; mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; -pub use attestation::AttMaxCover; +pub use attestation::{earliest_attestation_validators, AttMaxCover}; pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index eb6754aa9..b0ada1828 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -201,6 +201,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Lighthouse by default does not discover private IP addresses. 
Set this flag to enable connection attempts to local addresses.") .takes_value(false), ) + .arg( + Arg::with_name("self-limiter") + .long("self-limiter") + .help( + "Enables the outbound rate limiter (requests made by this node).\ + \ + Rate limit quotas per protocol can be set in the form of \ + <protocol_name>:<tokens>/<time_in_seconds>. To set quotas for multiple protocols, \ + separate them by ';'. If the self rate limiter is enabled and a protocol is not \ + present in the configuration, the quotas used for the inbound rate limiter will be \ + used." + ) + .min_values(0) + .hidden(true) + ) /* REST API related arguments */ .arg( Arg::with_name("http") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index e8128cb79..afa94a54e 100644 @@ -1004,6 +1004,13 @@ pub fn set_network_config( // Light client server config. config.enable_light_client_server = cli_args.is_present("light-client-server"); + // This flag can be used with or without a value. Try to parse it first with a value; if + // no value is given but the flag is present, use the default params. + config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?; + if cli_args.is_present("self-limiter") && config.outbound_rate_limiter_config.is_none() { + config.outbound_rate_limiter_config = Some(Default::default()); + } + Ok(()) } diff --git a/book/src/docker.md b/book/src/docker.md index f22b8a200..7484f9f52 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -57,7 +57,7 @@ $ docker pull sigp/lighthouse:latest-modern Image tags follow this format: ``` -${version}${arch}${stability}${modernity} +${version}${arch}${stability}${modernity}${features} ``` The `version` is: @@ -81,6 +81,12 @@ The `modernity` is: * `-modern` for optimized builds * empty for a `portable` unoptimized build +The `features` is: + +* `-dev` for a development build with the `spec-minimal` feature enabled. +* empty for a standard build with no custom feature enabled. + + Examples: * `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index e6fbc0f16..0793af20d 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -59,14 +59,7 @@ The following fields are returned: - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. -From this data you can calculate some interesting figures: - -#### Participation Rate - -`previous_epoch_attesting_gwei / previous_epoch_active_gwei` - -Expresses the ratio of validators that managed to have an attestation -voting upon the previous epoch included in a block. +From this data you can calculate: #### Justification/Finalization Rate diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 9e67f6dec..4535546a9 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -14,9 +14,8 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; -use self::mixin::{RequestAccept, ResponseForkName, ResponseOptional}; +use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; -use ::types::map_fork_name_with; use futures::Stream; use futures_util::StreamExt; use lighthouse_network::PeerId; @@ -696,35 +695,7 @@ impl BeaconNodeHttpClient { None => return Ok(None), }; - // If present, use the fork provided in the headers to decode the block.
Gracefully handle - // missing and malformed fork names by falling back to regular deserialisation. - let (block, version, execution_optimistic) = match response.fork_name_from_header() { - Ok(Some(fork_name)) => { - let (data, (version, execution_optimistic)) = - map_fork_name_with!(fork_name, SignedBeaconBlock, { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, (version, execution_optimistic)) - }); - (data, version, execution_optimistic) - } - Ok(None) | Err(_) => { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, version, execution_optimistic) - } - }; - Ok(Some(ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data: block, - })) + Ok(Some(response.json().await?)) } /// `GET lighthouse/beacon/blobs_sidecars/{block_id}` @@ -758,35 +729,7 @@ impl BeaconNodeHttpClient { None => return Ok(None), }; - // If present, use the fork provided in the headers to decode the block. Gracefully handle - // missing and malformed fork names by falling back to regular deserialisation. - let (block, version, execution_optimistic) = match response.fork_name_from_header() { - Ok(Some(fork_name)) => { - let (data, (version, execution_optimistic)) = - map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, (version, execution_optimistic)) - }); - (data, version, execution_optimistic) - } - Ok(None) | Err(_) => { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, version, execution_optimistic) - } - }; - Ok(Some(ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data: block, - })) + Ok(Some(response.json().await?)) } /// `GET v1/beacon/blocks` (LEGACY) @@ -1092,6 +1035,40 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `GET beacon/rewards/blocks` + pub async fn get_beacon_rewards_blocks(&self, epoch: Epoch) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("blocks"); + + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + + self.get(path).await + } + + /// `POST beacon/rewards/attestations` + pub async fn post_beacon_rewards_attestations( + &self, + attestations: &[ValidatorId], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("attestations"); + + self.post(path, &attestations).await?; + + Ok(()) + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 068abd693..e50d9f4dc 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,8 +1,10 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. 
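+//! The `attestation_rewards` and `standard_block_rewards` modules added below back the new standard rewards endpoints wired up in `http_api`.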
mod attestation_performance; +pub mod attestation_rewards; mod block_packing_efficiency; mod block_rewards; +mod standard_block_rewards; mod sync_committee_rewards; use crate::{ @@ -23,11 +25,13 @@ use store::{AnchorInfo, Split, StoreConfig}; pub use attestation_performance::{ AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, }; +pub use attestation_rewards::StandardAttestationRewards; pub use block_packing_efficiency::{ BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, }; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; +pub use standard_block_rewards::StandardBlockReward; pub use sync_committee_rewards::SyncCommitteeReward; // Define "legacy" implementations of `Option` which use four bytes for encoding the union diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs new file mode 100644 index 000000000..314ffb851 --- /dev/null +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -0,0 +1,44 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards paid for attestations +// All rewards in GWei + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct IdealAttestationRewards { + // Validator's effective balance in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub effective_balance: u64, + // Ideal attester's reward for head vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub head: u64, + // Ideal attester's reward for target vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub target: u64, + // Ideal attester's reward for source vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub source: u64, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct TotalAttestationRewards { + // one entry for every validator based on their attestations in the epoch + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + // attester's reward for head vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub head: u64, + // attester's reward for target vote in gwei + #[serde(with = "eth2_serde_utils::quoted_i64")] + pub target: i64, + // attester's reward for source vote in gwei + #[serde(with = "eth2_serde_utils::quoted_i64")] + pub source: i64, + // TBD attester's inclusion_delay reward in gwei (phase0 only) + // pub inclusion_delay: u64, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct StandardAttestationRewards { + pub ideal_rewards: Vec, + pub total_rewards: Vec, +} diff --git a/common/eth2/src/lighthouse/standard_block_rewards.rs b/common/eth2/src/lighthouse/standard_block_rewards.rs new file mode 100644 index 000000000..502577500 --- /dev/null +++ b/common/eth2/src/lighthouse/standard_block_rewards.rs @@ -0,0 +1,26 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards for a single block +// All rewards in GWei +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct StandardBlockReward { + // proposer of the block, the proposer index who receives these rewards + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_index: u64, + // total block reward in gwei, + // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub total: u64, + // block reward component 
due to included attestations in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub attestations: u64, + // block reward component due to included sync_aggregate in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub sync_aggregate: u64, + // block reward component due to included proposer_slashings in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_slashings: u64, + // block reward component due to included attester_slashings in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub attester_slashings: u64, +} diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs index cdd685065..e215d8e3e 100644 --- a/common/eth2/src/lighthouse/sync_committee_rewards.rs +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -8,5 +8,6 @@ pub struct SyncCommitteeReward { #[serde(with = "eth2_serde_utils::quoted_u64")] pub validator_index: u64, // sync committee reward in gwei for the validator + #[serde(with = "eth2_serde_utils::quoted_i64")] pub reward: i64, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 4b7ae5539..1a0b46e38 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -236,21 +236,6 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { } } -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ExecutionOptimisticForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub execution_optimistic: Option, - pub data: T, -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub data: T, -} - #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] pub struct RootData { pub root: Hash256, @@ -270,11 +255,20 @@ pub struct FinalityCheckpointsData { } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(try_from = "&str")] pub enum ValidatorId { PublicKey(PublicKeyBytes), Index(u64), } +impl TryFrom<&str> for ValidatorId { + type Error = String; + + fn try_from(s: &str) -> Result { + Self::from_str(s) + } +} + impl FromStr for ValidatorId { type Err = String; @@ -1128,6 +1122,30 @@ pub struct BlocksAndBlobs> { pub kzg_aggregate_proof: KzgProof, } +impl> ForkVersionDeserialize + for BlocksAndBlobs +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + #[serde(bound = "T: EthSpec")] + struct Helper { + block: serde_json::Value, + blobs: Vec>, + kzg_aggregate_proof: KzgProof, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?, + blobs: helper.blobs, + kzg_aggregate_proof: helper.kzg_aggregate_proof, + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index f614007ae..ace198724 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -722,7 +722,7 @@ where op: &InvalidationOperation, ) -> Result<(), Error> { self.proto_array - .process_execution_payload_invalidation(op) + .process_execution_payload_invalidation::(op) .map_err(Error::FailedToProcessInvalidExecutionPayload) } @@ -1288,7 +1288,7 @@ where if store.best_justified_checkpoint().epoch > 
store.justified_checkpoint().epoch { let store = &self.fc_store; - if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) { + if self.is_finalized_checkpoint_or_descendant(store.best_justified_checkpoint().root) { let store = &mut self.fc_store; store .set_justified_checkpoint(*store.best_justified_checkpoint()) @@ -1329,12 +1329,13 @@ where /// Returns `true` if the block is known **and** a descendant of the finalized root. pub fn contains_block(&self, block_root: &Hash256) -> bool { - self.proto_array.contains_block(block_root) && self.is_descendant_of_finalized(*block_root) + self.proto_array.contains_block(block_root) + && self.is_finalized_checkpoint_or_descendant(*block_root) } /// Returns a `ProtoBlock` if the block is known **and** a descendant of the finalized root. pub fn get_block(&self, block_root: &Hash256) -> Option { - if self.is_descendant_of_finalized(*block_root) { + if self.is_finalized_checkpoint_or_descendant(*block_root) { self.proto_array.get_block(block_root) } else { None @@ -1343,7 +1344,7 @@ where /// Returns an `ExecutionStatus` if the block is known **and** a descendant of the finalized root. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { - if self.is_descendant_of_finalized(*block_root) { + if self.is_finalized_checkpoint_or_descendant(*block_root) { self.proto_array.get_block_execution_status(block_root) } else { None @@ -1378,10 +1379,10 @@ where }) } - /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. - pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { + /// Return `true` if `block_root` is equal to the finalized checkpoint, or a known descendant of it. + pub fn is_finalized_checkpoint_or_descendant(&self, block_root: Hash256) -> bool { self.proto_array - .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) + .is_finalized_checkpoint_or_descendant::(block_root) } /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 035fb799e..68b3fb719 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -273,7 +273,7 @@ impl ForkChoiceTestDefinition { } }; fork_choice - .process_execution_payload_invalidation(&op) + .process_execution_payload_invalidation::(&op) .unwrap() } Operation::AssertWeight { block_root, weight } => assert_eq!( diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index add84f547..bf50c0802 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -451,7 +451,7 @@ impl ProtoArray { /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`. /// /// See the documentation of `InvalidationOperation` for usage. 
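+ /// The new `E: EthSpec` type parameter lets the finality check convert the finalized epoch into a slot.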
- pub fn propagate_execution_payload_invalidation( + pub fn propagate_execution_payload_invalidation<E: EthSpec>( &mut self, op: &InvalidationOperation, ) -> Result<(), Error> { @@ -482,7 +482,7 @@ impl ProtoArray { let latest_valid_ancestor_is_descendant = latest_valid_ancestor_root.map_or(false, |ancestor_root| { self.is_descendant(ancestor_root, head_block_root) - && self.is_descendant(self.finalized_checkpoint.root, ancestor_root) + && self.is_finalized_checkpoint_or_descendant::<E>(ancestor_root) }); // Collect all *ancestors* which were declared invalid since they reside between the @@ -977,6 +977,12 @@ impl ProtoArray { /// ## Notes /// /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`. + /// + /// ## Warning + /// + /// Do not use this function to check if a block is a descendant of the + /// finalized checkpoint. Use `Self::is_finalized_checkpoint_or_descendant` + /// instead. pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.indices .get(&ancestor_root) @@ -990,6 +996,70 @@ impl ProtoArray { .unwrap_or(false) } + /// Returns `true` if `root` is equal to or a descendant of + /// `self.finalized_checkpoint`. + /// + /// Notably, this function is checking ancestry of the finalized + /// *checkpoint*, not the finalized *block*. + pub fn is_finalized_checkpoint_or_descendant<E: EthSpec>(&self, root: Hash256) -> bool { + let finalized_root = self.finalized_checkpoint.root; + let finalized_slot = self + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + let mut node = if let Some(node) = self + .indices + .get(&root) + .and_then(|index| self.nodes.get(*index)) + { + node + } else { + // An unknown root is not a finalized descendant. This line can only + // be reached if the user supplies a root that is not known to fork + // choice. + return false; + }; + + // The finalized and justified checkpoints represent a list of known + // ancestors of `node` that are likely to coincide with the store's + // finalized checkpoint. + // + // Run this check once, outside of the loop rather than inside the loop. + // If the conditions don't match for this node then they're unlikely to + // start matching for its ancestors. + for checkpoint in &[ + node.finalized_checkpoint, + node.justified_checkpoint, + node.unrealized_finalized_checkpoint, + node.unrealized_justified_checkpoint, + ] { + if checkpoint.map_or(false, |cp| cp == self.finalized_checkpoint) { + return true; + } + } + + loop { + // If `node` is less than or equal to the finalized slot then `node` + // must be the finalized block. + if node.slot <= finalized_slot { + return node.root == finalized_root; + } + + // Since `node` is from a higher slot than the finalized checkpoint, + // replace `node` with the parent of `node`. + if let Some(parent) = node.parent.and_then(|index| self.nodes.get(index)) { + node = parent + } else { + // If `node` is not the finalized block and its parent does not + // exist in fork choice, then the parent must have been pruned. + // Proto-array only prunes blocks prior to the finalized block, + // so this means the parent conflicts with finality. + return false; + }; + } + } + /// Returns the first *beacon block root* which contains an execution payload with the given /// `block_hash`, if any.
pub fn execution_block_hash_to_beacon_block_root( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cbd369ae6..0e0d806e7 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -358,12 +358,12 @@ impl ProtoArrayForkChoice { } /// See `ProtoArray::propagate_execution_payload_invalidation` for documentation. - pub fn process_execution_payload_invalidation( + pub fn process_execution_payload_invalidation<E: EthSpec>( &mut self, op: &InvalidationOperation, ) -> Result<(), String> { self.proto_array - .propagate_execution_payload_invalidation(op) + .propagate_execution_payload_invalidation::<E>(op) .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) } @@ -748,6 +748,15 @@ impl ProtoArrayForkChoice { .is_descendant(ancestor_root, descendant_root) } + /// See `ProtoArray` documentation. + pub fn is_finalized_checkpoint_or_descendant<E: EthSpec>( + &self, + descendant_root: Hash256, + ) -> bool { + self.proto_array + .is_finalized_checkpoint_or_descendant::<E>(descendant_root) + } + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { if validator_index < self.votes.0.len() { let vote = &self.votes.0[validator_index]; @@ -928,6 +937,10 @@ mod test_compute_deltas { epoch: genesis_epoch, root: finalized_root, }; + let junk_checkpoint = Checkpoint { + epoch: Epoch::new(42), + root: Hash256::repeat_byte(42), + }; let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( genesis_slot, junk_state_root, genesis_checkpoint, genesis_checkpoint, junk_shuffling_id.clone(), junk_shuffling_id, execution_status, @@ -973,8 +986,10 @@ mod test_compute_deltas { target_root: finalized_root, current_epoch_shuffling_id: junk_shuffling_id.clone(), next_epoch_shuffling_id: junk_shuffling_id, - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, + // Use the junk checkpoint for the next two values to prevent + // the loop-shortcutting mechanism from triggering. + justified_checkpoint: junk_checkpoint, + finalized_checkpoint: junk_checkpoint, execution_status, unrealized_justified_checkpoint: None, unrealized_finalized_checkpoint: None, @@ -993,6 +1008,11 @@ mod test_compute_deltas { assert!(!fc.is_descendant(finalized_root, not_finalized_desc)); assert!(!fc.is_descendant(finalized_root, unknown)); + assert!(fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_root)); + assert!(fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_desc)); + assert!(!fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(not_finalized_desc)); + assert!(!fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(unknown)); + assert!(!fc.is_descendant(finalized_desc, not_finalized_desc)); assert!(fc.is_descendant(finalized_desc, finalized_desc)); assert!(!fc.is_descendant(finalized_desc, finalized_root)); @@ -1004,6 +1024,171 @@ mod test_compute_deltas { assert!(!fc.is_descendant(not_finalized_desc, unknown)); } + /// This test covers an interesting case where a block can be a descendant + /// of the finalized *block*, but not a descendant of the finalized + /// *checkpoint*. + /// + /// ## Example + /// + /// Consider this block tree which has three blocks (`A`, `B` and `C`): + /// + /// ```ignore + /// [A] <--- [-] <--- [B] + /// | + /// |--[C] + /// ``` + /// + /// - `A` (slot 31) is the common descendant. + /// - `B` (slot 33) descends from `A`, but there is a single skip slot + /// between it and `A`. + /// - `C` (slot 32) descends from `A` and conflicts with `B`. + /// + /// Imagine that the `B` chain is finalized at epoch 1.
+    /// This test covers an interesting case where a block can be a descendant
+    /// of the finalized *block*, but not a descendant of the finalized
+    /// *checkpoint*.
+    ///
+    /// ## Example
+    ///
+    /// Consider this block tree which has three blocks (`A`, `B` and `C`):
+    ///
+    /// ```ignore
+    /// [A] <--- [-] <--- [B]
+    ///  |
+    ///  |--[C]
+    /// ```
+    ///
+    /// - `A` (slot 31) is the common ancestor.
+    /// - `B` (slot 33) descends from `A`, but there is a single skip slot
+    ///     between it and `A`.
+    /// - `C` (slot 32) descends from `A` and conflicts with `B`.
+    ///
+    /// Imagine that the `B` chain is finalized at epoch 1. This means that the
+    /// finalized checkpoint points to the skipped slot at 32. The root of the
+    /// finalized checkpoint is `A`.
+    ///
+    /// In this scenario, the block `C` has the finalized root (`A`) as an
+    /// ancestor whilst simultaneously conflicting with the finalized
+    /// checkpoint.
+    ///
+    /// This means that to ensure a block does not conflict with finality we
+    /// must check that it's a descendant of the finalized *checkpoint*, not
+    /// just the finalized *block*.
+    #[test]
+    fn finalized_descendant_edge_case() {
+        let get_block_root = Hash256::from_low_u64_be;
+        let genesis_slot = Slot::new(0);
+        let junk_state_root = Hash256::zero();
+        let junk_shuffling_id =
+            AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
+        let execution_status = ExecutionStatus::irrelevant();
+
+        let genesis_checkpoint = Checkpoint {
+            epoch: Epoch::new(0),
+            root: get_block_root(0),
+        };
+
+        let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            genesis_slot,
+            junk_state_root,
+            genesis_checkpoint,
+            genesis_checkpoint,
+            junk_shuffling_id.clone(),
+            junk_shuffling_id.clone(),
+            execution_status,
+            CountUnrealizedFull::default(),
+        )
+        .unwrap();
+
+        struct TestBlock {
+            slot: u64,
+            root: u64,
+            parent_root: u64,
+        }
+
+        let insert_block = |fc: &mut ProtoArrayForkChoice, block: TestBlock| {
+            fc.proto_array
+                .on_block::<MainnetEthSpec>(
+                    Block {
+                        slot: Slot::from(block.slot),
+                        root: get_block_root(block.root),
+                        parent_root: Some(get_block_root(block.parent_root)),
+                        state_root: Hash256::zero(),
+                        target_root: Hash256::zero(),
+                        current_epoch_shuffling_id: junk_shuffling_id.clone(),
+                        next_epoch_shuffling_id: junk_shuffling_id.clone(),
+                        justified_checkpoint: Checkpoint {
+                            epoch: Epoch::new(0),
+                            root: get_block_root(0),
+                        },
+                        finalized_checkpoint: genesis_checkpoint,
+                        execution_status,
+                        unrealized_justified_checkpoint: Some(genesis_checkpoint),
+                        unrealized_finalized_checkpoint: Some(genesis_checkpoint),
+                    },
+                    Slot::from(block.slot),
+                )
+                .unwrap();
+        };
+
+        /*
+         * Start of the interesting part of the test.
+         */
+
+        // Produce the 0th epoch of blocks. They should all form a chain from
+        // the genesis block.
+        for i in 1..MainnetEthSpec::slots_per_epoch() {
+            insert_block(
+                &mut fc,
+                TestBlock {
+                    slot: i,
+                    root: i,
+                    parent_root: i - 1,
+                },
+            )
+        }
+
+        let last_slot_of_epoch_0 = MainnetEthSpec::slots_per_epoch() - 1;
+
+        // Produce a block that descends from the last block of epoch 0.
+        //
+        // This block will be non-canonical.
+        let non_canonical_slot = last_slot_of_epoch_0 + 1;
+        insert_block(
+            &mut fc,
+            TestBlock {
+                slot: non_canonical_slot,
+                root: non_canonical_slot,
+                parent_root: non_canonical_slot - 1,
+            },
+        );
+
+        // Produce a block that descends from the last block of the 0th epoch,
+        // skipping the 1st slot of the 1st epoch.
+        //
+        // This block will be canonical.
+        let canonical_slot = last_slot_of_epoch_0 + 2;
+        insert_block(
+            &mut fc,
+            TestBlock {
+                slot: canonical_slot,
+                root: canonical_slot,
+                parent_root: non_canonical_slot - 1,
+            },
+        );
+
+        let finalized_root = get_block_root(last_slot_of_epoch_0);
+
+        // Set the finalized checkpoint to finalize the first slot of epoch 1 on
+        // the canonical chain.
+        fc.proto_array.finalized_checkpoint = Checkpoint {
+            root: finalized_root,
+            epoch: Epoch::new(1),
+        };
+
+        assert!(
+            fc.proto_array
+                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_root),
+            "the finalized checkpoint is the finalized checkpoint"
+        );
+
+        assert!(
+            fc.proto_array
+                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(get_block_root(
+                    canonical_slot
+                )),
+            "the canonical block is a descendant of the finalized checkpoint"
+        );
+        assert!(
+            !fc.proto_array
+                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(get_block_root(
+                    non_canonical_slot
+                )),
+            "although the non-canonical block is a descendant of the finalized block, \
+             it's not a descendant of the finalized checkpoint"
+        );
+    }
+
     #[test]
     fn zero_hash() {
         let validator_count: usize = 16;
diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs
index 92b5966c9..5c5dafc66 100644
--- a/consensus/serde_utils/src/lib.rs
+++ b/consensus/serde_utils/src/lib.rs
@@ -12,4 +12,4 @@ pub mod u64_hex_be;
 pub mod u8_hex;

 pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex};
-pub use quoted_int::{quoted_u256, quoted_u32, quoted_u64, quoted_u8};
+pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8};
diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs
index 822acb5ee..0cc35aa31 100644
--- a/consensus/serde_utils/src/quoted_int.rs
+++ b/consensus/serde_utils/src/quoted_int.rs
@@ -11,7 +11,7 @@ use std::convert::TryFrom;
 use std::marker::PhantomData;

 macro_rules! define_mod {
-    ($int: ty, $visit_fn: ident) => {
+    ($int: ty) => {
         /// Serde support for deserializing quoted integers.
         ///
         /// Configurable so that quotes are either required or optional.
@@ -140,19 +140,25 @@ macro_rules! define_mod {

 pub mod quoted_u8 {
     use super::*;

-    define_mod!(u8, visit_u8);
+    define_mod!(u8);
 }

 pub mod quoted_u32 {
     use super::*;

-    define_mod!(u32, visit_u32);
+    define_mod!(u32);
 }

 pub mod quoted_u64 {
     use super::*;

-    define_mod!(u64, visit_u64);
+    define_mod!(u64);
+}
+
+pub mod quoted_i64 {
+    use super::*;
+
+    define_mod!(i64);
 }

 pub mod quoted_u256 {
@@ -216,4 +222,26 @@ mod test {
     fn u256_without_quotes() {
         serde_json::from_str::<WrappedU256>("1").unwrap_err();
     }
+
+    #[derive(Debug, PartialEq, Serialize, Deserialize)]
+    #[serde(transparent)]
+    struct WrappedI64(#[serde(with = "quoted_i64")] i64);
+
+    #[test]
+    fn negative_i64_with_quotes() {
+        assert_eq!(
+            serde_json::from_str::<WrappedI64>("\"-200\"").unwrap().0,
+            -200
+        );
+        assert_eq!(
+            serde_json::to_string(&WrappedI64(-12_500)).unwrap(),
+            "\"-12500\""
+        );
+    }
+
+    // It would be OK if this worked, but we don't need it to (i64s should always be quoted).
+    #[test]
+    fn negative_i64_without_quotes() {
+        serde_json::from_str::<WrappedI64>("-200").unwrap_err();
+    }
 }
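As a rough mental model of what the macro-generated `quoted_i64` module does in its required-quotes configuration, a hand-written sketch of the pattern the new `WrappedI64` tests rely on (this is a simplified stand-in, not the actual `define_mod!` expansion):

```rust
use serde::{Deserialize, Deserializer, Serialize, Serializer};

// Simplified stand-in for the macro-generated module: always quote on
// serialize, always require quotes on deserialize.
mod quoted_i64_sketch {
    use super::*;

    pub fn serialize<S: Serializer>(value: &i64, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&value.to_string())
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<i64, D::Error> {
        // Deserializing into `String` rejects bare (unquoted) JSON numbers.
        let s = String::deserialize(deserializer)?;
        s.parse().map_err(serde::de::Error::custom)
    }
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(transparent)]
struct Wrapped(#[serde(with = "quoted_i64_sketch")] i64);

fn main() {
    assert_eq!(serde_json::to_string(&Wrapped(-12_500)).unwrap(), "\"-12500\"");
    assert_eq!(serde_json::from_str::<Wrapped>("\"-200\"").unwrap(), Wrapped(-200));
    assert!(serde_json::from_str::<Wrapped>("-200").is_err());
}
```

Quoting integers as strings keeps JSON consumers (notably JavaScript, with its 53-bit safe-integer range) from silently mangling large values, which is why the consensus API types always quote.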
diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs
index bbf2c1caa..709302eec 100644
--- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs
+++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs
@@ -348,8 +348,7 @@ where
         &mut self,
         block: &'a SignedBeaconBlock<T, Payload>,
     ) -> Result<()> {
-        // FIXME(capella): to improve performance we might want to decompress the withdrawal pubkeys
-        // in parallel.
+        // To improve performance we might want to decompress the withdrawal pubkeys in parallel.
         if let Ok(bls_to_execution_changes) = block.message().body().bls_to_execution_changes() {
             for bls_to_execution_change in bls_to_execution_changes {
                 self.sets.push(bls_execution_change_signature_set(
diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs
index 34700a33e..15a856c40 100644
--- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs
+++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs
@@ -37,10 +37,9 @@ pub fn verify_bls_to_execution_change(
         Invalid::NonBlsWithdrawalCredentials
     );

+    // Re-hashing the pubkey isn't necessary during block replay, so we may want to skip that in
+    // future.
     let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized());
-
-    // FIXME: Should this check be put inside the verify_signatures.is_true() condition?
-    // I believe that's used for fuzzing so this is a Mehdi question..
     verify!(
         validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..),
         Invalid::WithdrawalCredentialsMismatch
diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs
index f7b9790b4..f960b2117 100644
--- a/consensus/types/src/beacon_block.rs
+++ b/consensus/types/src/beacon_block.rs
@@ -685,6 +685,24 @@ impl From>>
     }
 }

+impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
+    for BeaconBlock<T, Payload>
+{
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        Ok(map_fork_name!(
+            fork_name,
+            Self,
+            serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!(
+                "BeaconBlock failed to deserialize: {:?}",
+                e
+            )))?
+        ))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs
index 11a47ccb0..c71739652 100644
--- a/consensus/types/src/beacon_block_body.rs
+++ b/consensus/types/src/beacon_block_body.rs
@@ -278,7 +278,7 @@ impl From>>
             voluntary_exits,
             sync_aggregate,
             execution_payload: BlindedPayloadMerge {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: From::from(&execution_payload),
             },
         },
         Some(execution_payload),
@@ -319,7 +319,7 @@ impl From>>
             voluntary_exits,
             sync_aggregate,
             execution_payload: BlindedPayloadCapella {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: From::from(&execution_payload),
             },
             bls_to_execution_changes,
         },
@@ -362,7 +362,7 @@ impl From>>
             voluntary_exits,
             sync_aggregate,
             execution_payload: BlindedPayloadEip4844 {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: From::from(&execution_payload),
             },
             bls_to_execution_changes,
             blob_kzg_commitments,
@@ -413,7 +413,7 @@ impl BeaconBlockBodyMerge> {
             voluntary_exits: voluntary_exits.clone(),
             sync_aggregate: sync_aggregate.clone(),
             execution_payload: BlindedPayloadMerge {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: execution_payload.into(),
             },
         }
     }
@@ -446,7 +446,7 @@ impl BeaconBlockBodyCapella> {
             voluntary_exits: voluntary_exits.clone(),
             sync_aggregate: sync_aggregate.clone(),
             execution_payload: BlindedPayloadCapella {
-                execution_payload_header: From::from(execution_payload.clone()),
+                execution_payload_header: execution_payload.into(),
             },
             bls_to_execution_changes: bls_to_execution_changes.clone(),
         }
     }
@@ -481,7 +481,7 @@ impl
BeaconBlockBodyEip4844> { voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), execution_payload: BlindedPayloadEip4844 { - execution_payload_header: From::from(execution_payload.clone()), + execution_payload_header: execution_payload.into(), }, bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index b44c14ded..e70b88427 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -301,8 +301,10 @@ where // Capella #[superstruct(only(Capella, Eip4844), partial_getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Eip4844), partial_getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. #[superstruct(only(Capella, Eip4844))] @@ -1853,3 +1855,19 @@ impl CompareFields for BeaconState { } } } + +impl ForkVersionDeserialize for BeaconState { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "BeaconState failed to deserialize: {:?}", + e + )))? + )) + } +} diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 818ec52b8..e922e81c7 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,6 +1,6 @@ use crate::{ - AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, - Uint256, + AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, ForkName, + ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; @@ -34,6 +34,60 @@ pub struct SignedBuilderBid> { pub signature: Signature, } +impl> ForkVersionDeserialize + for BuilderBid +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |_| { + serde::de::Error::custom( + "BuilderBid failed to deserialize: unable to convert payload header to payload", + ) + }; + + #[derive(Deserialize)] + struct Helper { + header: serde_json::Value, + #[serde(with = "eth2_serde_utils::quoted_u256")] + value: Uint256, + pubkey: PublicKeyBytes, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + let payload_header = + ExecutionPayloadHeader::deserialize_by_fork::<'de, D>(helper.header, fork_name)?; + + Ok(Self { + header: Payload::try_from(payload_header).map_err(convert_err)?, + value: helper.value, + pubkey: helper.pubkey, + _phantom_data: Default::default(), + }) + } +} + +impl> ForkVersionDeserialize + for SignedBuilderBid +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + struct Helper { + pub message: serde_json::Value, + pub signature: Signature, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + message: BuilderBid::deserialize_by_fork::<'de, D>(helper.message, fork_name)?, + signature: helper.signature, + }) + } +} + struct BlindedPayloadAsHeader(PhantomData); impl> SerializeAs for BlindedPayloadAsHeader { diff --git 
a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 1721960f8..6e055d0a7 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -35,7 +35,9 @@ pub type Withdrawals = VariableList::MaxWithdrawal arbitrary(bound = "T: EthSpec") ), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + map_into(FullPayload, BlindedPayload), + map_ref_into(ExecutionPayloadHeader) )] #[derive( Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, @@ -146,3 +148,26 @@ impl ExecutionPayload { + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) } } + +impl ForkVersionDeserialize for ExecutionPayload { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |e| { + serde::de::Error::custom(format!("ExecutionPayload failed to deserialize: {:?}", e)) + }; + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayload failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 42e44ed73..4dc79ddc9 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -159,40 +159,40 @@ impl ExecutionPayloadHeaderCapella { } } -impl From> for ExecutionPayloadHeaderMerge { - fn from(payload: ExecutionPayloadMerge) -> Self { +impl<'a, T: EthSpec> From<&'a ExecutionPayloadMerge> for ExecutionPayloadHeaderMerge { + fn from(payload: &'a ExecutionPayloadMerge) -> Self { Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, + logs_bloom: payload.logs_bloom.clone(), prev_randao: payload.prev_randao, block_number: payload.block_number, gas_limit: payload.gas_limit, gas_used: payload.gas_used, timestamp: payload.timestamp, - extra_data: payload.extra_data, + extra_data: payload.extra_data.clone(), base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), } } } -impl From> for ExecutionPayloadHeaderCapella { - fn from(payload: ExecutionPayloadCapella) -> Self { +impl<'a, T: EthSpec> From<&'a ExecutionPayloadCapella> for ExecutionPayloadHeaderCapella { + fn from(payload: &'a ExecutionPayloadCapella) -> Self { Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, + logs_bloom: payload.logs_bloom.clone(), prev_randao: payload.prev_randao, block_number: payload.block_number, gas_limit: payload.gas_limit, gas_used: payload.gas_used, timestamp: payload.timestamp, - extra_data: payload.extra_data, + extra_data: payload.extra_data.clone(), base_fee_per_gas: 
payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), @@ -200,20 +200,21 @@ impl From> for ExecutionPayloadHeaderCape } } } -impl From> for ExecutionPayloadHeaderEip4844 { - fn from(payload: ExecutionPayloadEip4844) -> Self { + +impl<'a, T: EthSpec> From<&'a ExecutionPayloadEip4844> for ExecutionPayloadHeaderEip4844 { + fn from(payload: &'a ExecutionPayloadEip4844) -> Self { Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, + logs_bloom: payload.logs_bloom.clone(), prev_randao: payload.prev_randao, block_number: payload.block_number, gas_limit: payload.gas_limit, gas_used: payload.gas_used, timestamp: payload.timestamp, - extra_data: payload.extra_data, + extra_data: payload.extra_data.clone(), base_fee_per_gas: payload.base_fee_per_gas, excess_data_gas: payload.excess_data_gas, block_hash: payload.block_hash, @@ -223,31 +224,33 @@ impl From> for ExecutionPayloadHeaderEip4 } } -impl From> for ExecutionPayloadHeader { - fn from(payload: ExecutionPayloadMerge) -> Self { - Self::Merge(ExecutionPayloadHeaderMerge::from(payload)) +// These impls are required to work around an inelegance in `to_execution_payload_header`. +// They only clone headers so they should be relatively cheap. +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge { + fn from(payload: &'a Self) -> Self { + payload.clone() } } -impl From> for ExecutionPayloadHeader { - fn from(payload: ExecutionPayloadCapella) -> Self { - Self::Capella(ExecutionPayloadHeaderCapella::from(payload)) +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderCapella { + fn from(payload: &'a Self) -> Self { + payload.clone() } } -impl From> for ExecutionPayloadHeader { - fn from(payload: ExecutionPayloadEip4844) -> Self { - Self::Eip4844(ExecutionPayloadHeaderEip4844::from(payload)) +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderEip4844 { + fn from(payload: &'a Self) -> Self { + payload.clone() } } -impl From> for ExecutionPayloadHeader { - fn from(payload: ExecutionPayload) -> Self { - match payload { - ExecutionPayload::Merge(payload) => Self::from(payload), - ExecutionPayload::Capella(payload) => Self::from(payload), - ExecutionPayload::Eip4844(payload) => Self::from(payload), - } +impl<'a, T: EthSpec> From> for ExecutionPayloadHeader { + fn from(payload: ExecutionPayloadRef<'a, T>) -> Self { + map_execution_payload_ref_into_execution_payload_header!( + &'a _, + payload, + |inner, cons| cons(inner.into()) + ) } } @@ -282,3 +285,29 @@ impl TryFrom> for ExecutionPayloadHeaderEi } } } + +impl ForkVersionDeserialize for ExecutionPayloadHeader { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |e| { + serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: {:?}", + e + )) + }; + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} diff --git 
a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs
new file mode 100644
index 000000000..07ff40b27
--- /dev/null
+++ b/consensus/types/src/fork_versioned_response.rs
@@ -0,0 +1,138 @@
+use crate::ForkName;
+use serde::de::DeserializeOwned;
+use serde::{Deserialize, Deserializer, Serialize};
+use serde_json::value::Value;
+use std::sync::Arc;
+
+// Deserialize is only implemented for types that implement ForkVersionDeserialize.
+#[derive(Debug, PartialEq, Clone, Serialize)]
+pub struct ExecutionOptimisticForkVersionedResponse<T> {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub version: Option<ForkName>,
+    pub execution_optimistic: Option<bool>,
+    pub data: T,
+}
+
+impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticForkVersionedResponse<F>
+where
+    F: ForkVersionDeserialize,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        struct Helper {
+            version: Option<ForkName>,
+            execution_optimistic: Option<bool>,
+            data: serde_json::Value,
+        }
+
+        let helper = Helper::deserialize(deserializer)?;
+        let data = match helper.version {
+            Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?,
+            None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?,
+        };
+
+        Ok(ExecutionOptimisticForkVersionedResponse {
+            version: helper.version,
+            execution_optimistic: helper.execution_optimistic,
+            data,
+        })
+    }
+}
+
+pub trait ForkVersionDeserialize: Sized + DeserializeOwned {
+    fn deserialize_by_fork<'de, D: Deserializer<'de>>(
+        value: Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error>;
+}
+
+// Deserialize is only implemented for types that implement ForkVersionDeserialize.
+#[derive(Debug, PartialEq, Clone, Serialize)]
+pub struct ForkVersionedResponse<T> {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub version: Option<ForkName>,
+    pub data: T,
+}
+
+impl<'de, F> serde::Deserialize<'de> for ForkVersionedResponse<F>
+where
+    F: ForkVersionDeserialize,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        struct Helper {
+            version: Option<ForkName>,
+            data: serde_json::Value,
+        }
+
+        let helper = Helper::deserialize(deserializer)?;
+        let data = match helper.version {
+            Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?,
+            None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?,
+        };
+
+        Ok(ForkVersionedResponse {
+            version: helper.version,
+            data,
+        })
+    }
+}
+
+impl<F: ForkVersionDeserialize> ForkVersionDeserialize for Arc<F> {
+    fn deserialize_by_fork<'de, D: Deserializer<'de>>(
+        value: Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        Ok(Arc::new(F::deserialize_by_fork::<'de, D>(
+            value, fork_name,
+        )?))
+    }
+}
+
+#[cfg(test)]
+mod fork_version_response_tests {
+    use crate::{
+        ExecutionPayload, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, MainnetEthSpec,
+    };
+    use serde_json::json;
+
+    #[test]
+    fn fork_versioned_response_deserialize_correct_fork() {
+        type E = MainnetEthSpec;
+
+        let response_json =
+            serde_json::to_string(&json!(ForkVersionedResponse::<ExecutionPayload<E>> {
+                version: Some(ForkName::Merge),
+                data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()),
+            }))
+            .unwrap();
+
+        let result: Result<ForkVersionedResponse<ExecutionPayload<E>>, _> =
+            serde_json::from_str(&response_json);
+
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn fork_versioned_response_deserialize_incorrect_fork() {
+        type E = MainnetEthSpec;
+
+        let response_json =
+            serde_json::to_string(&json!(ForkVersionedResponse::<ExecutionPayload<E>> {
+                version: Some(ForkName::Capella),
+                data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()),
+            }))
+            .unwrap();
+
+        let result: Result<ForkVersionedResponse<ExecutionPayload<E>>, _> =
+            serde_json::from_str(&response_json);
+
+        assert!(result.is_err());
+    }
+}
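The `ForkVersionedResponse` machinery above decodes in two stages: the envelope is deserialized with `data` kept as raw JSON, then the `version` tag selects the concrete type. A self-contained sketch of the pattern with toy types (`Fork`, `Envelope`, `MergePayload`, etc. are illustrative stand-ins, not Lighthouse types):

```rust
use serde::Deserialize;
use serde_json::Value;

#[derive(Debug, Clone, Copy, Deserialize)]
#[serde(rename_all = "lowercase")]
enum Fork {
    Merge,
    Capella,
}

#[derive(Deserialize)]
struct Envelope {
    version: Option<Fork>,
    data: Value, // left as raw JSON until the fork is known
}

#[derive(Debug, Deserialize)]
struct MergePayload {
    block_number: u64,
}

#[derive(Debug, Deserialize)]
struct CapellaPayload {
    block_number: u64,
    withdrawals: Vec<u64>,
}

#[derive(Debug)]
enum Payload {
    Merge(MergePayload),
    Capella(CapellaPayload),
}

fn decode(json: &str) -> Result<Payload, serde_json::Error> {
    let envelope: Envelope = serde_json::from_str(json)?;
    // The version tag, not the shape of `data`, picks the variant; a Capella
    // tag over Merge-shaped data fails, as the tests above demonstrate.
    match envelope.version {
        Some(Fork::Capella) => Ok(Payload::Capella(serde_json::from_value(envelope.data)?)),
        // No tag: fall back to plain deserialization of the default variant.
        Some(Fork::Merge) | None => Ok(Payload::Merge(serde_json::from_value(envelope.data)?)),
    }
}

fn main() {
    let ok = r#"{"version":"merge","data":{"block_number":1}}"#;
    println!("{:?}", decode(ok).unwrap());

    let mismatched = r#"{"version":"capella","data":{"block_number":1}}"#;
    assert!(decode(mismatched).is_err()); // missing `withdrawals`
}
```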
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 49829389c..4e1951222 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -46,6 +46,7 @@ pub mod execution_payload_header;
 pub mod fork;
 pub mod fork_data;
 pub mod fork_name;
+pub mod fork_versioned_response;
 pub mod free_attestation;
 pub mod graffiti;
 pub mod historical_batch;
@@ -150,6 +151,9 @@ pub use crate::fork::Fork;
 pub use crate::fork_context::ForkContext;
 pub use crate::fork_data::ForkData;
 pub use crate::fork_name::{ForkName, InconsistentFork};
+pub use crate::fork_versioned_response::{
+    ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse,
+};
 pub use crate::free_attestation::FreeAttestation;
 pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN};
 pub use crate::historical_batch::HistoricalBatch;
diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs
index 9b7d3417f..cc22bc3ab 100644
--- a/consensus/types/src/payload.rs
+++ b/consensus/types/src/payload.rs
@@ -4,6 +4,7 @@ use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
+use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::fmt::Debug;
 use std::hash::Hash;
@@ -90,15 +91,15 @@ pub trait AbstractExecPayload:
     type Merge: OwnedExecPayload<T>
         + Into<Self>
-        + From<ExecutionPayloadMerge<T>>
+        + for<'a> From<Cow<'a, ExecutionPayloadMerge<T>>>
         + TryFrom<ExecutionPayloadHeaderMerge<T>>;
     type Capella: OwnedExecPayload<T>
         + Into<Self>
-        + From<ExecutionPayloadCapella<T>>
+        + for<'a> From<Cow<'a, ExecutionPayloadCapella<T>>>
         + TryFrom<ExecutionPayloadHeaderCapella<T>>;
     type Eip4844: OwnedExecPayload<T>
         + Into<Self>
-        + From<ExecutionPayloadEip4844<T>>
+        + for<'a> From<Cow<'a, ExecutionPayloadEip4844<T>>>
        + TryFrom<ExecutionPayloadHeaderEip4844<T>>;

     fn default_at_fork(fork_name: ForkName) -> Result<Self, Error>;
@@ -150,31 +151,21 @@ pub struct FullPayload {

 impl<T: EthSpec> From<FullPayload<T>> for ExecutionPayload<T> {
     fn from(full_payload: FullPayload<T>) -> Self {
-        match full_payload {
-            FullPayload::Merge(payload) => ExecutionPayload::Merge(payload.execution_payload),
-            FullPayload::Capella(payload) => ExecutionPayload::Capella(payload.execution_payload),
-            FullPayload::Eip4844(payload) => ExecutionPayload::Eip4844(payload.execution_payload),
-        }
+        map_full_payload_into_execution_payload!(full_payload, move |payload, cons| {
+            cons(payload.execution_payload)
+        })
     }
 }

 impl<'a, T: EthSpec> From<FullPayloadRef<'a, T>> for ExecutionPayload<T> {
     fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self {
-        match full_payload_ref {
-            FullPayloadRef::Merge(payload) => {
-                ExecutionPayload::Merge(payload.execution_payload.clone())
-            }
-            FullPayloadRef::Capella(payload) => {
-                ExecutionPayload::Capella(payload.execution_payload.clone())
-            }
-            FullPayloadRef::Eip4844(payload) => {
-                ExecutionPayload::Eip4844(payload.execution_payload.clone())
-            }
-        }
+        map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| {
+            cons(payload);
+            payload.execution_payload.clone().into()
+        })
     }
 }

-// FIXME: can this be implemented as Deref or Clone somehow?
impl<'a, T: EthSpec> From> for FullPayload { fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| { @@ -189,11 +180,12 @@ impl ExecPayload for FullPayload { BlockType::Full } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - let payload = map_full_payload_into_execution_payload!(self.clone(), |inner, cons| { - cons(inner.execution_payload) - }); - ExecutionPayloadHeader::from(payload) + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_full_payload_ref!(&'a _, self.to_ref(), move |inner, cons| { + cons(inner); + let exec_payload_ref: ExecutionPayloadRef<'a, T> = From::from(&inner.execution_payload); + ExecutionPayloadHeader::from(exec_payload_ref) + }) } fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { @@ -404,17 +396,9 @@ impl AbstractExecPayload for FullPayload { impl From> for FullPayload { fn from(execution_payload: ExecutionPayload) -> Self { - match execution_payload { - ExecutionPayload::Merge(execution_payload) => { - Self::Merge(FullPayloadMerge { execution_payload }) - } - ExecutionPayload::Capella(execution_payload) => { - Self::Capella(FullPayloadCapella { execution_payload }) - } - ExecutionPayload::Eip4844(execution_payload) => { - Self::Eip4844(FullPayloadEip4844 { execution_payload }) - } - } + map_execution_payload_into_full_payload!(execution_payload, |inner, cons| { + cons(inner.into()) + }) } } @@ -666,6 +650,7 @@ macro_rules! impl_exec_payload_common { $wrapped_field:ident, // execution_payload_header | execution_payload $fork_variant:ident, // Merge | Merge $block_type_variant:ident, // Blinded | Full + $is_default_with_empty_roots:block, $f:block, $g:block) => { impl ExecPayload for $wrapper_type { @@ -675,7 +660,7 @@ macro_rules! impl_exec_payload_common { fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { ExecutionPayloadHeader::$fork_variant($wrapped_type_header::from( - self.$wrapped_field.clone(), + &self.$wrapped_field, )) } @@ -712,15 +697,8 @@ macro_rules! impl_exec_payload_common { } fn is_default_with_empty_roots(&self) -> bool { - // FIXME: is there a better way than ignoring this lint? - // This is necessary because the first invocation of this macro might expand to: - // self.execution_payload_header == ExecutionPayloadHeaderMerge::from(ExecutionPayloadMerge::default()) - // but the second invocation might expand to: - // self.execution_payload == ExecutionPayloadMerge::from(ExecutionPayloadMerge::default()) - #[allow(clippy::cmp_owned)] - { - self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default()) - } + let f = $is_default_with_empty_roots; + f(self) } fn transactions(&self) -> Option<&Transactions> { @@ -755,6 +733,12 @@ macro_rules! impl_exec_payload_for_fork { execution_payload_header, $fork_variant, // Merge Blinded, + { + |wrapper: &$wrapper_type_header| { + wrapper.execution_payload_header + == $wrapped_type_header::from(&$wrapped_type_full::default()) + } + }, { |_| { None } }, { let c: for<'a> fn(&'a $wrapper_type_header) -> Result = @@ -788,7 +772,7 @@ macro_rules! impl_exec_payload_for_fork { fn default() -> Self { Self { execution_payload_header: $wrapped_type_header::from( - $wrapped_type_full::default(), + &$wrapped_type_full::default(), ), } } @@ -806,11 +790,11 @@ macro_rules! 
impl_exec_payload_for_fork {
             }
         }

-        // FIXME(sproul): consider adding references to these From impls
-        impl<T: EthSpec> From<$wrapped_type_full<T>> for $wrapper_type_header<T> {
-            fn from(execution_payload: $wrapped_type_full<T>) -> Self {
+        // BlindedPayload* from CoW reference to ExecutionPayload* (hopefully just a reference).
+        impl<'a, T: EthSpec> From<Cow<'a, $wrapped_type_full<T>>> for $wrapper_type_header<T> {
+            fn from(execution_payload: Cow<'a, $wrapped_type_full<T>>) -> Self {
                 Self {
-                    execution_payload_header: $wrapped_type_header::from(execution_payload),
+                    execution_payload_header: $wrapped_type_header::from(&*execution_payload),
                 }
             }
         }
@@ -825,6 +809,11 @@ macro_rules! impl_exec_payload_for_fork {
             execution_payload,
             $fork_variant, // Merge
             Full,
+            {
+                |wrapper: &$wrapper_type_full<T>| {
+                    wrapper.execution_payload == $wrapped_type_full::default()
+                }
+            },
             {
                 let c: for<'a> fn(&'a $wrapper_type_full<T>) -> Option<&'a Transactions<T>> =
                     |payload: &$wrapper_type_full<T>| Some(&payload.execution_payload.transactions);
@@ -848,6 +837,15 @@ macro_rules! impl_exec_payload_for_fork {
             }
         }

+        // FullPayload* from CoW reference to ExecutionPayload* (hopefully already owned).
+        impl<'a, T: EthSpec> From<Cow<'a, $wrapped_type_full<T>>> for $wrapper_type_full<T> {
+            fn from(execution_payload: Cow<'a, $wrapped_type_full<T>>) -> Self {
+                Self {
+                    execution_payload: $wrapped_type_full::from(execution_payload.into_owned()),
+                }
+            }
+        }
+
         impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for $wrapper_type_full<T> {
             type Error = Error;
             fn try_from(_: ExecutionPayloadHeader<T>) -> Result<Self, Self::Error> {
@@ -915,11 +913,12 @@ impl AbstractExecPayload for BlindedPayload {

 impl<T: EthSpec> From<ExecutionPayload<T>> for BlindedPayload<T> {
     fn from(payload: ExecutionPayload<T>) -> Self {
-        match payload {
-            ExecutionPayload::Merge(payload) => BlindedPayload::Merge(payload.into()),
-            ExecutionPayload::Capella(payload) => BlindedPayload::Capella(payload.into()),
-            ExecutionPayload::Eip4844(payload) => BlindedPayload::Eip4844(payload.into()),
-        }
+        // This implementation is a bit wasteful in that it discards the payload body.
+        // It is required by the top-level constraint on AbstractExecPayload but could maybe be
+        // loosened in future.
+        map_execution_payload_into_blinded_payload!(payload, |inner, cons| cons(From::from(
+            Cow::Owned(inner)
+        )))
     }
 }
diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs
index f147cefe9..ae59690bf 100644
--- a/consensus/types/src/signed_beacon_block.rs
+++ b/consensus/types/src/signed_beacon_block.rs
@@ -522,6 +522,24 @@ impl SignedBeaconBlock {
     }
 }

+impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
+    for SignedBeaconBlock<T, Payload>
+{
+    fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
+        value: serde_json::value::Value,
+        fork_name: ForkName,
+    ) -> Result<Self, D::Error> {
+        Ok(map_fork_name!(
+            fork_name,
+            Self,
+            serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!(
+                "SignedBeaconBlock failed to deserialize: {:?}",
+                e
+            )))?
+        ))
+    }
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
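The `Cow`-based `From` impls introduced in `payload.rs` above are the heart of this refactor: header construction only ever needs `&ExecutionPayload*`, so blinded conversions can borrow, while full conversions take ownership and clone only when handed a borrow. A toy sketch of the design choice (all types are stand-ins, not the real payload structs):

```rust
use std::borrow::Cow;

#[derive(Clone)]
struct Payload {
    extra_data: Vec<u8>, // stands in for the large variable-length fields
}

struct Header {
    extra_data: Vec<u8>,
}

// Header construction needs only a reference to the payload.
impl<'a> From<&'a Payload> for Header {
    fn from(payload: &'a Payload) -> Self {
        Header { extra_data: payload.extra_data.clone() }
    }
}

struct Blinded { header: Header }
struct Full { payload: Payload }

impl<'a> From<Cow<'a, Payload>> for Blinded {
    fn from(payload: Cow<'a, Payload>) -> Self {
        // Borrowed or owned, only a reference is needed: no payload clone.
        Blinded { header: Header::from(&*payload) }
    }
}

impl<'a> From<Cow<'a, Payload>> for Full {
    fn from(payload: Cow<'a, Payload>) -> Self {
        // Clones only if the Cow was borrowed.
        Full { payload: payload.into_owned() }
    }
}

fn main() {
    let p = Payload { extra_data: vec![0; 32] };
    let blinded = Blinded::from(Cow::Borrowed(&p)); // cheap: header only
    let full = Full::from(Cow::Owned(p));           // moves, no clone
    println!("{} / {}", blinded.header.extra_data.len(), full.payload.extra_data.len());
}
```

A single `From<Cow<...>>` bound thus serves both wrapper kinds, which is what lets the `AbstractExecPayload` trait require one conversion instead of an owned-and-borrowed pair.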
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index d6cdbd2aa..da1f5036d 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -1079,6 +1079,19 @@ fn http_port_flag() {
         .with_config(|config| assert_eq!(config.http_api.listen_port, port1));
 }
 #[test]
+fn empty_self_limiter_flag() {
+    // Test that an empty `--self-limiter` flag is accepted, using the default
+    // rate limiting configuration.
+    CommandLineTest::new()
+        .flag("self-limiter", None)
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.network.outbound_rate_limiter_config,
+                Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default())
+            )
+        });
+}
+#[test]
 fn http_allow_origin_flag() {
     CommandLineTest::new()
         .flag("http-allow-origin", Some("127.0.0.99"))
diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs
index c82a1a9d3..9142a0c7e 100644
--- a/validator_client/src/cli.rs
+++ b/validator_client/src/cli.rs
@@ -231,6 +231,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 address of this server (e.g., http://localhost:5064).")
                 .takes_value(true),
         )
+        .arg(
+            Arg::with_name("enable-high-validator-count-metrics")
+                .long("enable-high-validator-count-metrics")
+                .help("Enable per-validator metrics for validator counts greater than 64. \
+                    Note: per-validator metrics are always enabled for 64 or fewer validators. \
+                    Enabling them for higher validator counts will lead to a higher volume \
+                    of prometheus metrics being collected.")
+                .takes_value(false),
+        )
         /*
          * Explorer metrics
          */
diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs
index 22741dabb..0f24e81d5 100644
--- a/validator_client/src/config.rs
+++ b/validator_client/src/config.rs
@@ -53,6 +53,11 @@ pub struct Config {
     /// If true, enable functionality that monitors the network for attestations or proposals from
     /// any of the validators managed by this client before starting up.
     pub enable_doppelganger_protection: bool,
    /// If true, then we publish validator-specific metrics (e.g. the next attestation duty slot)
+    /// for all our managed validators.
+    /// Note: we always publish validator-specific metrics for low validator counts
+    /// (<= 64 validators) without this flag.
+    pub enable_high_validator_count_metrics: bool,
     /// Enable use of the blinded block endpoints during proposals.
     pub builder_proposals: bool,
     /// Overrides the timestamp field in builder api ValidatorRegistrationV1
@@ -99,6 +104,7 @@ impl Default for Config {
             http_metrics: <_>::default(),
             monitoring_api: None,
             enable_doppelganger_protection: false,
+            enable_high_validator_count_metrics: false,
             beacon_nodes_tls_certs: None,
             block_delay: None,
             builder_proposals: false,
@@ -273,6 +279,10 @@ impl Config {
             config.http_metrics.enabled = true;
         }

+        if cli_args.is_present("enable-high-validator-count-metrics") {
+            config.enable_high_validator_count_metrics = true;
+        }
+
         if let Some(address) = cli_args.value_of("metrics-address") {
             config.http_metrics.listen_addr = address
                 .parse::()
diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs
index 86b8ca870..6ba2a2d1f 100644
--- a/validator_client/src/duties_service.rs
+++ b/validator_client/src/duties_service.rs
@@ -9,6 +9,7 @@ mod sync;

 use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced};
+use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY};
 use crate::{
     block_service::BlockServiceNotification,
     http_metrics::metrics,
@@ -39,6 +40,11 @@ const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2;
 /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch.
 const HISTORICAL_DUTIES_EPOCHS: u64 = 2;

+/// Minimum number of validators for which we auto-enable per-validator metrics.
+/// For validator counts greater than this value, the `enable-high-validator-count-metrics`
+/// flag must be set in the CLI to enable collection of per-validator metrics.
+const VALIDATOR_METRICS_MIN_COUNT: usize = 64;
+
 #[derive(Debug)]
 pub enum Error {
     UnableToReadSlotClock,
@@ -121,6 +127,7 @@ pub struct DutiesService {
     /// This functionality is a little redundant since most BNs will likely reject duties when they
     /// aren't synced, but we keep it around for an emergency.
     pub require_synced: RequireSynced,
+    pub enable_high_validator_count_metrics: bool,
     pub context: RuntimeContext<E>,
     pub spec: ChainSpec,
 }
@@ -220,6 +227,12 @@ impl DutiesService {
             .cloned()
             .collect()
     }
+
+    /// Returns `true` if we should collect per-validator metrics and `false` otherwise.
+    pub fn per_validator_metrics(&self) -> bool {
+        self.enable_high_validator_count_metrics
+            || self.total_validator_count() <= VALIDATOR_METRICS_MIN_COUNT
+    }
 }

 /// Start the service that periodically polls the beacon node for validator duties. This will start
@@ -501,6 +514,7 @@ async fn poll_beacon_attesters(
             current_epoch,
             &local_indices,
             &local_pubkeys,
+            current_slot,
         )
         .await
     {
@@ -520,9 +534,14 @@
     );

     // Download the duties and update the duties for the next epoch.
-    if let Err(e) =
-        poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys)
-            .await
+    if let Err(e) = poll_beacon_attesters_for_epoch(
+        duties_service,
+        next_epoch,
+        &local_indices,
+        &local_pubkeys,
+        current_slot,
+    )
+    .await
     {
         error!(
             log,
@@ -619,6 +638,7 @@ async fn poll_beacon_attesters_for_epoch(
     epoch: Epoch,
     local_indices: &[u64],
     local_pubkeys: &HashSet<PublicKeyBytes>,
+    current_slot: Slot,
 ) -> Result<(), Error> {
     let log = duties_service.context.log();
@@ -671,6 +691,35 @@
         .data
         .into_iter()
         .filter(|duty| {
+            if duties_service.per_validator_metrics() {
+                let validator_index = duty.validator_index;
+                let duty_slot = duty.slot;
+                if let Some(existing_slot_gauge) =
+                    get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()])
+                {
+                    let existing_slot = Slot::new(existing_slot_gauge.get() as u64);
+                    let existing_epoch = existing_slot.epoch(E::slots_per_epoch());
+
+                    // The first condition ensures that we switch to the next epoch's duty
+                    // slot once the current epoch's duty slot has passed.
+                    // The second condition ensures that next-epoch duties don't override
+                    // current-epoch duties.
+                    if existing_slot < current_slot
+                        || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch
+                            && duty_slot > current_slot
+                            && duty_slot != existing_slot)
+                    {
+                        existing_slot_gauge.set(duty_slot.as_u64() as i64);
+                    }
+                } else {
+                    set_int_gauge(
+                        &ATTESTATION_DUTY,
+                        &[&validator_index.to_string()],
+                        duty_slot.as_u64() as i64,
+                    );
+                }
+            }
+
             local_pubkeys.contains(&duty.pubkey) && {
                 // Only update the duties if either is true:
                 //
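The gauge-update conditions in the filter above are subtle, so here they are distilled into a standalone predicate (plain `u64` slots instead of `Slot`/`Epoch`, with 32-slot epochs assumed):

```rust
const SLOTS_PER_EPOCH: u64 = 32;

fn epoch(slot: u64) -> u64 {
    slot / SLOTS_PER_EPOCH
}

/// Should `existing_slot` (the gauge's current value) be replaced by
/// `duty_slot` (a freshly downloaded duty), given `current_slot`?
fn should_update(existing_slot: u64, duty_slot: u64, current_slot: u64) -> bool {
    // 1. The recorded duty slot has passed: roll over to the new duty.
    existing_slot < current_slot
        // 2. A same-or-earlier-epoch duty for a future slot differs from the
        //    recorded one (e.g. a reshuffled duty), but next-epoch duties must
        //    never clobber a still-pending current-epoch duty.
        || (epoch(duty_slot) <= epoch(existing_slot)
            && duty_slot > current_slot
            && duty_slot != existing_slot)
}

fn main() {
    // Current-epoch duty at slot 40 is recorded and we are at slot 35: a
    // next-epoch duty at slot 70 must not overwrite it...
    assert!(!should_update(40, 70, 35));
    // ...but once slot 41 arrives, the gauge rolls over to the next duty.
    assert!(should_update(40, 70, 41));
}
```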
{ "Duration to obtain a signature", &["type"] ); + + pub static ref ATTESTATION_DUTY: Result = try_create_int_gauge_vec( + "vc_attestation_duty_slot", + "Attestation duty slot for all managed validators", + &["validator"] + ); } pub fn gather_prometheus_metrics( diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 00c3db7aa..f2d647490 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -422,6 +422,7 @@ impl ProductionValidatorClient { }, spec: context.eth2_config.spec.clone(), context: duties_context, + enable_high_validator_count_metrics: config.enable_high_validator_count_metrics, }); // Update the metrics server.