diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index bbe480a94..dac892139 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -49,7 +49,7 @@ jobs:
       VERSION: ${{ env.VERSION }}
       VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
   build-docker-single-arch:
-    name: build-docker-${{ matrix.binary }}
+    name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }}
     runs-on: ubuntu-22.04
     strategy:
       matrix:
@@ -57,6 +57,10 @@
                  aarch64-portable,
                  x86_64,
                  x86_64-portable]
+        features: [
+          {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"},
+          {version_suffix: "-dev", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc,spec-minimal"}
+        ]
         include:
           - profile: maxperf
@@ -66,7 +70,9 @@
       DOCKER_CLI_EXPERIMENTAL: enabled
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
-      CROSS_FEATURES: null
+      FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
+      FEATURES: ${{ matrix.features.env }}
+      CROSS_FEATURES: ${{ matrix.features.env }}
     steps:
       - uses: actions/checkout@v3
       - name: Update Rust
@@ -77,7 +83,7 @@
       - name: Cross build Lighthouse binary
        run: |
          cargo install cross
-         env CROSS_PROFILE=${{ matrix.profile }} make build-${{ matrix.binary }}
+         env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }}
      - name: Move cross-built binary into Docker scope (if ARM)
        if: startsWith(matrix.binary, 'aarch64')
        run: |
@@ -105,7 +111,8 @@
          docker buildx build \
            --platform=linux/${SHORT_ARCH} \
            --file ./Dockerfile.cross . \
-           --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \
+           --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \
+           --build-arg FEATURES=${FEATURES} \
            --provenance=false \
            --push
   build-docker-multiarch:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8ca6ab0f9..2e63b4d6c 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -134,11 +134,17 @@
       - name: Build Lighthouse for Windows portable
        if: matrix.arch == 'x86_64-windows-portable'
-       run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }}
+       # NOTE: profile set to release until this rustc issue is fixed:
+       #
+       # https://github.com/rust-lang/rust/issues/107781
+       #
+       # tracked at: https://github.com/sigp/lighthouse/issues/3964
+       run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile release

       - name: Build Lighthouse for Windows modern
        if: matrix.arch == 'x86_64-windows'
-       run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }}
+       # NOTE: profile set to release (see above)
+       run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile release

       - name: Configure GPG and create artifacts
        if: startsWith(matrix.arch, 'x86_64-windows') != true
diff --git a/Cargo.lock b/Cargo.lock
index 014e03d25..1d8d88f38 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -205,9 +205,9 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.68"
+version = "1.0.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61"
+checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"

 [[package]]
 name = "arbitrary"
@@ -244,7 +244,7 @@ dependencies = [
"asn1-rs-derive 0.1.0", "asn1-rs-impl", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-traits", "rusticata-macros", "thiserror", @@ -260,7 +260,7 @@ dependencies = [ "asn1-rs-derive 0.4.0", "asn1-rs-impl", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-traits", "rusticata-macros", "thiserror", @@ -325,7 +325,7 @@ dependencies = [ "slab", "socket2", "waker-fn", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -361,9 +361,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.61" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282" +checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" dependencies = [ "proc-macro2", "quote", @@ -396,9 +396,9 @@ dependencies = [ [[package]] name = "atomic-waker" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" +checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" [[package]] name = "attohttpc" @@ -529,6 +529,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64ct" version = "1.5.3" @@ -844,9 +850,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "byte-slice-cast" @@ -862,9 +868,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" dependencies = [ "serde", ] @@ -913,9 +919,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "ccm" @@ -934,7 +940,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.2", + "nom 7.1.3", ] [[package]] @@ -1120,9 +1126,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ "crossbeam-utils", ] @@ -1191,18 +1197,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc" -version = "3.0.0" +version = 
"3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53757d12b596c16c78b83458d732a5d1a17ab3f53f2f7412f6fb57cc8a140ab3" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0165d2900ae6778e36e80bbc4da3b5eefccee9ba939761f9c2882a5d9af3ff" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crc32fast" @@ -1392,12 +1398,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.4" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1631ca6e3c59112501a9d87fd86f21591ff77acd31331e8a73f8d80a65bbdd71" +checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" dependencies = [ - "nix 0.26.1", - "windows-sys", + "nix 0.26.2", + "windows-sys 0.45.0", ] [[package]] @@ -1415,9 +1421,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-pre.5" +version = "4.0.0-rc.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67bc65846be335cb20f4e52d49a437b773a2c1fdb42b19fc84e79e6f6771536f" +checksum = "8da00a7a9a4eb92a0a0f8e75660926d48f0d0f3c537e455c457bcdaa1e16b1ac" dependencies = [ "cfg-if", "fiat-crypto", @@ -1429,9 +1435,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d1075c37807dcf850c379432f0df05ba52cc30f279c5cfc43cc221ce7f8579" +checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9" dependencies = [ "cc", "cxxbridge-flags", @@ -1441,9 +1447,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5044281f61b27bc598f2f6647d480aed48d2bf52d6eb0b627d84c0361b17aa70" +checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d" dependencies = [ "cc", "codespan-reporting", @@ -1456,15 +1462,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b50bc93ba22c27b0d31128d2d130a0a6b3d267ae27ef7e4fae2167dfe8781c" +checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a" [[package]] name = "cxxbridge-macro" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e61fda7e62115119469c7b3591fd913ecca96fb766cfd3f2e2502ab7bc87a5" +checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2" dependencies = [ "proc-macro2", "quote", @@ -1483,12 +1489,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" dependencies = [ - "darling_core 0.14.2", - "darling_macro 0.14.2", + "darling_core 0.14.3", + "darling_macro 0.14.3", ] [[package]] @@ -1507,9 +1513,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" dependencies = [ "fnv", "ident_case", @@ -1532,11 +1538,11 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" dependencies = [ - "darling_core 0.14.2", + "darling_core 0.14.3", "quote", "syn", ] @@ -1654,7 +1660,7 @@ checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" dependencies = [ "asn1-rs 0.3.1", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-bigint", "num-traits", "rusticata-macros", @@ -1668,7 +1674,7 @@ checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" dependencies = [ "asn1-rs 0.5.1", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-bigint", "num-traits", "rusticata-macros", @@ -1690,7 +1696,7 @@ name = "derive_arbitrary" version = "1.2.2" source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" dependencies = [ - "darling 0.14.2", + "darling 0.14.3", "proc-macro2", "quote", "syn", @@ -1711,7 +1717,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" dependencies = [ - "darling 0.14.2", + "darling 0.14.3", "proc-macro2", "quote", "syn", @@ -1875,9 +1881,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ "signature", ] @@ -1929,9 +1935,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" @@ -1957,9 +1963,9 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] @@ -1970,7 +1976,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" dependencies = [ - "base64", + "base64 0.13.1", "bs58", "bytes", "ed25519-dalek", @@ -2147,7 +2153,7 @@ dependencies = [ name = "eth2_interop_keypairs" version = "0.2.0" dependencies = [ - "base64", + "base64 0.13.1", "bls", "eth2_hashing", "hex", @@ -2426,7 +2432,7 @@ checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", "auto_impl", - "base64", + "base64 0.13.1", "ethers-core", "futures-channel", "futures-core", @@ -2607,7 +2613,8 @@ dependencies = [ [[package]] name = "fixed-hash" version = "0.7.0" -source = 
"git+https://github.com/paritytech/parity-common?rev=df638ab0885293d21d656dc300d39236b69ce57d#df638ab0885293d21d656dc300d39236b69ce57d" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", "rand 0.8.5", @@ -2700,12 +2707,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "fs_extra" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" - [[package]] name = "funty" version = "1.1.0" @@ -2720,9 +2721,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" dependencies = [ "futures-channel", "futures-core", @@ -2735,9 +2736,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" dependencies = [ "futures-core", "futures-sink", @@ -2745,15 +2746,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" dependencies = [ "futures-core", "futures-task", @@ -2763,9 +2764,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" [[package]] name = "futures-lite" @@ -2784,9 +2785,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" dependencies = [ "proc-macro2", "quote", @@ -2800,21 +2801,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.20.7", + "rustls 0.20.8", "webpki 0.22.0", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" [[package]] name = "futures-timer" @@ -2824,9 +2825,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" dependencies = [ "futures-channel", "futures-core", @@ -2928,9 +2929,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" +checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" [[package]] name = "git-version" @@ -3053,7 +3054,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "bytes", "headers-core", @@ -3074,9 +3075,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -3292,9 +3293,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" dependencies = [ "bytes", "futures-channel", @@ -3322,7 +3323,7 @@ checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "tokio-rustls 0.23.4", ] @@ -3469,7 +3470,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.2.1", + "parity-scale-codec 3.3.0", ] [[package]] @@ -3621,12 +3622,11 @@ dependencies = [ [[package]] name = "jemalloc-sys" -version = "0.5.2+5.3.0-patched" +version = "0.5.3+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134163979b6eed9564c98637b710b40979939ba351f59952708234ea11b5f3f8" +checksum = "f9bd5d616ea7ed58b571b2e209a65759664d7fb021a0819d7a790afc67e47ca1" dependencies = [ "cc", - "fs_extra", "libc", ] @@ -3642,9 +3642,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] @@ -3670,7 +3670,7 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" dependencies = [ - "base64", + "base64 0.13.1", "pem", "ring", "serde", @@ -3972,7 +3972,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a173171c71c29bb156f98886c7c4824596de3903dadf01e2e79d2ccdcf38cd9f" dependencies = [ "asynchronous-codec", - "base64", + "base64 0.13.1", "byteorder", "bytes", "fnv", @@ -4123,7 +4123,7 @@ dependencies = [ "parking_lot 0.12.1", "quinn-proto", "rand 0.8.5", - "rustls 0.20.7", + "rustls 0.20.8", "thiserror", "tokio", ] @@ -4188,7 +4188,7 @@ dependencies = [ "libp2p-core 0.38.0", "rcgen 0.10.0", "ring", - "rustls 0.20.7", + "rustls 0.20.8", "thiserror", "webpki 0.22.0", "x509-parser 0.14.0", @@ -4266,7 +4266,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", - "base64", + "base64 0.13.1", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", @@ -4589,9 +4589,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" @@ -4747,7 +4747,7 @@ dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -4948,9 +4948,9 @@ dependencies = [ [[package]] name = "netlink-packet-utils" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25af9cf0dc55498b7bd94a1508af7a78706aa0ab715a73c5169273e03c84845e" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" dependencies = [ "anyhow", "byteorder", @@ -4975,9 +4975,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b654097027250401127914afb37cb1f311df6610a9891ff07a757e94199027" +checksum = "260e21fbb6f3d253a14df90eb0000a6066780a15dd901a7519ce02d77a94985b" dependencies = [ "bytes", "futures", @@ -4998,6 +4998,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_types", "ethereum-types 0.14.1", + "execution_layer", "exit-future", "fnv", "futures", @@ -5058,9 +5059,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a58d1d356c6597d08cde02c2f09d785b09e28711837b1ed667dc652c08a694" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ "bitflags", "cfg-if", @@ -5097,9 +5098,9 @@ checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" [[package]] name = "nom" -version = "7.1.2" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5507769c4919c998e69e49c839d9dc6e693ede4cc4290d6ad8b41d4f09c548c" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -5205,9 +5206,9 @@ dependencies = [ [[package]] name = "object" -version = "0.30.1" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d864c91689fdc196779b98dba0aceac6118594c2df6ee5d943eb6a8df4d107a" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = 
[ "memchr", ] @@ -5314,9 +5315,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.24.0+1.1.1s" +version = "111.25.0+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3498f259dab01178c6228c6b00dcef0ed2a2d5e20d648c017861227773ea4abd" +checksum = "3173cd3626c43e3854b1b727422a276e568d9ec5fe8cec197822cf52cfb743d6" dependencies = [ "cc", ] @@ -5413,15 +5414,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.1.3", + "parity-scale-codec-derive 3.1.4", "serde", ] @@ -5439,9 +5440,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5473,7 +5474,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.5", + "parking_lot_core 0.9.7", ] [[package]] @@ -5492,15 +5493,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] @@ -5535,11 +5536,11 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] @@ -5559,9 +5560,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.2" +version = "2.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" +checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" dependencies = [ "thiserror", "ucd-trie", @@ -5569,9 +5570,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", @@ -5692,7 +5693,7 @@ dependencies = [ "libc", "log", "wepoll-ffi", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -5814,9 +5815,9 @@ checksum = 
"dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" dependencies = [ "unicode-ident", ] @@ -5873,9 +5874,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c01db6702aa05baa3f57dec92b8eeeeb4cb19e894e73996b32a4093289e54592" +checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" dependencies = [ "bytes", "prost-derive", @@ -5883,9 +5884,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6" +checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" dependencies = [ "bytes", "heck", @@ -5918,9 +5919,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8842bad1a5419bca14eac663ba798f6bc19c413c2fdceb5f3ba3b0932d96720" +checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" dependencies = [ "anyhow", "itertools", @@ -5931,9 +5932,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017f79637768cde62820bc2d4fe0e45daaa027755c323ad077767c6c5f173091" +checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" dependencies = [ "bytes", "prost", @@ -6027,7 +6028,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustc-hash", - "rustls 0.20.7", + "rustls 0.20.8", "slab", "thiserror", "tinyvec", @@ -6169,9 +6170,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" +checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -6261,11 +6262,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.13" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" dependencies = [ - "base64", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -6284,7 +6285,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite 0.2.9", - "rustls 0.20.7", + "rustls 0.20.8", "rustls-pemfile", "serde", "serde_json", @@ -6297,6 +6298,7 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "webpki-roots", "winreg", @@ -6481,7 +6483,7 @@ version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" dependencies = [ - "nom 7.1.2", + "nom 7.1.3", ] [[package]] @@ -6490,7 +6492,7 @@ version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64", + "base64 0.13.1", "log", "ring", "sct 0.6.1", @@ -6499,9 +6501,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.7" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -6511,11 +6513,11 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64", + "base64 0.21.0", ] [[package]] @@ -6577,7 +6579,7 @@ checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.2.1", + "parity-scale-codec 3.3.0", "scale-info-derive", ] @@ -6599,7 +6601,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -6707,9 +6709,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -6720,9 +6722,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -6769,9 +6771,9 @@ dependencies = [ [[package]] name = "send_wrapper" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "sensitive_url" @@ -6833,9 +6835,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" dependencies = [ "itoa 1.0.5", "ryu", @@ -7245,14 +7247,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" +checksum = "12ba5f4d4ff12bdb6a169ed51b7c48c0e0ac4b0b4b31012b2571e97d78d3201d" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-pre.5", + "curve25519-dalek 4.0.0-rc.0", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7276,7 +7278,7 @@ version = "0.7.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "base64", + "base64 0.13.1", "bytes", "flate2", "futures", @@ -7436,7 +7438,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" dependencies = [ - "base64", + "base64 0.13.1", "crc", "lazy_static", "md-5", @@ -7514,9 +7516,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "synstructure" @@ -7532,9 +7534,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.26.8" +version = "0.26.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29ddf41e393a9133c81d5f0974195366bd57082deac6e0eb02ed39b8341c2bb6" +checksum = "5c18a6156d1f27a9592ee18c1a846ca8dd5c258b7179fc193ae87c74ebb666f5" dependencies = [ "cfg-if", "core-foundation-sys", @@ -7644,9 +7646,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] @@ -7814,15 +7816,15 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.24.1" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae" +checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" dependencies = [ "autocfg 1.1.0", "bytes", @@ -7835,7 +7837,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -7861,9 +7863,9 @@ dependencies = [ [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", @@ -7886,7 +7888,7 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "webpki 0.22.0", ] @@ -7924,7 +7926,7 @@ checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "tokio-rustls 0.23.4", "tungstenite 0.17.3", @@ -7965,9 +7967,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = 
"f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] @@ -8203,7 +8205,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -8222,14 +8224,14 @@ version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", "log", "rand 0.8.5", - "rustls 0.20.7", + "rustls 0.20.8", "sha-1 0.10.1", "thiserror", "url", @@ -8244,7 +8246,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" dependencies = [ "async-trait", - "base64", + "base64 0.13.1", "futures", "log", "md-5", @@ -8359,9 +8361,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" [[package]] name = "unicode-ident" @@ -8459,9 +8461,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" dependencies = [ "getrandom 0.2.8", ] @@ -8672,9 +8674,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -8682,9 +8684,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", "log", @@ -8697,9 +8699,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" dependencies = [ "cfg-if", "js-sys", @@ -8709,9 +8711,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8719,9 +8721,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", @@ -8732,15 +8734,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "wasm-bindgen-test" -version = "0.3.33" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d2fff962180c3fadf677438054b1db62bee4aa32af26a45388af07d1287e1d" +checksum = "6db36fc0f9fb209e88fb3642590ae0205bb5a56216dabd963ba15879fe53a30b" dependencies = [ "console_error_panic_hook", "js-sys", @@ -8752,14 +8754,27 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.33" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4683da3dfc016f704c9f82cf401520c4f1cb3ee440f7f52b3d6ac29506a49ca7" +checksum = "0734759ae6b3b1717d661fe4f016efcfb9828f5edb4520c18eaee05af3b43be9" dependencies = [ "proc-macro2", "quote", ] +[[package]] +name = "wasm-streams" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wasm-timer" version = "0.2.5" @@ -8777,9 +8792,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -8792,7 +8807,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" dependencies = [ "arrayvec", - "base64", + "base64 0.13.1", "bytes", "derive_more", "ethabi 16.0.0", @@ -9001,7 +9016,7 @@ dependencies = [ "tokio", "turn", "url", - "uuid 1.2.2", + "uuid 1.3.0", "waitgroup", "webrtc-mdns", "webrtc-util", @@ -9109,9 +9124,9 @@ dependencies = [ [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -9193,19 +9208,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc 0.42.1", + "windows_i686_gnu 0.42.1", + "windows_i686_msvc 0.42.1", + "windows_x86_64_gnu 0.42.1", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_x86_64_msvc 0.42.1", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + 
"windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.1", + "windows_i686_gnu 0.42.1", + "windows_i686_msvc 0.42.1", + "windows_x86_64_gnu 0.42.1", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.1", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] name = "windows_aarch64_msvc" @@ -9215,9 +9254,9 @@ checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" [[package]] name = "windows_i686_gnu" @@ -9227,9 +9266,9 @@ checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" @@ -9239,9 +9278,9 @@ checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" @@ -9251,15 +9290,15 @@ checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" @@ -9269,9 +9308,9 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "winreg" @@ -9284,13 +9323,14 @@ dependencies = [ [[package]] name = "ws_stream_wasm" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"47ca1ab42f5afed7fc332b22b6e932ca5414b209465412c8cdf0ad23bc0de645" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" dependencies = [ "async_io_stream", "futures", "js-sys", + "log", "pharos", "rustc_version 0.4.0", "send_wrapper", @@ -9344,11 +9384,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" dependencies = [ "asn1-rs 0.3.1", - "base64", + "base64 0.13.1", "data-encoding", "der-parser 7.0.0", "lazy_static", - "nom 7.1.2", + "nom 7.1.3", "oid-registry 0.4.0", "ring", "rusticata-macros", @@ -9363,11 +9403,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" dependencies = [ "asn1-rs 0.5.1", - "base64", + "base64 0.13.1", "data-encoding", "der-parser 8.1.0", "lazy_static", - "nom 7.1.2", + "nom 7.1.3", "oid-registry 0.6.1", "rusticata-macros", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index c80a734ae..46852645e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -92,7 +92,6 @@ resolver = "2" [patch] [patch.crates-io] -fixed-hash = { git = "https://github.com/paritytech/parity-common", rev="df638ab0885293d21d656dc300d39236b69ce57d" } warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } eth2_ssz = { path = "consensus/ssz" } eth2_ssz_derive = { path = "consensus/ssz_derive" } diff --git a/Makefile b/Makefile index 458ceefc7..05c6c74d5 100644 --- a/Makefile +++ b/Makefile @@ -193,7 +193,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 + cargo audit --ignore RUSTSEC-2020-0071 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs new file mode 100644 index 000000000..a4a661197 --- /dev/null +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -0,0 +1,195 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::attestation_rewards::{IdealAttestationRewards, TotalAttestationRewards}; +use eth2::lighthouse::StandardAttestationRewards; +use participation_cache::ParticipationCache; +use safe_arith::SafeArith; +use slog::{debug, Logger}; +use state_processing::{ + common::altair::BaseRewardPerIncrement, + per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight}, +}; +use std::collections::HashMap; +use store::consts::altair::{ + PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, + TIMELY_TARGET_FLAG_INDEX, +}; +use types::consts::altair::WEIGHT_DENOMINATOR; + +use types::{Epoch, EthSpec}; + +use eth2::types::ValidatorId; + +impl BeaconChain { + pub fn compute_attestation_rewards( + &self, + epoch: Epoch, + validators: Vec, + log: Logger, + ) -> Result { + debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); + + // Get state + let spec = &self.spec; + + let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch()); + + let state_root = self + .state_root_at_slot(state_slot)? 
+ .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; + + let mut state = self + .get_state(&state_root, Some(state_slot))? + .ok_or(BeaconChainError::MissingBeaconState(state_root))?; + + // Calculate ideal_rewards + let participation_cache = ParticipationCache::new(&state, spec)?; + + let previous_epoch = state.previous_epoch(); + + let mut ideal_rewards_hashmap = HashMap::new(); + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + let weight = get_flag_weight(flag_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + + let unslashed_participating_indices = participation_cache + .get_unslashed_participating_indices(flag_index, previous_epoch)?; + + let unslashed_participating_balance = + unslashed_participating_indices + .total_balance() + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + + let unslashed_participating_increments = + unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; + + let total_active_balance = participation_cache.current_epoch_total_active_balance(); + + let active_increments = + total_active_balance.safe_div(spec.effective_balance_increment)?; + + let base_reward_per_increment = + BaseRewardPerIncrement::new(total_active_balance, spec)?; + + for effective_balance_eth in 0..=32 { + let effective_balance = + effective_balance_eth.safe_mul(spec.effective_balance_increment)?; + let base_reward = + effective_balance_eth.safe_mul(base_reward_per_increment.as_u64())?; + + let penalty = -(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)? as i64); + + let reward_numerator = base_reward + .safe_mul(weight)? + .safe_mul(unslashed_participating_increments)?; + + let ideal_reward = reward_numerator + .safe_div(active_increments)? + .safe_div(WEIGHT_DENOMINATOR)?; + if !state.is_in_inactivity_leak(previous_epoch, spec) { + ideal_rewards_hashmap + .insert((flag_index, effective_balance), (ideal_reward, penalty)); + } else { + ideal_rewards_hashmap.insert((flag_index, effective_balance), (0, penalty)); + } + } + } + + // Calculate total_rewards + let mut total_rewards: Vec = Vec::new(); + + let validators = if validators.is_empty() { + participation_cache.eligible_validator_indices().to_vec() + } else { + validators + .into_iter() + .map(|validator| match validator { + ValidatorId::Index(i) => Ok(i as usize), + ValidatorId::PublicKey(pubkey) => state + .get_validator_index(&pubkey)? + .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), + }) + .collect::, _>>()? + }; + + for validator_index in &validators { + let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?; + let mut head_reward = 0u64; + let mut target_reward = 0i64; + let mut source_reward = 0i64; + + if eligible { + let effective_balance = state.get_effective_balance(*validator_index)?; + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + let (ideal_reward, penalty) = ideal_rewards_hashmap + .get(&(flag_index, effective_balance)) + .ok_or(BeaconChainError::AttestationRewardsError)?; + let voted_correctly = participation_cache + .get_unslashed_participating_indices(flag_index, previous_epoch) + .map_err(|_| BeaconChainError::AttestationRewardsError)? 
+ .contains(*validator_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + if voted_correctly { + if flag_index == TIMELY_HEAD_FLAG_INDEX { + head_reward += ideal_reward; + } else if flag_index == TIMELY_TARGET_FLAG_INDEX { + target_reward += *ideal_reward as i64; + } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { + source_reward += *ideal_reward as i64; + } + } else if flag_index == TIMELY_HEAD_FLAG_INDEX { + head_reward = 0; + } else if flag_index == TIMELY_TARGET_FLAG_INDEX { + target_reward = *penalty; + } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { + source_reward = *penalty; + } + } + } + total_rewards.push(TotalAttestationRewards { + validator_index: *validator_index as u64, + head: head_reward, + target: target_reward, + source: source_reward, + }); + } + + // Convert hashmap to vector + let mut ideal_rewards: Vec = ideal_rewards_hashmap + .iter() + .map( + |((flag_index, effective_balance), (ideal_reward, _penalty))| { + (flag_index, effective_balance, ideal_reward) + }, + ) + .fold( + HashMap::new(), + |mut acc, (flag_index, &effective_balance, ideal_reward)| { + let entry = acc + .entry(effective_balance) + .or_insert(IdealAttestationRewards { + effective_balance, + head: 0, + target: 0, + source: 0, + }); + match *flag_index { + TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward, + TIMELY_TARGET_FLAG_INDEX => entry.target += ideal_reward, + TIMELY_HEAD_FLAG_INDEX => entry.head += ideal_reward, + _ => {} + } + acc + }, + ) + .into_values() + .collect::>(); + ideal_rewards.sort_by(|a, b| a.effective_balance.cmp(&b.effective_balance)); + + Ok(StandardAttestationRewards { + ideal_rewards, + total_rewards, + }) + } +} diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs new file mode 100644 index 000000000..3f186c37c --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -0,0 +1,237 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::StandardBlockReward; +use operation_pool::RewardCache; +use safe_arith::SafeArith; +use slog::error; +use state_processing::{ + common::{ + altair, get_attestation_participation_flag_indices, get_attesting_indices_from_state, + }, + per_block_processing::{ + altair::sync_committee::compute_sync_aggregate_rewards, get_slashable_indices, + }, +}; +use store::{ + consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, + RelativeEpoch, +}; +use types::{BeaconBlockRef, BeaconState, BeaconStateError, ExecPayload, Hash256}; + +type BeaconBlockSubRewardValue = u64; + +impl BeaconChain { + pub fn compute_beacon_block_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &mut BeaconState, + ) -> Result { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + + let proposer_index = block.proposer_index(); + + let sync_aggregate_reward = + self.compute_beacon_block_sync_aggregate_reward(block, state)?; + + let proposer_slashing_reward = self + .compute_beacon_block_proposer_slashing_reward(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating proposer slashing reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardError + })?; + + let attester_slashing_reward = self + .compute_beacon_block_attester_slashing_reward(block, state) + 
.map_err(|e| { + error!( + self.log, + "Error calculating attester slashing reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardError + })?; + + let block_attestation_reward = if let BeaconState::Base(_) = state { + self.compute_beacon_block_attestation_reward_base(block, block_root, state) + .map_err(|e| { + error!( + self.log, + "Error calculating base block attestation reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardAttestationError + })? + } else { + self.compute_beacon_block_attestation_reward_altair(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating altair block attestation reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardAttestationError + })? + }; + + let total_reward = sync_aggregate_reward + .safe_add(proposer_slashing_reward)? + .safe_add(attester_slashing_reward)? + .safe_add(block_attestation_reward)?; + + Ok(StandardBlockReward { + proposer_index, + total: total_reward, + attestations: block_attestation_reward, + sync_aggregate: sync_aggregate_reward, + proposer_slashings: proposer_slashing_reward, + attester_slashings: attester_slashing_reward, + }) + } + + fn compute_beacon_block_sync_aggregate_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) + .map_err(|_| BeaconChainError::BlockRewardSyncError)?; + Ok(sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit) + } else { + Ok(0) + } + } + + fn compute_beacon_block_proposer_slashing_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + let mut proposer_slashing_reward = 0; + + let proposer_slashings = block.body().proposer_slashings(); + + for proposer_slashing in proposer_slashings { + proposer_slashing_reward.safe_add_assign( + state + .get_validator(proposer_slashing.proposer_index() as usize)? + .effective_balance + .safe_div(self.spec.whistleblower_reward_quotient)?, + )?; + } + + Ok(proposer_slashing_reward) + } + + fn compute_beacon_block_attester_slashing_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + let mut attester_slashing_reward = 0; + + let attester_slashings = block.body().attester_slashings(); + + for attester_slashing in attester_slashings { + for attester_index in get_slashable_indices(state, attester_slashing)? { + attester_slashing_reward.safe_add_assign( + state + .get_validator(attester_index as usize)? + .effective_balance + .safe_div(self.spec.whistleblower_reward_quotient)?, + )?; + } + } + + Ok(attester_slashing_reward) + } + + fn compute_beacon_block_attestation_reward_base>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + // Call compute_block_reward in the base case + // Since base does not have sync aggregate, we only grab attesation portion of the returned + // value + let mut reward_cache = RewardCache::default(); + let block_attestation_reward = self + .compute_block_reward(block, block_root, state, &mut reward_cache, true)? 
+ .attestation_rewards + .total; + + Ok(block_attestation_reward) + } + + fn compute_beacon_block_attestation_reward_altair>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &mut BeaconState, + ) -> Result { + let total_active_balance = state.get_total_active_balance()?; + let base_reward_per_increment = + altair::BaseRewardPerIncrement::new(total_active_balance, &self.spec)?; + + let mut total_proposer_reward = 0; + + let proposer_reward_denominator = WEIGHT_DENOMINATOR + .safe_sub(PROPOSER_WEIGHT)? + .safe_mul(WEIGHT_DENOMINATOR)? + .safe_div(PROPOSER_WEIGHT)?; + + for attestation in block.body().attestations() { + let data = &attestation.data; + let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); + let participation_flag_indices = get_attestation_participation_flag_indices( + state, + data, + inclusion_delay, + &self.spec, + )?; + + let attesting_indices = get_attesting_indices_from_state(state, attestation)?; + + let mut proposer_reward_numerator = 0; + for index in attesting_indices { + let index = index as usize; + for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { + let epoch_participation = + state.get_epoch_participation_mut(data.target.epoch)?; + let validator_participation = epoch_participation + .get_mut(index) + .ok_or(BeaconStateError::ParticipationOutOfBounds(index))?; + + if participation_flag_indices.contains(&flag_index) + && !validator_participation.has_flag(flag_index)? + { + validator_participation.add_flag(flag_index)?; + proposer_reward_numerator.safe_add_assign( + altair::get_base_reward( + state, + index, + base_reward_per_increment, + &self.spec, + )? + .safe_mul(weight)?, + )?; + } + } + } + total_proposer_reward.safe_add_assign( + proposer_reward_numerator.safe_div(proposer_reward_denominator)?, + )?; + } + + Ok(total_proposer_reward) + } +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2aae03b72..31d52deae 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -8,7 +8,7 @@ use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ - check_block_is_finalized_descendant, check_block_relevancy, get_block_root, + check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root, signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; @@ -2795,7 +2795,7 @@ impl BeaconChain { // is so we don't have to think about lock ordering with respect to the fork choice lock. // There are a bunch of places where we lock both fork choice and the pubkey cache and it // would be difficult to check that they all lock fork choice first. - let mut kv_store_ops = self + let mut ops = self .validator_pubkey_cache .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? @@ -2817,7 +2817,7 @@ impl BeaconChain { let mut fork_choice = self.canonical_head.fork_choice_write_lock(); // Do not import a block that doesn't descend from the finalized root. - check_block_is_finalized_descendant(self, &fork_choice, &signed_block)?; + check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, &signed_block)?; // Register the new block with the fork choice service. 
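A brief aside on `compute_beacon_block_attestation_reward_altair` above: the proposer reward denominator it derives from the flag weights reduces to a constant under the usual Altair constants. A minimal sketch of that arithmetic, assuming the standard spec values `PROPOSER_WEIGHT = 8` and `WEIGHT_DENOMINATOR = 64` (assumed here, not restated in this diff):

```rust
// Illustrative only: the proposer-reward denominator used above, evaluated
// with the assumed Altair spec constants.
const PROPOSER_WEIGHT: u64 = 8;
const WEIGHT_DENOMINATOR: u64 = 64;

fn main() {
    let proposer_reward_denominator =
        (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR / PROPOSER_WEIGHT;
    // (64 - 8) * 64 / 8 = 448, so each attestation adds
    // proposer_reward_numerator / 448 gwei to the proposer's running total.
    assert_eq!(proposer_reward_denominator, 448);
    println!("proposer_reward_denominator = {proposer_reward_denominator}");
}
```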
{ @@ -2897,9 +2897,14 @@ impl BeaconChain { // ---------------------------- BLOCK PROBABLY ATTESTABLE ---------------------------------- // Most blocks are now capable of being attested to thanks to the `early_attester_cache` // cache above. Resume non-essential processing. + // + // It is important NOT to return errors here before the database commit, because the block + // has already been added to fork choice and the database would be left in an inconsistent + // state if we returned early without committing. In other words, an error here would + // corrupt the node's database permanently. // ----------------------------------------------------------------------------------------- - self.import_block_update_shuffling_cache(block_root, &mut state)?; + self.import_block_update_shuffling_cache(block_root, &mut state); self.import_block_observe_attestations( block, &state, @@ -2922,17 +2927,16 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - let mut ops: Vec<_> = confirmed_state_roots - .into_iter() - .map(StoreOp::DeleteStateTemporaryFlag) - .collect(); + ops.extend( + confirmed_state_roots + .into_iter() + .map(StoreOp::DeleteStateTemporaryFlag), + ); ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); - kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?); - - if let Err(e) = self.store.hot_db.do_atomically(kv_store_ops) { + if let Err(e) = self.store.do_atomically(ops) { error!( self.log, "Database write failed!"; @@ -3361,13 +3365,27 @@ impl BeaconChain { } } + // For the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. fn import_block_update_shuffling_cache( &self, block_root: Hash256, state: &mut BeaconState, + ) { + if let Err(e) = self.import_block_update_shuffling_cache_fallible(block_root, state) { + warn!( + self.log, + "Failed to prime shuffling cache"; + "error" => ?e + ); + } + } + + fn import_block_update_shuffling_cache_fallible( + &self, + block_root: Hash256, + state: &mut BeaconState, ) -> Result<(), BlockError> { - // For the current and next epoch of this state, ensure we have the shuffling from this - // block in our cache. for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] { let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index e91df27e3..7d5d35010 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -745,7 +745,7 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - check_block_is_finalized_descendant( + check_block_is_finalized_checkpoint_or_descendant( chain, &chain.canonical_head.fork_choice_write_lock(), &block, @@ -1565,12 +1565,12 @@ fn check_block_against_finalized_slot( /// ## Warning /// /// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. 
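Returning to the comment above about not returning errors between fork-choice registration and the database commit: the shuffling-cache priming is now wrapped so that a failure is only logged. A minimal, self-contained sketch of that pattern with hypothetical names, not the Lighthouse code itself:

```rust
// Illustrative pattern only: once the block is already in fork choice, a
// cache-priming failure is logged instead of returned, so it cannot abort the
// import before the database write. Names here are hypothetical.
fn prime_shuffling_cache() {
    if let Err(e) = prime_shuffling_cache_fallible() {
        eprintln!("Failed to prime shuffling cache: {e}");
    }
}

fn prime_shuffling_cache_fallible() -> Result<(), String> {
    // Stand-in for the real cache work, which may fail.
    Err("shuffling unavailable".to_string())
}

fn main() {
    prime_shuffling_cache(); // logs, never propagates
    println!("import continues to the database commit");
}
```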
-pub fn check_block_is_finalized_descendant( +pub fn check_block_is_finalized_checkpoint_or_descendant( chain: &BeaconChain, fork_choice: &BeaconForkChoice, block: &Arc>, ) -> Result<(), BlockError> { - if fork_choice.is_descendant_of_finalized(block.parent_root()) { + if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { Ok(()) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 9cf7939b7..102b0c06c 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -51,7 +51,6 @@ pub enum BeaconChainError { }, SlotClockDidNotStart, NoStateForSlot(Slot), - UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), DBError(store::Error), @@ -157,10 +156,12 @@ pub enum BeaconChainError { ExecutionForkChoiceUpdateInvalid { status: PayloadStatus, }, + BlockRewardError, BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, SyncCommitteeRewardsSyncError, + AttestationRewardsError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), HeadBlockMissingFromForkChoice(Hash256), diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 2444c144f..6b980eea7 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,6 +1,8 @@ #![recursion_limit = "128"] // For lazy-static +pub mod attestation_rewards; pub mod attestation_verification; mod attester_cache; +pub mod beacon_block_reward; mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 26aea2d27..79910df29 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryInto; use std::marker::PhantomData; -use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreItem}; +use store::{DBColumn, Error as StoreError, StoreItem, StoreOp}; use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// Provides a mapping of `validator_index -> validator_publickey`. @@ -38,7 +38,7 @@ impl ValidatorPubkeyCache { }; let store_ops = cache.import_new_pubkeys(state)?; - store.hot_db.do_atomically(store_ops)?; + store.do_atomically(store_ops)?; Ok(cache) } @@ -79,7 +79,7 @@ impl ValidatorPubkeyCache { pub fn import_new_pubkeys( &mut self, state: &BeaconState, - ) -> Result, BeaconChainError> { + ) -> Result>, BeaconChainError> { if state.validators().len() > self.pubkeys.len() { self.import( state.validators()[self.pubkeys.len()..] @@ -92,7 +92,10 @@ impl ValidatorPubkeyCache { } /// Adds zero or more validators to `self`. - fn import(&mut self, validator_keys: I) -> Result, BeaconChainError> + fn import( + &mut self, + validator_keys: I, + ) -> Result>, BeaconChainError> where I: Iterator + ExactSizeIterator, { @@ -112,7 +115,9 @@ impl ValidatorPubkeyCache { // It will be committed atomically when the block that introduced it is written to disk. // Notably it is NOT written while the write lock on the cache is held. 
// See: https://github.com/sigp/lighthouse/issues/2327 - store_ops.push(DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i))); + store_ops.push(StoreOp::KeyValueOp( + DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i)), + )); self.pubkeys.push( (&pubkey) @@ -294,7 +299,7 @@ mod test { let ops = cache .import_new_pubkeys(&state) .expect("should import pubkeys"); - store.hot_db.do_atomically(ops).unwrap(); + store.do_atomically(ops).unwrap(); check_cache_get(&cache, &keypairs[..]); drop(cache); diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 5532fbb34..fe4058af0 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -364,7 +364,7 @@ impl Engine { Ok(result) } Err(error) => { - error!( + warn!( self.log, "Execution engine call failed"; "error" => ?error, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 3b354d62e..6798d49bc 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1821,10 +1821,10 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, &[metrics::FAILURE], ); - error!( + warn!( self.log(), "Builder failed to reveal payload"; - "info" => "this relay failure may cause a missed proposal", + "info" => "this is common behaviour for some builders and may not indicate an issue", "error" => ?e, "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0edbaf8f7..bfc61ea11 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -15,6 +15,7 @@ mod database; mod metrics; mod proposer_duties; mod publish_blocks; +mod standard_block_rewards; mod state_id; mod sync_committee_rewards; mod sync_committees; @@ -1806,6 +1807,27 @@ pub fn serve( }, ); + let beacon_rewards_path = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(chain_filter.clone()); + + // GET beacon/rewards/blocks/{block_id} + let get_beacon_rewards_blocks = beacon_rewards_path + .clone() + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(warp::path::end()) + .and_then(|chain: Arc>, block_id: BlockId| { + blocking_json_task(move || { + let (rewards, execution_optimistic) = + standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; + Ok(rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }); + /* * beacon/rewards */ @@ -1815,6 +1837,58 @@ pub fn serve( .and(warp::path("rewards")) .and(chain_filter.clone()); + // POST beacon/rewards/attestations/{epoch} + let post_beacon_rewards_attestations = beacon_rewards_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + epoch: Epoch, + validators: Vec, + log: Logger| { + blocking_json_task(move || { + let attestation_rewards = chain + .compute_attestation_rewards(epoch, validators, log) + .map_err(|e| match e { + BeaconChainError::MissingBeaconState(root) => { + warp_utils::reject::custom_not_found(format!( + "missing state {root:?}", + )) + } + BeaconChainError::NoStateForSlot(slot) => { + warp_utils::reject::custom_not_found(format!( + "missing state at slot {slot}" + )) + } + BeaconChainError::BeaconStateError( + 
BeaconStateError::UnknownValidator(validator_index), + ) => warp_utils::reject::custom_bad_request(format!( + "validator is unknown: {validator_index}" + )), + BeaconChainError::ValidatorPubkeyUnknown(pubkey) => { + warp_utils::reject::custom_bad_request(format!( + "validator pubkey is unknown: {pubkey:?}" + )) + } + e => warp_utils::reject::custom_server_error(format!( + "unexpected error: {:?}", + e + )), + })?; + let execution_optimistic = + chain.is_optimistic_or_invalid_head().unwrap_or_default(); + + Ok(attestation_rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }, + ); + // POST beacon/rewards/sync_committee/{block_id} let post_beacon_rewards_sync_committee = beacon_rewards_path .clone() @@ -2887,7 +2961,7 @@ pub fn serve( .await .map(|resp| warp::reply::json(&resp)) .map_err(|e| { - error!( + warn!( log, "Relay error when registering validator(s)"; "num_registrations" => filtered_registration_data.len(), @@ -3488,6 +3562,7 @@ pub fn serve( .or(get_beacon_pool_voluntary_exits.boxed()) .or(get_beacon_pool_bls_to_execution_changes.boxed()) .or(get_beacon_deposit_snapshot.boxed()) + .or(get_beacon_rewards_blocks.boxed()) .or(get_config_fork_schedule.boxed()) .or(get_config_spec.boxed()) .or(get_config_deposit_contract.boxed()) @@ -3540,6 +3615,7 @@ pub fn serve( .or(post_beacon_pool_voluntary_exits.boxed()) .or(post_beacon_pool_sync_committees.boxed()) .or(post_beacon_pool_bls_to_execution_changes.boxed()) + .or(post_beacon_rewards_attestations.boxed()) .or(post_beacon_rewards_sync_committee.boxed()) .or(post_validator_duties_attester.boxed()) .or(post_validator_duties_sync.boxed()) diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs new file mode 100644 index 000000000..b3c90d08a --- /dev/null +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -0,0 +1,27 @@ +use crate::sync_committee_rewards::get_state_before_applying_block; +use crate::BlockId; +use crate::ExecutionOptimistic; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::lighthouse::StandardBlockReward; +use std::sync::Arc; +use warp_utils::reject::beacon_chain_error; +//// The difference between block_rewards and beacon_block_rewards is the later returns block +//// reward format that satisfies beacon-api specs +pub fn compute_beacon_block_rewards( + chain: Arc>, + block_id: BlockId, +) -> Result<(StandardBlockReward, ExecutionOptimistic), warp::Rejection> { + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + let block_ref = block.message(); + + let block_root = block.canonical_root(); + + let mut state = get_state_before_applying_block(chain.clone(), &block)?; + + let rewards = chain + .compute_beacon_block_reward(block_ref, block_root, &mut state) + .map_err(beacon_chain_error)?; + + Ok((rewards, execution_optimistic)) +} diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index ae369115d..cefa98db4 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -47,7 +47,7 @@ pub fn compute_sync_committee_rewards( Ok((data, execution_optimistic)) } -fn get_state_before_applying_block( +pub fn get_state_before_applying_block( chain: Arc>, block: &SignedBlindedBeaconBlock, ) -> Result, warp::reject::Rejection> { diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 
32712e32a..e40384332 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -1,3 +1,4 @@ +use crate::rpc::config::OutboundRateLimiterConfig; use crate::types::GossipKind; use crate::{Enr, PeerIdSerialized}; use directory::{ @@ -133,6 +134,9 @@ pub struct Config { /// Whether light client protocols should be enabled. pub enable_light_client_server: bool, + + /// Configuration for the outbound rate limiter (requests made by this node). + pub outbound_rate_limiter_config: Option, } impl Default for Config { @@ -211,6 +215,7 @@ impl Default for Config { topics: Vec::new(), metrics_enabled: false, enable_light_client_server: false, + outbound_rate_limiter_config: None, } } } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs new file mode 100644 index 000000000..bea0929fb --- /dev/null +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -0,0 +1,173 @@ +use std::{ + fmt::{Debug, Display}, + str::FromStr, + time::Duration, +}; + +use super::{methods, rate_limiter::Quota, Protocol}; + +use serde_derive::{Deserialize, Serialize}; + +/// Auxiliary struct to aid on configuration parsing. +/// +/// A protocol's quota is specified as `protocol_name:tokens/time_in_seconds`. +#[derive(Debug, PartialEq, Eq)] +struct ProtocolQuota { + protocol: Protocol, + quota: Quota, +} + +impl Display for ProtocolQuota { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}:{}/{}", + self.protocol.as_ref(), + self.quota.max_tokens, + self.quota.replenish_all_every.as_secs() + ) + } +} + +impl FromStr for ProtocolQuota { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let (protocol_str, quota_str) = s + .split_once(':') + .ok_or("Missing ':' from quota definition.")?; + let protocol = protocol_str + .parse() + .map_err(|_parse_err| "Wrong protocol representation in quota")?; + let (tokens_str, time_str) = quota_str + .split_once('/') + .ok_or("Quota should be defined as \"n/t\" (t in seconds). Missing '/' from quota.")?; + let tokens = tokens_str + .parse() + .map_err(|_| "Failed to parse tokens from quota.")?; + let seconds = time_str + .parse::() + .map_err(|_| "Failed to parse time in seconds from quota.")?; + Ok(ProtocolQuota { + protocol, + quota: Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: tokens, + }, + }) + } +} + +/// Configurations for the rate limiter applied to outbound requests (made by the node itself). 
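The per-protocol quota syntax accepted by the `FromStr` impl above is `<protocol_name>:<max_tokens>/<time_in_seconds>`. A small round-trip sketch, assuming it runs inside the `rpc::config` module (where the private `ProtocolQuota` type is visible) and relying on the snake_case protocol names introduced later in this diff:

```rust
// Illustrative round trip of the quota syntax parsed above. Assumes visibility
// from within the rpc::config module (e.g. a unit test with `use super::*`).
#[test]
fn quota_string_round_trip() {
    let parsed: ProtocolQuota = "beacon_blocks_by_range:1024/10"
        .parse()
        .expect("well-formed quota string");
    assert_eq!(parsed.protocol, Protocol::BlocksByRange);
    assert_eq!(parsed.quota.max_tokens, 1024);
    assert_eq!(parsed.to_string(), "beacon_blocks_by_range:1024/10");
}
```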
+#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OutboundRateLimiterConfig { + pub(super) ping_quota: Quota, + pub(super) meta_data_quota: Quota, + pub(super) status_quota: Quota, + pub(super) goodbye_quota: Quota, + pub(super) blocks_by_range_quota: Quota, + pub(super) blocks_by_root_quota: Quota, +} + +impl OutboundRateLimiterConfig { + pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10); + pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); + pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); + pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = + Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); + pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); +} + +impl Default for OutboundRateLimiterConfig { + fn default() -> Self { + OutboundRateLimiterConfig { + ping_quota: Self::DEFAULT_PING_QUOTA, + meta_data_quota: Self::DEFAULT_META_DATA_QUOTA, + status_quota: Self::DEFAULT_STATUS_QUOTA, + goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, + blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, + blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + } + } +} + +impl Debug for OutboundRateLimiterConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + macro_rules! fmt_q { + ($quota:expr) => { + &format_args!( + "{}/{}s", + $quota.max_tokens, + $quota.replenish_all_every.as_secs() + ) + }; + } + + f.debug_struct("OutboundRateLimiterConfig") + .field("ping", fmt_q!(&self.ping_quota)) + .field("metadata", fmt_q!(&self.meta_data_quota)) + .field("status", fmt_q!(&self.status_quota)) + .field("goodbye", fmt_q!(&self.goodbye_quota)) + .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) + .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .finish() + } +} + +/// Parse configurations for the outbound rate limiter. Protocols that are not specified use +/// the default values. Protocol specified more than once use only the first given Quota. +/// +/// The expected format is a ';' separated list of [`ProtocolQuota`]. +impl FromStr for OutboundRateLimiterConfig { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let mut ping_quota = None; + let mut meta_data_quota = None; + let mut status_quota = None; + let mut goodbye_quota = None; + let mut blocks_by_range_quota = None; + let mut blocks_by_root_quota = None; + for proto_def in s.split(';') { + let ProtocolQuota { protocol, quota } = proto_def.parse()?; + let quota = Some(quota); + match protocol { + Protocol::Status => status_quota = status_quota.or(quota), + Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota), + Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota), + Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), + Protocol::Ping => ping_quota = ping_quota.or(quota), + Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), + Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. 
Quota should not be set."), + } + } + Ok(OutboundRateLimiterConfig { + ping_quota: ping_quota.unwrap_or(Self::DEFAULT_PING_QUOTA), + meta_data_quota: meta_data_quota.unwrap_or(Self::DEFAULT_META_DATA_QUOTA), + status_quota: status_quota.unwrap_or(Self::DEFAULT_STATUS_QUOTA), + goodbye_quota: goodbye_quota.unwrap_or(Self::DEFAULT_GOODBYE_QUOTA), + blocks_by_range_quota: blocks_by_range_quota + .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), + blocks_by_root_quota: blocks_by_root_quota + .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_quota_inverse() { + let quota = ProtocolQuota { + protocol: Protocol::Goodbye, + quota: Quota { + replenish_all_every: Duration::from_secs(10), + max_tokens: 8, + }, + }; + assert_eq!(quota.to_string().parse(), Ok(quota)) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 57b00586d..a455cef1a 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -12,7 +12,7 @@ use libp2p::swarm::{ PollParameters, SubstreamProtocol, }; use libp2p::PeerId; -use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr}; +use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o}; use std::marker::PhantomData; use std::sync::Arc; @@ -33,12 +33,17 @@ pub use methods::{ pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; +use self::config::OutboundRateLimiterConfig; +use self::self_limiter::SelfRateLimiter; + pub(crate) mod codec; +pub mod config; mod handler; pub mod methods; mod outbound; mod protocol; mod rate_limiter; +mod self_limiter; /// Composite trait for a request id. pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -101,13 +106,18 @@ pub struct RPCMessage { pub event: HandlerEvent, } +type BehaviourAction = + NetworkBehaviourAction, RPCHandler>; + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. pub struct RPC { /// Rate limiter limiter: RateLimiter, + /// Rate limiter for our own requests. + self_limiter: Option>, /// Queue of events to be processed. - events: Vec, RPCHandler>>, + events: Vec>, fork_context: Arc, enable_light_client_server: bool, /// Slog logger for RPC behaviour. @@ -118,10 +128,12 @@ impl RPC { pub fn new( fork_context: Arc, enable_light_client_server: bool, + outbound_rate_limiter_config: Option, log: slog::Logger, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); - let limiter = RPCRateLimiterBuilder::new() + + let limiter = RateLimiter::builder() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) .n_every(Protocol::Ping, 2, Duration::from_secs(10)) .n_every(Protocol::Status, 5, Duration::from_secs(15)) @@ -140,8 +152,14 @@ impl RPC { ) .build() .expect("Configuration parameters are valid"); + + let self_limiter = outbound_rate_limiter_config.map(|config| { + SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid") + }); + RPC { limiter, + self_limiter, events: Vec::new(), fork_context, enable_light_client_server, @@ -168,12 +186,24 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. 
- pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, event: OutboundRequest) { - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: RPCSend::Request(request_id, event), - }); + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: OutboundRequest) { + let event = if let Some(self_limiter) = self.self_limiter.as_mut() { + match self_limiter.allows(peer_id, request_id, req) { + Ok(event) => event, + Err(_e) => { + // Request is logged and queued internally in the self rate limiter. + return; + } + } + } else { + NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + } + }; + + self.events.push(event); } /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This @@ -278,11 +308,19 @@ where cx: &mut Context, _: &mut impl PollParameters, ) -> Poll> { - // let the rate limiter prune + // let the rate limiter prune. let _ = self.limiter.poll_unpin(cx); + + if let Some(self_limiter) = self.self_limiter.as_mut() { + if let Poll::Ready(event) = self_limiter.poll_ready(cx) { + self.events.push(event) + } + } + if !self.events.is_empty() { return Poll::Ready(self.events.remove(0)); } + Poll::Pending } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 691b16e41..529713207 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -14,7 +14,7 @@ use std::io; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; -use strum::IntoStaticStr; +use strum::{AsRefStr, Display, EnumString, IntoStaticStr}; use tokio_io_timeout::TimeoutStream; use tokio_util::{ codec::Framed, @@ -169,23 +169,28 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { } /// Protocol names to be used. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumString, AsRefStr, Display)] +#[strum(serialize_all = "snake_case")] pub enum Protocol { /// The Status protocol name. Status, /// The Goodbye protocol name. Goodbye, /// The `BlocksByRange` protocol name. + #[strum(serialize = "beacon_blocks_by_range")] BlocksByRange, /// The `BlocksByRoot` protocol name. + #[strum(serialize = "beacon_blocks_by_root")] BlocksByRoot, /// The `BlobsByRange` protocol name. BlobsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. + #[strum(serialize = "metadata")] MetaData, /// The `LightClientBootstrap` protocol name. 
+ #[strum(serialize = "light_client_bootstrap")] LightClientBootstrap, } @@ -204,22 +209,6 @@ pub enum Encoding { SSZSnappy, } -impl std::fmt::Display for Protocol { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let repr = match self { - Protocol::Status => "status", - Protocol::Goodbye => "goodbye", - Protocol::BlocksByRange => "beacon_blocks_by_range", - Protocol::BlocksByRoot => "beacon_blocks_by_root", - Protocol::BlobsByRange => "blobs_sidecars_by_range", - Protocol::Ping => "ping", - Protocol::MetaData => "metadata", - Protocol::LightClientBootstrap => "light_client_bootstrap", - }; - f.write_str(repr) - } -} - impl std::fmt::Display for Encoding { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index bdd61861a..163d9a84e 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -1,6 +1,7 @@ -use crate::rpc::{InboundRequest, Protocol}; +use crate::rpc::Protocol; use fnv::FnvHashMap; use libp2p::PeerId; +use serde_derive::{Deserialize, Serialize}; use std::convert::TryInto; use std::future::Future; use std::hash::Hash; @@ -47,12 +48,31 @@ type Nanosecs = u64; /// n*`replenish_all_every`/`max_tokens` units of time since their last request. /// /// To produce hard limits, set `max_tokens` to 1. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Quota { /// How often are `max_tokens` fully replenished. - replenish_all_every: Duration, + pub(super) replenish_all_every: Duration, /// Token limit. This translates on how large can an instantaneous batch of /// tokens be. - max_tokens: u64, + pub(super) max_tokens: u64, +} + +impl Quota { + /// A hard limit of one token every `seconds`. + pub const fn one_every(seconds: u64) -> Self { + Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: 1, + } + } + + /// Allow `n` tokens to be use used every `seconds`. + pub const fn n_every(n: u64, seconds: u64) -> Self { + Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: n, + } + } } /// Manages rate limiting of requests per peer, with differentiated rates per protocol. @@ -80,6 +100,7 @@ pub struct RPCRateLimiter { } /// Error type for non conformant requests +#[derive(Debug)] pub enum RateLimitedErr { /// Required tokens for this request exceed the maximum TooLarge, @@ -88,7 +109,7 @@ pub enum RateLimitedErr { } /// User-friendly builder of a `RPCRateLimiter` -#[derive(Default)] +#[derive(Default, Clone)] pub struct RPCRateLimiterBuilder { /// Quota for the Goodbye protocol. goodbye_quota: Option, @@ -109,13 +130,8 @@ pub struct RPCRateLimiterBuilder { } impl RPCRateLimiterBuilder { - /// Get an empty `RPCRateLimiterBuilder`. - pub fn new() -> Self { - Default::default() - } - /// Set a quota for a protocol. 
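With the `strum` derives above, protocol names round-trip through their snake_case wire names, and `Quota` gains `const` constructors used for the default outbound quotas. A short illustrative test, assuming visibility from within the `rpc` module:

```rust
// Illustrative only; assumes Protocol and Quota are in scope inside the rpc module.
#[test]
fn protocol_names_and_quota_helpers() {
    use std::time::Duration;

    // Display/EnumString use the snake_case names, with explicit overrides
    // such as BlocksByRange <-> "beacon_blocks_by_range".
    assert_eq!(Protocol::BlocksByRange.to_string(), "beacon_blocks_by_range");
    assert_eq!("metadata".parse::<Protocol>(), Ok(Protocol::MetaData));

    // Quota::n_every(n, secs) allows n tokens per window; one_every(secs) is a
    // hard limit of a single token per window.
    let q = Quota::n_every(1024, 10);
    assert_eq!(q.max_tokens, 1024);
    assert_eq!(q.replenish_all_every, Duration::from_secs(10));
    assert_eq!(Quota::one_every(10).max_tokens, 1);
}
```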
- fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self { + pub fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self { let q = Some(quota); match protocol { Protocol::Ping => self.ping_quota = q, @@ -202,11 +218,40 @@ impl RPCRateLimiterBuilder { } } +pub trait RateLimiterItem { + fn protocol(&self) -> Protocol; + fn expected_responses(&self) -> u64; +} + +impl RateLimiterItem for super::InboundRequest { + fn protocol(&self) -> Protocol { + self.protocol() + } + + fn expected_responses(&self) -> u64 { + self.expected_responses() + } +} + +impl RateLimiterItem for super::OutboundRequest { + fn protocol(&self) -> Protocol { + self.protocol() + } + + fn expected_responses(&self) -> u64 { + self.expected_responses() + } +} impl RPCRateLimiter { - pub fn allows( + /// Get a builder instance. + pub fn builder() -> RPCRateLimiterBuilder { + RPCRateLimiterBuilder::default() + } + + pub fn allows( &mut self, peer_id: &PeerId, - request: &InboundRequest, + request: &Item, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); let tokens = request.expected_responses().max(1); diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs new file mode 100644 index 000000000..451c6206f --- /dev/null +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -0,0 +1,202 @@ +use std::{ + collections::{hash_map::Entry, HashMap, VecDeque}, + task::{Context, Poll}, + time::Duration, +}; + +use futures::FutureExt; +use libp2p::{swarm::NotifyHandler, PeerId}; +use slog::{crit, debug, Logger}; +use smallvec::SmallVec; +use tokio_util::time::DelayQueue; +use types::EthSpec; + +use super::{ + config::OutboundRateLimiterConfig, + rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}, + BehaviourAction, OutboundRequest, Protocol, RPCSend, ReqId, +}; + +/// A request that was rate limited or waiting on rate limited requests for the same peer and +/// protocol. +struct QueuedRequest { + req: OutboundRequest, + request_id: Id, +} + +pub(crate) struct SelfRateLimiter { + /// Requests queued for sending per peer. This requests are stored when the self rate + /// limiter rejects them. Rate limiting is based on a Peer and Protocol basis, therefore + /// are stored in the same way. + delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, + /// The delay required to allow a peer's outbound request per protocol. + next_peer_request: DelayQueue<(PeerId, Protocol)>, + /// Rate limiter for our own requests. + limiter: RateLimiter, + /// Requests that are ready to be sent. + ready_requests: SmallVec<[BehaviourAction; 3]>, + /// Slog logger. + log: Logger, +} + +/// Error returned when the rate limiter does not accept a request. +// NOTE: this is currently not used, but might be useful for debugging. +pub enum Error { + /// There are queued requests for this same peer and protocol. + PendingRequests, + /// Request was tried but rate limited. + RateLimited, +} + +impl SelfRateLimiter { + /// Creates a new [`SelfRateLimiter`] based on configration values. + pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result { + debug!(log, "Using self rate limiting params"; "config" => ?config); + // Destructure to make sure every configuration value is used. 
+ let OutboundRateLimiterConfig { + ping_quota, + meta_data_quota, + status_quota, + goodbye_quota, + blocks_by_range_quota, + blocks_by_root_quota, + } = config; + + let limiter = RateLimiter::builder() + .set_quota(Protocol::Ping, ping_quota) + .set_quota(Protocol::MetaData, meta_data_quota) + .set_quota(Protocol::Status, status_quota) + .set_quota(Protocol::Goodbye, goodbye_quota) + .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) + .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + // Manually set the LightClientBootstrap quota, since we use the same rate limiter for + // inbound and outbound requests, and the LightClientBootstrap is an only inbound + // protocol. + .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) + .build()?; + + Ok(SelfRateLimiter { + delayed_requests: Default::default(), + next_peer_request: Default::default(), + limiter, + ready_requests: Default::default(), + log, + }) + } + + /// Checks if the rate limiter allows the request. If it's allowed, returns the + /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed + /// until it can be sent. + pub fn allows( + &mut self, + peer_id: PeerId, + request_id: Id, + req: OutboundRequest, + ) -> Result, Error> { + let protocol = req.protocol(); + // First check that there are not already other requests waiting to be sent. + if let Some(queued_requests) = self.delayed_requests.get_mut(&(peer_id, protocol)) { + queued_requests.push_back(QueuedRequest { req, request_id }); + + return Err(Error::PendingRequests); + } + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) { + Err((rate_limited_req, wait_time)) => { + let key = (peer_id, protocol); + self.next_peer_request.insert(key, wait_time); + self.delayed_requests + .entry(key) + .or_default() + .push_back(rate_limited_req); + + Err(Error::RateLimited) + } + Ok(event) => Ok(event), + } + } + + /// Auxiliary function to deal with self rate limiting outcomes. If the rate limiter allows the + /// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request + /// should be delayed, it's returned with the duration to wait. + fn try_send_request( + limiter: &mut RateLimiter, + peer_id: PeerId, + request_id: Id, + req: OutboundRequest, + log: &Logger, + ) -> Result, (QueuedRequest, Duration)> { + match limiter.allows(&peer_id, &req) { + Ok(()) => Ok(BehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + }), + Err(e) => { + let protocol = req.protocol(); + match e { + RateLimitedErr::TooLarge => { + // this should never happen with default parameters. Let's just send the request. + // Log a crit since this is a config issue. + crit!( + log, + "Self rate limiting error for a batch that will never fit. Sending request anyway. Check configuration parameters."; + "protocol" => %req.protocol() + ); + Ok(BehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + }) + } + RateLimitedErr::TooSoon(wait_time) => { + debug!(log, "Self rate limiting"; "protocol" => %protocol, "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id); + Err((QueuedRequest { req, request_id }, wait_time)) + } + } + } + } + } + + /// When a peer and protocol are allowed to send a next request, this function checks the + /// queued requests and attempts marking as ready as many as the limiter allows. 
+ fn next_peer_request_ready(&mut self, peer_id: PeerId, protocol: Protocol) { + if let Entry::Occupied(mut entry) = self.delayed_requests.entry((peer_id, protocol)) { + let queued_requests = entry.get_mut(); + while let Some(QueuedRequest { req, request_id }) = queued_requests.pop_front() { + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) + { + Err((rate_limited_req, wait_time)) => { + let key = (peer_id, protocol); + self.next_peer_request.insert(key, wait_time); + queued_requests.push_back(rate_limited_req); + // If one fails just wait for the next window that allows sending requests. + return; + } + Ok(event) => self.ready_requests.push(event), + } + } + if queued_requests.is_empty() { + entry.remove(); + } + } + } + + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // First check the requests that were self rate limited, since those might add events to + // the queue. Also do this this before rate limiter prunning to avoid removing and + // immediately adding rate limiting keys. + if let Poll::Ready(Some(Ok(expired))) = self.next_peer_request.poll_expired(cx) { + let (peer_id, protocol) = expired.into_inner(); + self.next_peer_request_ready(peer_id, protocol); + } + // Prune the rate limiter. + let _ = self.limiter.poll_unpin(cx); + + // Finally return any queued events. + if !self.ready_requests.is_empty() { + return Poll::Ready(self.ready_requests.remove(0)); + } + + Poll::Pending + } +} diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e7e106ba6..151eef4e8 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -264,6 +264,7 @@ impl Network { let eth2_rpc = RPC::new( ctx.fork_context.clone(), config.enable_light_client_server, + config.outbound_rate_limiter_config.clone(), log.clone(), ); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 5ce331169..95d8a294c 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -46,6 +46,7 @@ derivative = "2.2.0" delay_map = "0.1.1" ethereum-types = { version = "0.14.1", optional = true } operation_pool = { path = "../operation_pool" } +execution_layer = { path = "../execution_layer" } [features] deterministic_long_lived_attnets = [ "ethereum-types" ] diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index bfa0ea516..afcc15280 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -7,7 +7,7 @@ use itertools::process_results; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; -use slog::{debug, error}; +use slog::{debug, error, warn}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; @@ -392,12 +392,26 @@ impl Worker { break; } Err(e) => { - error!( - self.log, - "Error fetching block for peer"; - "block_root" => ?root, - "error" => ?e - ); + if matches!( + e, + BeaconChainError::ExecutionLayerErrorPayloadReconstruction(_block_hash, ref boxed_error) + if matches!(**boxed_error, execution_layer::Error::EngineError(_)) + ) { + warn!( + self.log, + "Error rebuilding payload for peer"; + "info" => "this may occur occasionally when the EE is busy", + "block_root" => ?root, + "error" => ?e, 
+ ); + } else { + error!( + self.log, + "Error fetching block for peer"; + "block_root" => ?root, + "error" => ?e + ); + } // send the stream terminator self.send_error_response( diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index d401deb89..c5be4f0a6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -10,7 +10,7 @@ mod reward_cache; mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; -pub use attestation::AttMaxCover; +pub use attestation::{earliest_attestation_validators, AttMaxCover}; pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 38d81512e..b4da83315 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -194,6 +194,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses.") .takes_value(false), ) + .arg( + Arg::with_name("self-limiter") + .long("self-limiter") + .help( + "Enables the outbound rate limiter (requests made by this node).\ + \ + Rate limit quotas per protocol can be set in the form of \ + :/. To set quotas for multiple protocols, \ + separate them by ';'. If the self rate limiter is enabled and a protocol is not \ + present in the configuration, the quotas used for the inbound rate limiter will be \ + used." + ) + .min_values(0) + .hidden(true) + ) /* REST API related arguments */ .arg( Arg::with_name("http") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 294568cca..726f8368e 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -967,6 +967,13 @@ pub fn set_network_config( // Light client server config. config.enable_light_client_server = cli_args.is_present("light-client-server"); + // This flag can be used both with or without a value. Try to parse it first with a value, if + // no value is defined but the flag is present, use the default params. + config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?; + if cli_args.is_present("self-limiter") && config.outbound_rate_limiter_config.is_none() { + config.outbound_rate_limiter_config = Some(Default::default()); + } + Ok(()) } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 93af0ca18..833158082 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -789,6 +789,10 @@ impl, Cold: ItemStore> HotColdDB let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_bytes()); key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } + + StoreOp::KeyValueOp(kv_op) => { + key_value_batch.push(kv_op); + } } } Ok(key_value_batch) @@ -825,6 +829,8 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteState(_, _) => (), StoreOp::DeleteExecutionPayload(_) => (), + + StoreOp::KeyValueOp(_) => (), } } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 9db5f45ec..47ddd8fc7 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -162,6 +162,7 @@ pub enum StoreOp<'a, E: EthSpec> { DeleteBlock(Hash256), DeleteState(Hash256, Option), DeleteExecutionPayload(Hash256), + KeyValueOp(KeyValueStoreOp), } /// A unique column identifier. 
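A note on the `--self-limiter` flag added above: its optional value uses the `<protocol_name>:<max_tokens>/<time_in_seconds>` syntax, with multiple protocols separated by `;`, and any protocol left out keeps its default quota. A hedged sketch of what the parsing amounts to, assuming the crate path shown below and using arbitrary example values rather than recommended settings:

```rust
// Illustrative only: parsing a --self-limiter value into the outbound config.
// Protocols omitted from the string keep their DEFAULT_* quotas.
use lighthouse_network::rpc::config::OutboundRateLimiterConfig;

fn main() {
    let config: OutboundRateLimiterConfig = "status:5/15;beacon_blocks_by_range:1024/10"
        .parse()
        .expect("well-formed self-limiter value");
    println!("{config:?}");
}
```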
diff --git a/book/src/docker.md b/book/src/docker.md index f22b8a200..7484f9f52 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -57,7 +57,7 @@ $ docker pull sigp/lighthouse:latest-modern Image tags follow this format: ``` -${version}${arch}${stability}${modernity} +${version}${arch}${stability}${modernity}${features} ``` The `version` is: @@ -81,6 +81,12 @@ The `modernity` is: * `-modern` for optimized builds * empty for a `portable` unoptimized build +The `features` is: + +* `-dev` for a development build with `minimal-spec` preset enabled. +* empty for a standard build with no custom feature enabled. + + Examples: * `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index e6fbc0f16..0793af20d 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -59,14 +59,7 @@ The following fields are returned: - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. -From this data you can calculate some interesting figures: - -#### Participation Rate - -`previous_epoch_attesting_gwei / previous_epoch_active_gwei` - -Expresses the ratio of validators that managed to have an attestation -voting upon the previous epoch included in a block. +From this data you can calculate: #### Justification/Finalization Rate diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 4a3114e43..03f96f34e 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1005,6 +1005,40 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `GET beacon/rewards/blocks` + pub async fn get_beacon_rewards_blocks(&self, epoch: Epoch) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("blocks"); + + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + + self.get(path).await + } + + /// `POST beacon/rewards/attestations` + pub async fn post_beacon_rewards_attestations( + &self, + attestations: &[ValidatorId], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("attestations"); + + self.post(path, &attestations).await?; + + Ok(()) + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 068abd693..e50d9f4dc 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,8 +1,10 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. 
mod attestation_performance; +pub mod attestation_rewards; mod block_packing_efficiency; mod block_rewards; +mod standard_block_rewards; mod sync_committee_rewards; use crate::{ @@ -23,11 +25,13 @@ use store::{AnchorInfo, Split, StoreConfig}; pub use attestation_performance::{ AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, }; +pub use attestation_rewards::StandardAttestationRewards; pub use block_packing_efficiency::{ BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, }; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; +pub use standard_block_rewards::StandardBlockReward; pub use sync_committee_rewards::SyncCommitteeReward; // Define "legacy" implementations of `Option` which use four bytes for encoding the union diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs new file mode 100644 index 000000000..314ffb851 --- /dev/null +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -0,0 +1,44 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards paid for attestations +// All rewards in GWei + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct IdealAttestationRewards { + // Validator's effective balance in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub effective_balance: u64, + // Ideal attester's reward for head vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub head: u64, + // Ideal attester's reward for target vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub target: u64, + // Ideal attester's reward for source vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub source: u64, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct TotalAttestationRewards { + // one entry for every validator based on their attestations in the epoch + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + // attester's reward for head vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub head: u64, + // attester's reward for target vote in gwei + #[serde(with = "eth2_serde_utils::quoted_i64")] + pub target: i64, + // attester's reward for source vote in gwei + #[serde(with = "eth2_serde_utils::quoted_i64")] + pub source: i64, + // TBD attester's inclusion_delay reward in gwei (phase0 only) + // pub inclusion_delay: u64, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct StandardAttestationRewards { + pub ideal_rewards: Vec, + pub total_rewards: Vec, +} diff --git a/common/eth2/src/lighthouse/standard_block_rewards.rs b/common/eth2/src/lighthouse/standard_block_rewards.rs new file mode 100644 index 000000000..502577500 --- /dev/null +++ b/common/eth2/src/lighthouse/standard_block_rewards.rs @@ -0,0 +1,26 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards for a single block +// All rewards in GWei +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct StandardBlockReward { + // proposer of the block, the proposer index who receives these rewards + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_index: u64, + // total block reward in gwei, + // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub total: u64, + // block reward component 
due to included attestations in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub attestations: u64, + // block reward component due to included sync_aggregate in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub sync_aggregate: u64, + // block reward component due to included proposer_slashings in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_slashings: u64, + // block reward component due to included attester_slashings in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub attester_slashings: u64, +} diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs index cdd685065..e215d8e3e 100644 --- a/common/eth2/src/lighthouse/sync_committee_rewards.rs +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -8,5 +8,6 @@ pub struct SyncCommitteeReward { #[serde(with = "eth2_serde_utils::quoted_u64")] pub validator_index: u64, // sync committee reward in gwei for the validator + #[serde(with = "eth2_serde_utils::quoted_i64")] pub reward: i64, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 36a690911..1a0b46e38 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -255,11 +255,20 @@ pub struct FinalityCheckpointsData { } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(try_from = "&str")] pub enum ValidatorId { PublicKey(PublicKeyBytes), Index(u64), } +impl TryFrom<&str> for ValidatorId { + type Error = String; + + fn try_from(s: &str) -> Result { + Self::from_str(s) + } +} + impl FromStr for ValidatorId { type Err = String; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index f795b0790..9ca9ef0ce 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -722,7 +722,7 @@ where op: &InvalidationOperation, ) -> Result<(), Error> { self.proto_array - .process_execution_payload_invalidation(op) + .process_execution_payload_invalidation::(op) .map_err(Error::FailedToProcessInvalidExecutionPayload) } @@ -1288,7 +1288,7 @@ where if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { let store = &self.fc_store; - if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) { + if self.is_finalized_checkpoint_or_descendant(store.best_justified_checkpoint().root) { let store = &mut self.fc_store; store .set_justified_checkpoint(*store.best_justified_checkpoint()) @@ -1329,12 +1329,13 @@ where /// Returns `true` if the block is known **and** a descendant of the finalized root. pub fn contains_block(&self, block_root: &Hash256) -> bool { - self.proto_array.contains_block(block_root) && self.is_descendant_of_finalized(*block_root) + self.proto_array.contains_block(block_root) + && self.is_finalized_checkpoint_or_descendant(*block_root) } /// Returns a `ProtoBlock` if the block is known **and** a descendant of the finalized root. pub fn get_block(&self, block_root: &Hash256) -> Option { - if self.is_descendant_of_finalized(*block_root) { + if self.is_finalized_checkpoint_or_descendant(*block_root) { self.proto_array.get_block(block_root) } else { None @@ -1343,7 +1344,7 @@ where /// Returns an `ExecutionStatus` if the block is known **and** a descendant of the finalized root. 
pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option<ExecutionStatus> { - if self.is_descendant_of_finalized(*block_root) { + if self.is_finalized_checkpoint_or_descendant(*block_root) { self.proto_array.get_block_execution_status(block_root) } else { None @@ -1378,10 +1379,10 @@ where }) } - /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. - pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { + /// Return `true` if `block_root` is equal to the finalized checkpoint, or a known descendant of it. + pub fn is_finalized_checkpoint_or_descendant(&self, block_root: Hash256) -> bool { self.proto_array - .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) + .is_finalized_checkpoint_or_descendant::<E>(block_root) } /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 035fb799e..68b3fb719 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -273,7 +273,7 @@ impl ForkChoiceTestDefinition { } }; fork_choice - .process_execution_payload_invalidation(&op) + .process_execution_payload_invalidation::<E>(&op) .unwrap() } Operation::AssertWeight { block_root, weight } => assert_eq!( diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index add84f547..bf50c0802 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -451,7 +451,7 @@ impl ProtoArray { /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`. /// /// See the documentation of `InvalidationOperation` for usage. - pub fn propagate_execution_payload_invalidation( + pub fn propagate_execution_payload_invalidation<E: EthSpec>( &mut self, op: &InvalidationOperation, ) -> Result<(), Error> { @@ -482,7 +482,7 @@ impl ProtoArray { let latest_valid_ancestor_is_descendant = latest_valid_ancestor_root.map_or(false, |ancestor_root| { self.is_descendant(ancestor_root, head_block_root) - && self.is_descendant(self.finalized_checkpoint.root, ancestor_root) + && self.is_finalized_checkpoint_or_descendant::<E>(ancestor_root) }); // Collect all *ancestors* which were declared invalid since they reside between the @@ -977,6 +977,12 @@ impl ProtoArray { /// ## Notes /// /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`. + /// + /// ## Warning + /// + /// Do not use this function to check if a block is a descendant of the + /// finalized checkpoint. Use `Self::is_finalized_checkpoint_or_descendant` + /// instead. pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.indices .get(&ancestor_root) @@ -990,6 +996,70 @@ impl ProtoArray { .unwrap_or(false) } + /// Returns `true` if `root` is equal to or a descendant of + /// `self.finalized_checkpoint`. + /// + /// Notably, this function is checking ancestry of the finalized + /// *checkpoint*, not the finalized *block*.
+ pub fn is_finalized_checkpoint_or_descendant<E: EthSpec>(&self, root: Hash256) -> bool { + let finalized_root = self.finalized_checkpoint.root; + let finalized_slot = self + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + let mut node = if let Some(node) = self + .indices + .get(&root) + .and_then(|index| self.nodes.get(*index)) + { + node + } else { + // An unknown root is not a finalized descendant. This line can only + // be reached if the user supplies a root that is not known to fork + // choice. + return false; + }; + + // The finalized and justified checkpoints represent a list of known + // ancestors of `node` that are likely to coincide with the store's + // finalized checkpoint. + // + // Run this check once, outside of the loop rather than inside the loop. + // If the conditions don't match for this node then they're unlikely to + // start matching for its ancestors. + for checkpoint in &[ + node.finalized_checkpoint, + node.justified_checkpoint, + node.unrealized_finalized_checkpoint, + node.unrealized_justified_checkpoint, + ] { + if checkpoint.map_or(false, |cp| cp == self.finalized_checkpoint) { + return true; + } + } + + loop { + // If `node` is less than or equal to the finalized slot then `node` + // must be the finalized block. + if node.slot <= finalized_slot { + return node.root == finalized_root; + } + + // Since `node` is from a higher slot than the finalized checkpoint, + // replace `node` with the parent of `node`. + if let Some(parent) = node.parent.and_then(|index| self.nodes.get(index)) { + node = parent + } else { + // If `node` is not the finalized block and its parent does not + // exist in fork choice, then the parent must have been pruned. + // Proto-array only prunes blocks prior to the finalized block, + // so this means the parent conflicts with finality. + return false; + }; + } + } + /// Returns the first *beacon block root* which contains an execution payload with the given /// `block_hash`, if any. pub fn execution_block_hash_to_beacon_block_root( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cbd369ae6..0e0d806e7 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -358,12 +358,12 @@ impl ProtoArrayForkChoice { } /// See `ProtoArray::propagate_execution_payload_invalidation` for documentation. - pub fn process_execution_payload_invalidation( + pub fn process_execution_payload_invalidation<E: EthSpec>( &mut self, op: &InvalidationOperation, ) -> Result<(), String> { self.proto_array - .propagate_execution_payload_invalidation(op) + .propagate_execution_payload_invalidation::<E>(op) .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) } @@ -748,6 +748,15 @@ impl ProtoArrayForkChoice { .is_descendant(ancestor_root, descendant_root) } + /// See `ProtoArray` documentation.
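// Editorial sketch, not part of the diff above: a simplified version of the
// ancestor walk implemented by `is_finalized_checkpoint_or_descendant` (the
// checkpoint fast-path is omitted). The `Node` struct and integer roots are
// hypothetical stand-ins for the proto-array types: starting from `root`, walk
// parent links until reaching the finalized slot, and only accept the block if
// the node found there is the finalized root itself.
use std::collections::HashMap;

#[derive(Clone, Copy)]
struct Node {
    slot: u64,
    root: u64,
    parent: Option<u64>,
}

fn is_finalized_or_descendant(
    nodes: &HashMap<u64, Node>,
    finalized_root: u64,
    finalized_slot: u64,
    root: u64,
) -> bool {
    let mut node = match nodes.get(&root) {
        Some(node) => *node,
        // Unknown blocks never pass the check.
        None => return false,
    };
    loop {
        // At or below the finalized slot, only the finalized block itself passes.
        if node.slot <= finalized_slot {
            return node.root == finalized_root;
        }
        match node.parent.and_then(|p| nodes.get(&p)) {
            Some(parent) => node = *parent,
            // A missing parent means the chain was pruned away from finality.
            None => return false,
        }
    }
}

fn main() {
    // A tiny chain 0 <- 1 <- 2, finalized at slot 1 (root 1).
    let mut nodes = HashMap::new();
    nodes.insert(0, Node { slot: 0, root: 0, parent: None });
    nodes.insert(1, Node { slot: 1, root: 1, parent: Some(0) });
    nodes.insert(2, Node { slot: 2, root: 2, parent: Some(1) });

    assert!(is_finalized_or_descendant(&nodes, 1, 1, 1)); // the finalized block
    assert!(is_finalized_or_descendant(&nodes, 1, 1, 2)); // a descendant
    assert!(!is_finalized_or_descendant(&nodes, 1, 1, 0)); // prior to finality
    assert!(!is_finalized_or_descendant(&nodes, 1, 1, 99)); // unknown root
}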
+ pub fn is_finalized_checkpoint_or_descendant<E: EthSpec>( + &self, + descendant_root: Hash256, + ) -> bool { + self.proto_array + .is_finalized_checkpoint_or_descendant::<E>(descendant_root) + } + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { if validator_index < self.votes.0.len() { let vote = &self.votes.0[validator_index]; @@ -928,6 +937,10 @@ mod test_compute_deltas { epoch: genesis_epoch, root: finalized_root, }; + let junk_checkpoint = Checkpoint { + epoch: Epoch::new(42), + root: Hash256::repeat_byte(42), + }; let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( genesis_slot, @@ -973,8 +986,10 @@ mod test_compute_deltas { target_root: finalized_root, current_epoch_shuffling_id: junk_shuffling_id.clone(), next_epoch_shuffling_id: junk_shuffling_id, - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, + // Use the junk checkpoint for the next two values to prevent + // the loop-shortcutting mechanism from triggering. + justified_checkpoint: junk_checkpoint, + finalized_checkpoint: junk_checkpoint, execution_status, unrealized_justified_checkpoint: None, unrealized_finalized_checkpoint: None, @@ -993,6 +1008,11 @@ mod test_compute_deltas { assert!(!fc.is_descendant(finalized_root, not_finalized_desc)); assert!(!fc.is_descendant(finalized_root, unknown)); + assert!(fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_root)); + assert!(fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_desc)); + assert!(!fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(not_finalized_desc)); + assert!(!fc.is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(unknown)); + assert!(!fc.is_descendant(finalized_desc, not_finalized_desc)); assert!(fc.is_descendant(finalized_desc, finalized_desc)); assert!(!fc.is_descendant(finalized_desc, finalized_root)); @@ -1004,6 +1024,171 @@ mod test_compute_deltas { assert!(!fc.is_descendant(not_finalized_desc, unknown)); } + /// This test covers an interesting case where a block can be a descendant + /// of the finalized *block*, but not a descendant of the finalized + /// *checkpoint*. + /// + /// ## Example + /// + /// Consider this block tree which has three blocks (`A`, `B` and `C`): + /// + /// ```ignore + /// [A] <--- [-] <--- [B] + /// | + /// |--[C] + /// ``` + /// + /// - `A` (slot 31) is the common ancestor. + /// - `B` (slot 33) descends from `A`, but there is a single skip slot + /// between it and `A`. + /// - `C` (slot 32) descends from `A` and conflicts with `B`. + /// + /// Imagine that the `B` chain is finalized at epoch 1. This means that the + /// finalized checkpoint points to the skipped slot at 32. The root of the + /// finalized checkpoint is `A`. + /// + /// In this scenario, the block `C` has the finalized root (`A`) as an + /// ancestor whilst simultaneously conflicting with the finalized + /// checkpoint. + /// + /// This means that to ensure a block does not conflict with finality we + /// must check to ensure that it's a descendant of the finalized + /// *checkpoint*, not just the finalized *block*.
+ #[test] + fn finalized_descendant_edge_case() { + let get_block_root = Hash256::from_low_u64_be; + let genesis_slot = Slot::new(0); + let junk_state_root = Hash256::zero(); + let junk_shuffling_id = + AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); + let execution_status = ExecutionStatus::irrelevant(); + + let genesis_checkpoint = Checkpoint { + epoch: Epoch::new(0), + root: get_block_root(0), + }; + + let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( + genesis_slot, + junk_state_root, + genesis_checkpoint, + genesis_checkpoint, + junk_shuffling_id.clone(), + junk_shuffling_id.clone(), + execution_status, + CountUnrealizedFull::default(), + ) + .unwrap(); + + struct TestBlock { + slot: u64, + root: u64, + parent_root: u64, + } + + let insert_block = |fc: &mut ProtoArrayForkChoice, block: TestBlock| { + fc.proto_array + .on_block::<MainnetEthSpec>( + Block { + slot: Slot::from(block.slot), + root: get_block_root(block.root), + parent_root: Some(get_block_root(block.parent_root)), + state_root: Hash256::zero(), + target_root: Hash256::zero(), + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(0), + root: get_block_root(0), + }, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: Some(genesis_checkpoint), + unrealized_finalized_checkpoint: Some(genesis_checkpoint), + }, + Slot::from(block.slot), + ) + .unwrap(); + }; + + /* + * Start of interesting part of tests. + */ + + // Produce the 0th epoch of blocks. They should all form a chain from + // the genesis block. + for i in 1..MainnetEthSpec::slots_per_epoch() { + insert_block( + &mut fc, + TestBlock { + slot: i, + root: i, + parent_root: i - 1, + }, + ) + } + + let last_slot_of_epoch_0 = MainnetEthSpec::slots_per_epoch() - 1; + + // Produce a block that descends from the last block of epoch 0. + // + // This block will be non-canonical. + let non_canonical_slot = last_slot_of_epoch_0 + 1; + insert_block( + &mut fc, + TestBlock { + slot: non_canonical_slot, + root: non_canonical_slot, + parent_root: non_canonical_slot - 1, + }, + ); + + // Produce a block that descends from the last block of the 0th epoch, + // that skips the 1st slot of the 1st epoch. + // + // This block will be canonical. + let canonical_slot = last_slot_of_epoch_0 + 2; + insert_block( + &mut fc, + TestBlock { + slot: canonical_slot, + root: canonical_slot, + parent_root: non_canonical_slot - 1, + }, + ); + + let finalized_root = get_block_root(last_slot_of_epoch_0); + + // Set the finalized checkpoint to finalize the first slot of epoch 1 on + // the canonical chain.
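// Editorial sketch, not part of the diff above: the slot arithmetic behind the
// test, assuming the mainnet preset (32 slots per epoch). Finalizing epoch 1
// puts the finalized checkpoint at slot 32, which is exactly the slot held by
// the non-canonical block and skipped by the canonical chain, while the
// checkpoint's root is the slot-31 block.
fn main() {
    const SLOTS_PER_EPOCH: u64 = 32;

    let last_slot_of_epoch_0 = SLOTS_PER_EPOCH - 1; // 31, root of the checkpoint
    let non_canonical_slot = last_slot_of_epoch_0 + 1; // 32
    let canonical_slot = last_slot_of_epoch_0 + 2; // 33

    let finalized_epoch = 1u64;
    let checkpoint_slot = finalized_epoch * SLOTS_PER_EPOCH; // epoch 1 start slot

    assert_eq!(checkpoint_slot, 32);
    assert_eq!(checkpoint_slot, non_canonical_slot); // conflicts with the checkpoint
    assert!(canonical_slot > checkpoint_slot); // descends through the skipped slot
}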
+ fc.proto_array.finalized_checkpoint = Checkpoint { + root: finalized_root, + epoch: Epoch::new(1), + }; + + assert!( + fc.proto_array + .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_root), + "the finalized checkpoint is the finalized checkpoint" + ); + + assert!( + fc.proto_array + .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(get_block_root( + canonical_slot + )), + "the canonical block is a descendant of the finalized checkpoint" + ); + assert!( + !fc.proto_array + .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(get_block_root( + non_canonical_slot + )), + "although the non-canonical block is a descendant of the finalized block, \ + it's not a descendant of the finalized checkpoint" + ); + } + #[test] fn zero_hash() { let validator_count: usize = 16; diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 92b5966c9..5c5dafc66 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -12,4 +12,4 @@ pub mod u64_hex_be; pub mod u8_hex; pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex}; -pub use quoted_int::{quoted_u256, quoted_u32, quoted_u64, quoted_u8}; +pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs index 822acb5ee..0cc35aa31 100644 --- a/consensus/serde_utils/src/quoted_int.rs +++ b/consensus/serde_utils/src/quoted_int.rs @@ -11,7 +11,7 @@ use std::convert::TryFrom; use std::marker::PhantomData; macro_rules! define_mod { - ($int: ty, $visit_fn: ident) => { + ($int: ty) => { /// Serde support for deserializing quoted integers. /// /// Configurable so that quotes are either required or optional. @@ -140,19 +140,25 @@ macro_rules! define_mod { pub mod quoted_u8 { use super::*; - define_mod!(u8, visit_u8); + define_mod!(u8); } pub mod quoted_u32 { use super::*; - define_mod!(u32, visit_u32); + define_mod!(u32); } pub mod quoted_u64 { use super::*; - define_mod!(u64, visit_u64); + define_mod!(u64); +} + +pub mod quoted_i64 { + use super::*; + + define_mod!(i64); } pub mod quoted_u256 { @@ -216,4 +222,26 @@ mod test { fn u256_without_quotes() { serde_json::from_str::("1").unwrap_err(); } + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct WrappedI64(#[serde(with = "quoted_i64")] i64); + + #[test] + fn negative_i64_with_quotes() { + assert_eq!( + serde_json::from_str::<WrappedI64>("\"-200\"").unwrap().0, + -200 + ); + assert_eq!( + serde_json::to_string(&WrappedI64(-12_500)).unwrap(), + "\"-12500\"" + ); + } + + // It would be OK if this worked, but we don't need it to (i64s should always be quoted). + #[test] + fn negative_i64_without_quotes() { + serde_json::from_str::<WrappedI64>("-200").unwrap_err(); + } } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7e581ee61..053a04f87 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1079,6 +1079,19 @@ fn http_port_flag() { .with_config(|config| assert_eq!(config.http_api.listen_port, port1)); } #[test] +fn empty_self_limiter_flag() { + // Test that an empty rate limiter flag is accepted, using the default rate limiting configuration.
+ CommandLineTest::new() + .flag("self-limiter", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.outbound_rate_limiter_config, + Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default()) + ) + }); +} +#[test] fn http_allow_origin_flag() { CommandLineTest::new() .flag("http-allow-origin", Some("127.0.0.99")) diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index c82a1a9d3..9142a0c7e 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -231,6 +231,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5064).") .takes_value(true), ) + .arg( + Arg::with_name("enable-high-validator-count-metrics") + .long("enable-high-validator-count-metrics") + .help("Enable per-validator metrics for > 64 validators. \ + Note: Per-validator metrics are always enabled for <= 64 validators. \ + Enabling them for higher validator counts will lead to a higher volume \ + of Prometheus metrics being collected.") + .takes_value(false), + ) /* * Explorer metrics */ diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 22741dabb..0f24e81d5 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -53,6 +53,11 @@ pub struct Config { /// If true, enable functionality that monitors the network for attestations or proposals from /// any of the validators managed by this client before starting up. pub enable_doppelganger_protection: bool, + /// If true, publish validator-specific metrics (e.g. the next attestation duty slot) + /// for all managed validators. + /// Note: validator-specific metrics are published for low validator counts (<= 64) + /// even without this flag. pub enable_high_validator_count_metrics: bool, /// Enable use of the blinded block endpoints during proposals. pub builder_proposals: bool, /// Overrides the timestamp field in builder api ValidatorRegistrationV1 @@ -99,6 +104,7 @@ impl Default for Config { http_metrics: <_>::default(), monitoring_api: None, enable_doppelganger_protection: false, + enable_high_validator_count_metrics: false, beacon_nodes_tls_certs: None, block_delay: None, builder_proposals: false, @@ -273,6 +279,10 @@ impl Config { config.http_metrics.enabled = true; } + if cli_args.is_present("enable-high-validator-count-metrics") { + config.enable_high_validator_count_metrics = true; + } + if let Some(address) = cli_args.value_of("metrics-address") { config.http_metrics.listen_addr = address .parse::() diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 86b8ca870..6ba2a2d1f 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -9,6 +9,7 @@ mod sync; use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced}; +use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; use crate::{ block_service::BlockServiceNotification, http_metrics::metrics, @@ -39,6 +40,11 @@ const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2; /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. const HISTORICAL_DUTIES_EPOCHS: u64 = 2; +/// Threshold (number of validators) at or below which per-validator metrics are collected automatically. +/// For validator counts greater than this value, the `enable-high-validator-count-metrics` +/// flag must be set in the cli to enable collection of per validator metrics.
+const VALIDATOR_METRICS_MIN_COUNT: usize = 64; + #[derive(Debug)] pub enum Error { UnableToReadSlotClock, @@ -121,6 +127,7 @@ pub struct DutiesService { /// This functionality is a little redundant since most BNs will likely reject duties when they /// aren't synced, but we keep it around for an emergency. pub require_synced: RequireSynced, + pub enable_high_validator_count_metrics: bool, pub context: RuntimeContext, pub spec: ChainSpec, } @@ -220,6 +227,12 @@ impl DutiesService { .cloned() .collect() } + + /// Returns `true` if we should collect per validator metrics and `false` otherwise. + pub fn per_validator_metrics(&self) -> bool { + self.enable_high_validator_count_metrics + || self.total_validator_count() <= VALIDATOR_METRICS_MIN_COUNT + } } /// Start the service that periodically polls the beacon node for validator duties. This will start @@ -501,6 +514,7 @@ async fn poll_beacon_attesters( current_epoch, &local_indices, &local_pubkeys, + current_slot, ) .await { @@ -520,9 +534,14 @@ async fn poll_beacon_attesters( ); // Download the duties and update the duties for the next epoch. - if let Err(e) = - poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys) - .await + if let Err(e) = poll_beacon_attesters_for_epoch( + duties_service, + next_epoch, + &local_indices, + &local_pubkeys, + current_slot, + ) + .await { error!( log, @@ -619,6 +638,7 @@ async fn poll_beacon_attesters_for_epoch( epoch: Epoch, local_indices: &[u64], local_pubkeys: &HashSet, + current_slot: Slot, ) -> Result<(), Error> { let log = duties_service.context.log(); @@ -671,6 +691,35 @@ async fn poll_beacon_attesters_for_epoch( .data .into_iter() .filter(|duty| { + if duties_service.per_validator_metrics() { + let validator_index = duty.validator_index; + let duty_slot = duty.slot; + if let Some(existing_slot_gauge) = + get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()]) + { + let existing_slot = Slot::new(existing_slot_gauge.get() as u64); + let existing_epoch = existing_slot.epoch(E::slots_per_epoch()); + + // First condition ensures that we switch to the next epoch duty slot + // once the current epoch duty slot passes. + // Second condition is to ensure that next epoch duties don't override + // current epoch duties. + if existing_slot < current_slot + || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch + && duty_slot > current_slot + && duty_slot != existing_slot) + { + existing_slot_gauge.set(duty_slot.as_u64() as i64); + } + } else { + set_int_gauge( + &ATTESTATION_DUTY, + &[&validator_index.to_string()], + duty_slot.as_u64() as i64, + ); + } + } + local_pubkeys.contains(&duty.pubkey) && { // Only update the duties if either is true: // diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 2d2e7da75..2d5b9b1db 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -177,6 +177,12 @@ lazy_static::lazy_static! 
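// Editorial sketch, not part of the diff above: the gating rule implemented by
// `DutiesService::per_validator_metrics`, reduced to a standalone function with
// hypothetical plain arguments. Per-validator metrics are always collected at or
// below the 64-validator threshold, and above it only when the operator opts in
// via --enable-high-validator-count-metrics.
const VALIDATOR_METRICS_MIN_COUNT: usize = 64;

fn per_validator_metrics(high_count_flag_enabled: bool, validator_count: usize) -> bool {
    high_count_flag_enabled || validator_count <= VALIDATOR_METRICS_MIN_COUNT
}

fn main() {
    assert!(per_validator_metrics(false, 10)); // small fleet: always collected
    assert!(!per_validator_metrics(false, 500)); // large fleet: off by default
    assert!(per_validator_metrics(true, 500)); // large fleet: explicit opt-in
}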
{ "Duration to obtain a signature", &["type"] ); + + pub static ref ATTESTATION_DUTY: Result = try_create_int_gauge_vec( + "vc_attestation_duty_slot", + "Attestation duty slot for all managed validators", + &["validator"] + ); } pub fn gather_prometheus_metrics( diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 00c3db7aa..f2d647490 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -422,6 +422,7 @@ impl ProductionValidatorClient { }, spec: context.eth2_config.spec.clone(), context: duties_context, + enable_high_validator_count_metrics: config.enable_high_validator_count_metrics, }); // Update the metrics server.