diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml
index db458a3db..031a88b03 100644
--- a/.github/workflows/book.yml
+++ b/.github/workflows/book.yml
@@ -13,7 +13,7 @@ jobs:
   build-and-upload-to-s3:
     runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@master
+      - uses: actions/checkout@v4
       - name: Setup mdBook
         uses: peaceiris/actions-mdbook@v1
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 007070dbb..54b355e63 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -71,7 +71,7 @@ jobs:
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
       FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Update Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         run: rustup update stable
@@ -106,10 +106,10 @@ jobs:
       - name: Set up Docker Buildx
         if: env.SELF_HOSTED_RUNNERS == 'false'
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       - name: Build and push
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
         with:
           file: ./Dockerfile.cross
           context: .
@@ -129,7 +129,7 @@ jobs:
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
     steps:
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       - name: Dockerhub login
         run: |
@@ -148,14 +148,16 @@ jobs:
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Dockerhub login
         run: |
           echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
-      - name: Build lcli dockerfile (with push)
-        run: |
-          docker build \
-            --build-arg PORTABLE=true \
-            --tag ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX} \
-            --file ./lcli/Dockerfile .
-          docker push ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}
+      - name: Build lcli and push
+        uses: docker/build-push-action@v5
+        with:
+          build-args: |
+            FEATURES=portable
+          context: .
+          push: true
+          file: ./lcli/Dockerfile
+          tags: ${{ env.LCLI_IMAGE_NAME }}:${{ env.VERSION }}${{ env.VERSION_SUFFIX }}
diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml
index 7f5d3e0b6..7e8d9135d 100644
--- a/.github/workflows/linkcheck.yml
+++ b/.github/workflows/linkcheck.yml
@@ -20,7 +20,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Run mdbook server
         run: |
diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml
index 75a81ce0e..42293d38a 100644
--- a/.github/workflows/local-testnet.yml
+++ b/.github/workflows/local-testnet.yml
@@ -24,7 +24,7 @@ jobs:
       # Enable portable to prevent issues with caching `blst` for the wrong CPU type
       FEATURES: portable,jemalloc
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         run: rustup update stable
@@ -46,7 +46,7 @@ jobs:
           echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH
           echo "$(brew --prefix)/opt/grep/libexec/gnubin" >> $GITHUB_PATH
       # https://github.com/actions/cache/blob/main/examples.md#rust---cargo
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
        id: cache-cargo
        with:
          path: |
@@ -95,6 +95,6 @@ jobs:
     runs-on: ubuntu-latest
     needs: ["run-local-testnet"]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Check that success job is dependent on all others
         run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 30ac52b6e..3d23b4110 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -68,7 +68,7 @@ jobs:
     needs: extract-version
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         run: rustup update stable
@@ -80,7 +80,7 @@ jobs:
       - uses: KyleMayes/install-llvm-action@v1
         if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows')
         with:
-          version: "15.0"
+          version: "16.0"
           directory: ${{ runner.temp }}/llvm
       - name: Set LIBCLANG_PATH
         if: startsWith(matrix.arch, 'x86_64-windows')
@@ -172,17 +172,19 @@ jobs:
       # This is required to share artifacts between different jobs
       # =======================================================================
-      - name: Upload artifact
-        uses: actions/upload-artifact@v3
+      - name: Upload artifact
+        uses: actions/upload-artifact@v4
         with:
           name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
           path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
+          compression-level: 0
       - name: Upload signature
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
           path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
+          compression-level: 0
   draft-release:
     name: Draft Release
@@ -193,7 +195,7 @@ jobs:
     steps:
       # This is necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts.
      - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
@@ -202,7 +204,7 @@ jobs:
       # ==============================
       - name: Download artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
       # ==============================
       # Create release draft
diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index 70fb59424..c501c8cab 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -41,7 +41,7 @@ jobs:
     # Use self-hosted runners only on the sigp repo.
     runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         uses: moonrepo/setup-rust@v1
@@ -65,7 +65,7 @@ jobs:
     name: release-tests-windows
     runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         uses: moonrepo/setup-rust@v1
@@ -86,7 +86,7 @@ jobs:
 #    - uses: KyleMayes/install-llvm-action@v1
 #      if: env.SELF_HOSTED_RUNNERS == 'false'
 #      with:
-#        version: "15.0"
+#        version: "16.0"
 #        directory: ${{ runner.temp }}/llvm
       - name: Set LIBCLANG_PATH
         run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
@@ -102,7 +102,7 @@ jobs:
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         uses: moonrepo/setup-rust@v1
@@ -121,7 +121,7 @@ jobs:
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -136,7 +136,7 @@ jobs:
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -151,7 +151,7 @@ jobs:
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -167,7 +167,7 @@ jobs:
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         uses: moonrepo/setup-rust@v1
@@ -188,7 +188,7 @@ jobs:
     name: state-transition-vectors-ubuntu
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -203,7 +203,7 @@ jobs:
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         uses: moonrepo/setup-rust@v1
@@ -211,7 +211,7 @@ jobs:
         channel: stable
         cache-target: release
         bins: cargo-nextest
-      - name: Run consensus-spec-tests with blst, milagro and fake_crypto
+      - name: Run consensus-spec-tests with blst and fake_crypto
         run: make nextest-ef
       - name: Show cache stats
         if: env.SELF_HOSTED_RUNNERS == 'true'
@@ -220,7 +220,7 @@ jobs:
     name: dockerfile-ubuntu
     runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Build the root Dockerfile
         run: docker build --build-arg FEATURES=portable -t lighthouse:local .
       - name: Test the built image
@@ -229,7 +229,7 @@ jobs:
     name: eth1-simulator-ubuntu
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -245,7 +245,7 @@ jobs:
     name: merge-transition-ubuntu
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -261,7 +261,7 @@ jobs:
     name: no-eth1-simulator-ubuntu
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -273,7 +273,7 @@ jobs:
     name: syncing-simulator-ubuntu
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -292,7 +292,7 @@ jobs:
       # Enable portable to prevent issues with caching `blst` for the wrong CPU type
       FEATURES: jemalloc,portable
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         uses: moonrepo/setup-rust@v1
@@ -309,7 +309,9 @@ jobs:
         run: |
           make
       - name: Install lcli
-        if: env.SELF_HOSTED_RUNNERS == 'false'
+        # TODO: uncomment after the version of lcli in https://github.com/sigp/lighthouse/pull/5137
+        # is installed on the runners
+        # if: env.SELF_HOSTED_RUNNERS == 'false'
         run: make install-lcli
       - name: Run the doppelganger protection failure test script
         run: |
@@ -323,7 +325,7 @@ jobs:
     name: execution-engine-integration-ubuntu
     runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         uses: moonrepo/setup-rust@v1
@@ -344,7 +346,7 @@ jobs:
     env:
       CARGO_INCREMENTAL: 1
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -370,7 +372,7 @@ jobs:
     name: check-msrv
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install Rust at Minimum Supported Rust Version (MSRV)
         run: |
           metadata=$(cargo metadata --no-deps --format-version 1)
@@ -382,7 +384,7 @@ jobs:
     name: cargo-udeps
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of nightly Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -404,9 +406,9 @@ jobs:
     name: compile-with-beta-compiler
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install dependencies
-        run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
+        run: sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
       - name: Use Rust beta
         run: rustup override set beta
       - name: Run make
@@ -415,7 +417,7 @@ jobs:
     name: cli-check
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get latest version of stable Rust
         uses: moonrepo/setup-rust@v1
         with:
@@ -453,6 +455,6 @@ jobs:
       'cli-check',
     ]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Check that success job is dependent on all others
         run: ./scripts/ci/check-success-job.sh ./.github/workflows/test-suite.yml test-suite-success
diff --git a/Cargo.lock b/Cargo.lock
index 1d3dc3070..306bdaa72 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -115,9 +115,9 @@ dependencies = [
 [[package]]
 name = "aes"
-version = "0.8.3"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2"
+checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
 dependencies = [
  "cfg-if",
  "cipher 0.4.4",

 [[package]]
 name = "aes-gcm"
-version = "0.9.2"
+version = "0.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f"
+checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6"
 dependencies = [
  "aead 0.4.3",
  "aes 0.7.5",
  "cipher 0.3.0",
- "ctr 0.7.0",
+ "ctr 0.8.0",
  "ghash 0.4.4",
  "subtle",
 ]
@@ -145,7 +145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
 dependencies = [
  "aead 0.5.2",
- "aes 0.8.3",
+ "aes 0.8.4",
  "cipher 0.4.4",
  "ctr 0.9.2",
  "ghash 0.5.0",
 ]

 [[package]]
 name = "ahash"
-version = "0.8.7"
+version = "0.8.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01"
+checksum = "8b79b82693f705137f8fb9b37871d99e4f9a7df12b917eed79c3d3954830a60b"
 dependencies = [
  "cfg-if",
  "once_cell",
@@ -180,9 +180,93 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"

 [[package]]
-name = "amcl"
-version = "0.3.0"
-source = "git+https://github.com/sigp/milagro_bls?tag=v1.5.1#d3fc0a40cfe8b72ccda46ba050ee6786a59ce753"
+name = "alloy-consensus"
+version = "0.1.0"
+source = "git+https://github.com/alloy-rs/alloy.git?rev=974d488bab5e21e9f17452a39a4bfa56677367b2#974d488bab5e21e9f17452a39a4bfa56677367b2"
+dependencies = [
+ "alloy-eips",
+ "alloy-network",
+ "alloy-primitives",
+ "alloy-rlp",
+]
+
+[[package]]
+name = "alloy-eips"
+version = "0.1.0"
+source = "git+https://github.com/alloy-rs/alloy.git?rev=974d488bab5e21e9f17452a39a4bfa56677367b2#974d488bab5e21e9f17452a39a4bfa56677367b2"
+dependencies = [
+ "alloy-primitives",
+ "alloy-rlp",
+ "serde",
+ "thiserror",
+]
+
+[[package]]
+name = "alloy-json-rpc"
+version = "0.1.0"
+source = "git+https://github.com/alloy-rs/alloy.git?rev=974d488bab5e21e9f17452a39a4bfa56677367b2#974d488bab5e21e9f17452a39a4bfa56677367b2"
+dependencies = [
+ "alloy-primitives",
+ "serde",
+ "serde_json",
+ "thiserror",
+]
+
+[[package]]
+name = "alloy-network"
+version = "0.1.0"
+source = "git+https://github.com/alloy-rs/alloy.git?rev=974d488bab5e21e9f17452a39a4bfa56677367b2#974d488bab5e21e9f17452a39a4bfa56677367b2"
+dependencies = [
+ "alloy-eips",
+ "alloy-json-rpc",
+ "alloy-primitives",
+ "alloy-rlp",
+ "serde",
+]
+
+[[package]]
+name = "alloy-primitives"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "600d34d8de81e23b6d909c094e23b3d357e01ca36b78a8c5424c501eedbe86f0"
+dependencies = [
+ "alloy-rlp",
+ "bytes",
+ "cfg-if",
+ "const-hex",
+ "derive_more",
+ "hex-literal",
+ "itoa",
+ "k256 0.13.3",
+ "keccak-asm",
+ "proptest",
"rand", + "ruint", + "serde", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] [[package]] name = "android-tzdata" @@ -210,9 +294,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "arbitrary" @@ -229,6 +313,130 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.0", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" 
+dependencies = [
+ "ark-std 0.4.0",
+ "digest 0.10.7",
+ "num-bigint",
+]
+
+[[package]]
+name = "ark-std"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c"
+dependencies = [
+ "num-traits",
+ "rand",
+]
+
+[[package]]
+name = "ark-std"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185"
+dependencies = [
+ "num-traits",
+ "rand",
+]
+
 [[package]]
 name = "arrayref"
 version = "0.3.7"
@@ -298,24 +506,95 @@ dependencies = [
 ]

 [[package]]
-name = "async-io"
-version = "2.3.0"
+name = "async-channel"
+version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb41eb19024a91746eba0773aa5e16036045bbf45733766661099e182ea6a744"
+checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3"
 dependencies = [
- "async-lock",
+ "concurrent-queue",
+ "event-listener 5.2.0",
+ "event-listener-strategy 0.5.0",
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "async-executor"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c"
+dependencies = [
+ "async-lock 3.3.0",
+ "async-task",
+ "concurrent-queue",
+ "fastrand 2.0.1",
+ "futures-lite 2.2.0",
+ "slab",
+]
+
+[[package]]
+name = "async-global-executor"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c"
+dependencies = [
+ "async-channel 2.2.0",
+ "async-executor",
+ "async-io 2.3.1",
+ "async-lock 3.3.0",
+ "blocking",
+ "futures-lite 2.2.0",
+ "once_cell",
+]
+
+[[package]]
+name = "async-io"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af"
+dependencies = [
+ "async-lock 2.8.0",
+ "autocfg",
+ "cfg-if",
+ "concurrent-queue",
+ "futures-lite 1.13.0",
+ "log",
+ "parking",
+ "polling 2.8.0",
+ "rustix 0.37.27",
+ "slab",
+ "socket2 0.4.10",
+ "waker-fn",
+]
+
+[[package]]
+name = "async-io"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65"
+dependencies = [
+ "async-lock 3.3.0",
  "cfg-if",
  "concurrent-queue",
  "futures-io",
- "futures-lite",
+ "futures-lite 2.2.0",
  "parking",
- "polling",
- "rustix 0.38.30",
+ "polling 3.5.0",
+ "rustix 0.38.31",
  "slab",
  "tracing",
  "windows-sys 0.52.0",
 ]

+[[package]]
+name = "async-lock"
+version = "2.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b"
+dependencies = [
+ "event-listener 2.5.3",
+]
+
 [[package]]
 name = "async-lock"
 version = "3.3.0"
@@ -323,10 +602,78 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b"
 dependencies = [
  "event-listener 4.0.3",
- "event-listener-strategy",
+ "event-listener-strategy 0.4.0",
  "pin-project-lite",
 ]

+[[package]]
+name = "async-process"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88"
+dependencies = [
+ "async-io 1.13.0",
+ "async-lock 2.8.0",
+ "async-signal",
+ "blocking",
+ "cfg-if",
+ "event-listener 3.1.0",
+ "futures-lite 1.13.0",
+ "rustix 0.38.31",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "async-signal"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5"
+dependencies = [
+ "async-io 2.3.1",
+ "async-lock 2.8.0",
+ "atomic-waker",
+ "cfg-if",
+ "futures-core",
+ "futures-io",
+ "rustix 0.38.31",
+ "signal-hook-registry",
+ "slab",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "async-std"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d"
+dependencies = [
+ "async-channel 1.9.0",
+ "async-global-executor",
+ "async-io 1.13.0",
+ "async-lock 2.8.0",
+ "async-process",
+ "crossbeam-utils",
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-lite 1.13.0",
+ "gloo-timers",
+ "kv-log-macro",
+ "log",
+ "memchr",
+ "once_cell",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+ "wasm-bindgen-futures",
+]
+
+[[package]]
+name = "async-task"
+version = "4.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799"
+
 [[package]]
 name = "async-trait"
 version = "0.1.77"
@@ -335,7 +682,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.48",
+ "syn 2.0.52",
 ]

 [[package]]
@@ -346,7 +693,20 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c"
 dependencies = [
  "futures",
  "pharos",
- "rustc_version",
+ "rustc_version 0.4.0",
+]
+
+[[package]]
+name = "asynchronous-codec"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568"
+dependencies = [
+ "bytes",
+ "futures-sink",
+ "futures-util",
+ "memchr",
+ "pin-project-lite",
 ]

 [[package]]
@@ -363,16 +723,10 @@ dependencies = [
 ]

 [[package]]
-name = "attohttpc"
-version = "0.16.3"
+name = "atomic-waker"
+version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247"
-dependencies = [
- "http 0.2.11",
- "log",
- "url",
- "wildmatch",
-]
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"

 [[package]]
 name = "attohttpc"
@@ -398,14 +752,13 @@ dependencies = [
 [[package]]
 name = "auto_impl"
-version = "1.1.0"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89"
+checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
 dependencies = [
- "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 1.0.109",
+ "syn 2.0.52",
 ]

 [[package]]
@@ -427,7 +780,7 @@ dependencies = [
  "http 1.0.0",
  "http-body 1.0.0",
  "http-body-util",
- "hyper 1.1.0",
+ "hyper 1.2.0",
  "hyper-util",
  "itoa",
  "matchit",
@@ -536,7 +889,6 @@ dependencies = [
  "ethereum_ssz",
  "ethereum_ssz_derive",
  "execution_layer",
- "exit-future",
  "fork_choice",
  "futures",
  "genesis",
@@ -583,7 +935,7 @@ dependencies = [
 [[package]]
 name = "beacon_node"
-version = "4.6.0"
+version = "5.1.1"
 dependencies = [
  "beacon_chain",
  "clap",
@@ -599,7 +951,7 @@ dependencies = [
  "genesis",
  "hex",
  "http_api",
- "hyper 1.1.0",
+ "hyper 1.2.0",
"lighthouse_network", "lighthouse_version", "monitoring_api", @@ -689,10 +1041,25 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.52", "which", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" @@ -763,6 +1130,22 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.2.0", + "async-lock 3.3.0", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite 2.2.0", + "piper", + "tracing", +] + [[package]] name = "bls" version = "0.2.0" @@ -774,7 +1157,6 @@ dependencies = [ "ethereum_serde_utils", "ethereum_ssz", "hex", - "milagro_bls", "rand", "serde", "tree_hash", @@ -805,7 +1187,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.6.0" +version = "5.1.1" dependencies = [ "beacon_node", "clap", @@ -857,9 +1239,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" [[package]] name = "byte-slice-cast" @@ -943,9 +1325,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" +checksum = "694c8807f2ae16faecc43dc17d74b3eb042482789fd0eb64b39a2e04e087053f" dependencies = [ "serde", ] @@ -958,7 +1340,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.22", "serde", "serde_json", "thiserror", @@ -972,9 +1354,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" dependencies = [ "jobserver", "libc", @@ -1021,14 +1403,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.32" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41daef31d7a747c5c847246f36de49ced6f7403b4cdabc807a97b5cc184cda7a" +checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -1106,7 +1488,9 @@ dependencies = [ "eth1", "eth2", "eth2_config", + "ethereum_ssz", "execution_layer", + "futures", "genesis", "http_api", "http_metrics", @@ -1169,6 +1553,19 @@ dependencies = 
[ "crossbeam-utils", ] +[[package]] +name = "const-hex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efbd12d49ab0eaf8193ba9175e45f56bbc2e4b27d57b8cfe62aa47942a46b9a9" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -1223,9 +1620,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] @@ -1268,9 +1665,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] @@ -1353,9 +1750,9 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ "generic-array", "subtle", @@ -1382,15 +1779,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "ctr" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.8.0" @@ -1421,9 +1809,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", @@ -1431,7 +1819,7 @@ dependencies = [ "digest 0.10.7", "fiat-crypto", "platforms 3.3.0", - "rustc_version", + "rustc_version 0.4.0", "subtle", "zeroize", ] @@ -1444,7 +1832,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1502,6 +1890,12 @@ dependencies = [ "libc", ] +[[package]] +name = "dary_heap" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca" + [[package]] name = "data-encoding" version = "2.5.0" @@ -1640,7 +2034,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1652,7 +2046,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version", + "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1679,7 +2073,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1699,7 +2093,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1775,11 
+2169,12 @@ dependencies = [ [[package]] name = "discv5" -version = "0.4.0" -source = "git+https://github.com/sigp/discv5?rev=e30a2c31b7ac0c57876458b971164654dfa4513b#e30a2c31b7ac0c57876458b971164654dfa4513b" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bac33cb3f99889a57e56a8c6ccb77aaf0cfc7787602b7af09783f736d77314e1" dependencies = [ "aes 0.7.5", - "aes-gcm 0.9.2", + "aes-gcm 0.9.4", "arrayvec", "delay_map", "enr", @@ -1811,7 +2206,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1864,9 +2259,9 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", @@ -1914,9 +2309,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "elliptic-curve" @@ -1994,7 +2389,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2024,10 +2419,10 @@ dependencies = [ name = "environment" version = "0.1.2" dependencies = [ + "async-channel 1.9.0", "ctrlc", "eth2_config", "eth2_network_config", - "exit-future", "futures", "logging", "serde", @@ -2526,6 +2921,17 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + [[package]] name = "event-listener" version = "4.0.3" @@ -2537,6 +2943,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + [[package]] name = "event-listener-strategy" version = "0.4.0" @@ -2547,16 +2964,26 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +dependencies = [ + "event-listener 5.2.0", + "pin-project-lite", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" dependencies = [ + "async-channel 1.9.0", "deposit_contract", "environment", "ethers-core", "ethers-providers", "execution_layer", - "exit-future", "fork_choice", "futures", "hex", @@ -2575,6 +3002,8 @@ dependencies = [ name = "execution_layer" version = "0.1.0" dependencies = [ + "alloy-consensus", + "alloy-rlp", "arc-swap", "async-trait", "builder_client", @@ -2585,7 +3014,6 @@ dependencies = [ "ethereum_serde_utils", "ethereum_ssz", "ethers-core", - "exit-future", 
"fork_choice", "futures", "hash-db", @@ -2622,20 +3050,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "exit-future" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" -dependencies = [ - "futures", -] - [[package]] name = "eyre" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6267a1fa6f59179ea4afc8e50fd8612a3cc60bc858f786ff877a4a8cb042799" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" dependencies = [ "indenter", "once_cell", @@ -2653,12 +3072,32 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + [[package]] name = "fastrand" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "ff" version = "0.12.1" @@ -2687,9 +3126,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" +checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" [[package]] name = "field-offset" @@ -2698,7 +3137,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ "memoffset", - "rustc_version", + "rustc_version 0.4.0", ] [[package]] @@ -2836,7 +3275,8 @@ dependencies = [ [[package]] name = "futures-bounded" version = "0.2.3" -source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1e2774cc104e198ef3d3e1ff4ab40f86fa3245d6cb6a3a46174f21463cee173" dependencies = [ "futures-timer", "futures-util", @@ -2876,13 +3316,31 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-lite" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" dependencies = [ + "fastrand 2.0.1", "futures-core", + "futures-io", + "parking", "pin-project-lite", ] @@ -2894,7 +3352,7 @@ checksum = 
"87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2904,7 +3362,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.10", + "rustls", ] [[package]] @@ -2932,9 +3390,9 @@ dependencies = [ [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -3051,7 +3509,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -3060,6 +3518,18 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "group" version = "0.12.1" @@ -3094,7 +3564,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.1.0", + "indexmap 2.2.5", "slab", "tokio", "tokio-util 0.7.10", @@ -3113,7 +3583,7 @@ dependencies = [ "futures-sink", "futures-util", "http 1.0.0", - "indexmap 2.1.0", + "indexmap 2.2.5", "slab", "tokio", "tokio-util 0.7.10", @@ -3122,9 +3592,9 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" [[package]] name = "hash-db" @@ -3147,6 +3617,15 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + [[package]] name = "hashbrown" version = "0.14.3" @@ -3216,15 +3695,24 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.4" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hex_fmt" @@ -3249,7 +3737,7 @@ dependencies = [ "ipnet", "once_cell", "rand", - "socket2 0.5.5", + "socket2 0.5.6", "thiserror", "tinyvec", "tokio", @@ -3303,7 
+3791,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ - "crypto-mac 0.11.0", + "crypto-mac 0.11.1", "digest 0.9.0", ] @@ -3506,7 +3994,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -3515,9 +4003,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" dependencies = [ "bytes", "futures-channel", @@ -3529,6 +4017,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", + "smallvec", "tokio", ] @@ -3541,9 +4030,9 @@ dependencies = [ "futures-util", "http 0.2.11", "hyper 0.14.28", - "rustls 0.21.10", + "rustls", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", ] [[package]] @@ -3561,27 +4050,25 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", - "futures-channel", "futures-util", "http 1.0.0", "http-body 1.0.0", - "hyper 1.1.0", + "hyper 1.2.0", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", - "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.59" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3626,17 +4113,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "if-addrs" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2273e421f7c4f0fc99e1934fe4776f59d8df2972f4199d703fc0da9f2a9f73de" -dependencies = [ - "if-addrs-sys", - "libc", - "winapi", -] - [[package]] name = "if-addrs" version = "0.10.2" @@ -3647,27 +4123,17 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "if-addrs-sys" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "if-watch" version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io", + "async-io 2.3.1", "core-foundation", "fnv", "futures", - "if-addrs 0.10.2", + "if-addrs", "ipnet", "log", "rtnetlink", @@ -3676,19 +4142,6 @@ dependencies = [ "windows", ] -[[package]] -name = "igd" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556b5a75cd4adb7c4ea21c64af1c48cefb2ce7d43dc4352c720a1fe47c21f355" -dependencies = [ - "attohttpc 0.16.3", - "log", - "rand", - "url", - "xmltree", -] - [[package]] name = "igd-next" version = "0.14.3" @@ -3696,7 +4149,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" 
 dependencies = [
  "async-trait",
- "attohttpc 0.24.1",
+ "attohttpc",
  "bytes",
  "futures",
  "http 0.2.11",
@@ -3782,9 +4235,9 @@ dependencies = [
 [[package]]
 name = "indexmap"
-version = "2.1.0"
+version = "2.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f"
+checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4"
 dependencies = [
  "equivalent",
  "hashbrown 0.14.3",
@@ -3835,7 +4288,7 @@ version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
 dependencies = [
- "hermit-abi 0.3.4",
+ "hermit-abi 0.3.9",
  "libc",
  "windows-sys 0.48.0",
 ]

@@ -3846,7 +4299,7 @@ version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f"
 dependencies = [
- "socket2 0.5.5",
+ "socket2 0.5.6",
  "widestring 1.0.2",
  "windows-sys 0.48.0",
  "winreg",
 ]

@@ -3858,6 +4311,17 @@ version = "2.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"

+[[package]]
+name = "is-terminal"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
+dependencies = [
+ "hermit-abi 0.3.9",
+ "libc",
+ "windows-sys 0.52.0",
+]
+
 [[package]]
 name = "itertools"
 version = "0.10.5"
@@ -3906,18 +4370,18 @@ dependencies = [
 [[package]]
 name = "jobserver"
-version = "0.1.27"
+version = "0.1.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d"
+checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6"
 dependencies = [
  "libc",
 ]

 [[package]]
 name = "js-sys"
-version = "0.3.67"
+version = "0.3.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1"
+checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee"
 dependencies = [
  "wasm-bindgen",
 ]

@@ -3972,6 +4436,16 @@ dependencies = [
  "cpufeatures",
 ]

+[[package]]
+name = "keccak-asm"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb8515fff80ed850aea4a1595f2e519c003e2a00a82fe168ebf5269196caf444"
+dependencies = [
+ "digest 0.10.7",
+ "sha3-asm",
+]
+
 [[package]]
 name = "keccak-hash"
 version = "0.10.0"
@@ -3982,6 +4456,15 @@ dependencies = [
  "tiny-keccak",
 ]

+[[package]]
+name = "kv-log-macro"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f"
+dependencies = [
+ "log",
+]
+
 [[package]]
 name = "kzg"
 version = "0.1.0"
@@ -4015,7 +4498,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"

 [[package]]
 name = "lcli"
-version = "4.6.0"
+version = "5.1.1"
 dependencies = [
  "account_utils",
  "beacon_chain",
@@ -4078,38 +4561,42 @@ dependencies = [
 [[package]]
 name = "libc"
-version = "0.2.152"
+version = "0.2.153"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"

 [[package]]
 name = "libflate"
-version = "1.4.0"
+version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ff4ae71b685bbad2f2f391fe74f6b7659a34871c08b210fdc039e43bee07d18"
+checksum = "9f7d5654ae1795afc7ff76f4365c2c8791b0feb18e8996a96adad8ffd7c3b2bf"
 dependencies = [
  "adler32",
+ "core2",
  "crc32fast",
+ "dary_heap",
  "libflate_lz77",
 ]

 [[package]]
 name = "libflate_lz77"
-version = "1.2.0"
+version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a52d3a8bfc85f250440e4424db7d857e241a3aebbbe301f3eb606ab15c39acbf"
+checksum = "be5f52fb8c451576ec6b79d3f4deb327398bc05bbdbd99021a6e77a4c855d524"
 dependencies = [
+ "core2",
+ "hashbrown 0.13.2",
  "rle-decode-fast",
 ]

 [[package]]
 name = "libloading"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161"
+checksum = "2caa5afb8bf9f3a2652760ce7d4f62d21c4d5a423e68466fca30df82f2330164"
 dependencies = [
  "cfg-if",
- "windows-sys 0.48.0",
+ "windows-targets 0.52.4",
 ]

 [[package]]
@@ -4135,8 +4622,9 @@ dependencies = [
 [[package]]
 name = "libp2p"
-version = "0.54.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+version = "0.53.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "681fb3f183edfbedd7a57d32ebe5dcdc0b9f94061185acf3c30249349cc6fc99"
 dependencies = [
  "bytes",
  "either",
@@ -4148,7 +4636,6 @@ dependencies = [
  "libp2p-connection-limits",
  "libp2p-core",
  "libp2p-dns",
- "libp2p-gossipsub",
  "libp2p-identify",
  "libp2p-identity",
  "libp2p-mdns",
@@ -4169,7 +4656,8 @@ dependencies = [
 [[package]]
 name = "libp2p-allow-block-list"
 version = "0.3.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6"
 dependencies = [
  "libp2p-core",
  "libp2p-identity",
@@ -4180,7 +4668,8 @@ dependencies = [
 [[package]]
 name = "libp2p-connection-limits"
 version = "0.3.1"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd"
 dependencies = [
  "libp2p-core",
  "libp2p-identity",
@@ -4191,7 +4680,8 @@ dependencies = [
 [[package]]
 name = "libp2p-core"
 version = "0.41.2"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8130a8269e65a2554d55131c770bdf4bcd94d2b8d4efb24ca23699be65066c05"
 dependencies = [
  "either",
  "fnv",
@@ -4218,7 +4708,8 @@ dependencies = [
 [[package]]
 name = "libp2p-dns"
 version = "0.41.1"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d17cbcf7160ff35c3e8e560de4a068fe9d6cb777ea72840e48eb76ff9576c4b6"
 dependencies = [
  "async-trait",
  "futures",
@@ -4230,44 +4721,13 @@ dependencies = [
  "tracing",
 ]

-[[package]]
-name = "libp2p-gossipsub"
-version = "0.46.1"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
-dependencies = [
- "async-channel",
- "asynchronous-codec",
- "base64 0.21.7",
- "byteorder",
- "bytes",
- "either",
- "fnv",
- "futures",
- "futures-ticker",
- "futures-timer",
- "getrandom",
- "hex_fmt",
- "instant",
- "libp2p-core",
- "libp2p-identity",
- "libp2p-swarm",
- "prometheus-client",
- "quick-protobuf",
- "quick-protobuf-codec",
- "rand",
- "regex",
- "sha2 0.10.8",
- "smallvec",
- "tracing",
- "void",
-]
-
 [[package]]
 name = "libp2p-identify"
 version = "0.44.1"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "20499a945d2f0221fdc6269b3848892c0f370d2ee3e19c7f65a29d8f860f6126"
 dependencies = [
- "asynchronous-codec",
+ "asynchronous-codec 0.7.0",
  "either",
  "futures",
  "futures-bounded",
@@ -4277,7 +4737,7 @@ dependencies = [
  "libp2p-swarm",
  "lru",
  "quick-protobuf",
- "quick-protobuf-codec",
+ "quick-protobuf-codec 0.3.1",
  "smallvec",
  "thiserror",
  "tracing",
@@ -4310,7 +4770,8 @@ dependencies = [
 [[package]]
 name = "libp2p-mdns"
 version = "0.45.1"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49007d9a339b3e1d7eeebc4d67c05dbf23d300b7d091193ec2d3f26802d7faf2"
 dependencies = [
  "data-encoding",
  "futures",
@@ -4321,7 +4782,7 @@ dependencies = [
  "libp2p-swarm",
  "rand",
  "smallvec",
- "socket2 0.5.5",
+ "socket2 0.5.6",
  "tokio",
  "tracing",
  "void",
 ]

@@ -4330,12 +4791,12 @@ dependencies = [
 [[package]]
 name = "libp2p-metrics"
 version = "0.14.1"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdac91ae4f291046a3b2660c039a2830c931f84df2ee227989af92f7692d3357"
 dependencies = [
  "futures",
  "instant",
  "libp2p-core",
- "libp2p-gossipsub",
  "libp2p-identify",
  "libp2p-identity",
  "libp2p-swarm",
@@ -4346,9 +4807,10 @@ dependencies = [
 [[package]]
 name = "libp2p-mplex"
 version = "0.41.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5e895765e27e30217b25f7cb7ac4686dad1ff80bf2fdeffd1d898566900a924"
 dependencies = [
- "asynchronous-codec",
+ "asynchronous-codec 0.6.2",
  "bytes",
  "futures",
  "libp2p-core",
@@ -4358,15 +4820,16 @@ dependencies = [
  "rand",
  "smallvec",
  "tracing",
- "unsigned-varint 0.8.0",
+ "unsigned-varint 0.7.2",
 ]

 [[package]]
 name = "libp2p-noise"
 version = "0.44.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ecd0545ce077f6ea5434bcb76e8d0fe942693b4380aaad0d34a358c2bd05793"
 dependencies = [
- "asynchronous-codec",
+ "asynchronous-codec 0.7.0",
  "bytes",
  "curve25519-dalek",
  "futures",
@@ -4389,22 +4852,24 @@ dependencies = [
 [[package]]
 name = "libp2p-plaintext"
 version = "0.41.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67330af40b67217e746d42551913cfb7ad04c74fa300fb329660a56318590b3f"
 dependencies = [
- "asynchronous-codec",
+ "asynchronous-codec 0.6.2",
  "bytes",
  "futures",
  "libp2p-core",
  "libp2p-identity",
  "quick-protobuf",
- "quick-protobuf-codec",
+ "quick-protobuf-codec 0.2.0",
  "tracing",
 ]

 [[package]]
 name = "libp2p-quic"
 version = "0.10.2"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0375cdfee57b47b313ef1f0fdb625b78aed770d33a40cf1c294a371ff5e6666"
 dependencies = [
  "bytes",
  "futures",
@@ -4417,8 +4882,8 @@ dependencies = [
  "quinn",
  "rand",
  "ring 0.16.20",
- "rustls 0.21.10",
- "socket2 0.5.5",
+ "rustls",
+ "socket2 0.5.6",
  "thiserror",
  "tokio",
  "tracing",
 ]

 [[package]]
 name = "libp2p-swarm"
-version = "0.45.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+version = "0.44.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866"
 dependencies = [
  "either",
  "fnv",
@@ -4449,18 +4915,20 @@ dependencies = [
 [[package]]
 name = "libp2p-swarm-derive"
 version = "0.34.1"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b644268b4acfdaa6a6100b31226ee7a36d96ab4c43287d113bfd2308607d8b6f"
 dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.48",
+ "syn 2.0.52",
 ]

 [[package]]
 name = "libp2p-tcp"
 version = "0.41.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b2460fc2748919adff99ecbc1aab296e4579e41f374fb164149bd2c9e529d4c"
 dependencies = [
  "futures",
  "futures-timer",
@@ -4468,7 +4936,7 @@ dependencies = [
  "libc",
  "libp2p-core",
  "libp2p-identity",
- "socket2 0.5.5",
+ "socket2 0.5.6",
  "tokio",
  "tracing",
 ]

 [[package]]
 name = "libp2p-tls"
 version = "0.3.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93ce7e3c2e7569d685d08ec795157981722ff96e9e9f9eae75df3c29d02b07a5"
 dependencies = [
  "futures",
  "futures-rustls",
@@ -4484,8 +4953,8 @@ dependencies = [
  "libp2p-identity",
  "rcgen",
  "ring 0.16.20",
- "rustls 0.21.10",
- "rustls-webpki 0.101.7",
+ "rustls",
+ "rustls-webpki",
  "thiserror",
  "x509-parser",
  "yasna",
 ]

 [[package]]
 name = "libp2p-upnp"
-version = "0.2.0"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b49cc89949bf0e06869297cd4fe2c132358c23fe93e76ad43950453df4da3d35"
 dependencies = [
  "futures",
  "futures-timer",
@@ -4509,7 +4979,8 @@ dependencies = [
 [[package]]
 name = "libp2p-yamux"
 version = "0.45.1"
-source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "200cbe50349a44760927d50b431d77bed79b9c0a3959de1af8d24a63434b71e5"
 dependencies = [
  "either",
  "futures",
@@ -4592,9 +5063,9 @@ dependencies = [
 [[package]]
 name = "libz-sys"
-version = "1.1.14"
+version = "1.1.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050"
+checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6"
 dependencies = [
  "cc",
  "pkg-config",
@@ -4603,7 +5074,7 @@ dependencies = [
 [[package]]
 name = "lighthouse"
-version = "4.6.0"
+version = "5.1.1"
 dependencies = [
  "account_manager",
  "account_utils",
@@ -4657,17 +5128,28 @@ dependencies = [
 name = "lighthouse_network"
 version = "0.2.0"
 dependencies = [
+ "async-channel 1.9.0",
+ "async-std",
+ "asynchronous-codec 0.7.0",
+ "base64 0.21.7",
+ "byteorder",
+ "bytes",
  "delay_map",
  "directory",
  "dirs",
  "discv5",
+ "either",
  "error-chain",
  "ethereum_ssz",
  "ethereum_ssz_derive",
- "exit-future",
  "fnv",
  "futures",
+ "futures-ticker",
+ "futures-timer",
+ "getrandom",
  "hex",
+ "hex_fmt",
+ "instant",
  "lazy_static",
  "libp2p",
  "libp2p-mplex",
@@ -4677,6 +5159,8 @@ dependencies = [
  "lru_cache",
  "parking_lot 0.12.1",
  "prometheus-client",
+ "quick-protobuf",
+ "quick-protobuf-codec 0.3.1",
  "quickcheck",
  "quickcheck_macros",
  "rand",
@@ -4697,6 +5181,7 @@ dependencies = [
  "tokio",
  "tokio-io-timeout",
  "tokio-util 0.6.10",
+ "tracing",
  "tree_hash",
  "tree_hash_derive",
  "types",
@@ -4726,6 +5211,12 @@ version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"

+[[package]]
+name = "linux-raw-sys"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
+
 [[package]]
 name = "linux-raw-sys"
 version = "0.4.13"
@@ -4773,9 +5264,12 @@ dependencies = [
 [[package]]
 name = "log"
-version = "0.4.20"
+version = "0.4.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+dependencies = [
+ "value-bag",
+]

 [[package]]
 name = "logging"
@@ -4802,9 +5296,9 @@ dependencies = [
 [[package]]
 name = "lru"
-version = "0.12.1"
+version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7"
+checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc"
 dependencies = [
  "hashbrown 0.14.3",
 ]

@@ -4903,9 +5397,9 @@ dependencies = [
 [[package]]
 name = "mediatype"
-version = "0.19.17"
+version = "0.19.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83a018c36a54f4e12c30464bbc59311f85d3f6f4d6c1b4fa4ea9db2b174ddefc"
+checksum = "8878cd8d1b3c8c8ae4b2ba0a36652b7cf192f618a599a7fbdfa25cffd4ea72dd"

 [[package]]
 name = "memchr"
@@ -4978,18 +5472,6 @@ dependencies = [
  "quote",
 ]

-[[package]]
-name = "milagro_bls"
-version = "1.5.1"
-source = "git+https://github.com/sigp/milagro_bls?tag=v1.5.1#d3fc0a40cfe8b72ccda46ba050ee6786a59ce753"
-dependencies = [
- "amcl",
- "hex",
- "lazy_static",
- "rand",
- "zeroize",
-]
-
 [[package]]
 name = "mime"
version = "0.3.17" @@ -5014,18 +5496,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -5034,9 +5516,9 @@ dependencies = [ [[package]] name = "mock_instant" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c1a54de846c4006b88b1516731cc1f6026eb5dc4bcb186aa071ef66d40524ec" +checksum = "9366861eb2a2c436c20b12c8dbec5f798cea6b47ad99216be0282942e2c81ea0" [[package]] name = "monitoring_api" @@ -5106,14 +5588,15 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.13.0" -source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" dependencies = [ "bytes", "futures", + "log", "pin-project", "smallvec", - "tracing", - "unsigned-varint 0.8.0", + "unsigned-varint 0.7.2", ] [[package]] @@ -5204,6 +5687,8 @@ dependencies = [ name = "network" version = "0.2.0" dependencies = [ + "anyhow", + "async-channel 1.9.0", "beacon_chain", "beacon_processor", "delay_map", @@ -5214,13 +5699,11 @@ dependencies = [ "ethereum-types 0.14.1", "ethereum_ssz", "execution_layer", - "exit-future", "fnv", "futures", "genesis", "hex", - "if-addrs 0.6.7", - "igd", + "igd-next", "itertools", "lazy_static", "lighthouse_metrics", @@ -5353,20 +5836,25 @@ dependencies = [ ] [[package]] -name = "num-integer" -version = "0.1.45" +name = "num-conv" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" dependencies = [ "autocfg", "num-integer", @@ -5375,11 +5863,12 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -5388,16 +5877,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.4", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ + "hermit-abi 0.3.9", "libc", ] @@ -5440,9 +5920,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "open-fastrlp" @@ -5471,9 +5951,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.63" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ "bitflags 2.4.2", "cfg-if", @@ -5492,7 +5972,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -5503,18 +5983,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.2.1+3.2.0" +version = "300.2.3+3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3" +checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.99" +version = "0.9.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" +checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" dependencies = [ "cc", "libc", @@ -5693,7 +6173,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" dependencies = [ - "crypto-mac 0.11.0", + "crypto-mac 0.11.1", ] [[package]] @@ -5748,6 +6228,17 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + [[package]] name = "pharos" version = "0.5.3" @@ -5755,7 +6246,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version", + "rustc_version 0.4.0", ] [[package]] @@ -5778,22 +6269,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version 
= "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -5808,6 +6299,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.9.0" @@ -5830,9 +6332,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" @@ -5876,14 +6378,30 @@ dependencies = [ [[package]] name = "polling" -version = "3.3.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c980a3880efd47b2e262f6a4bb6daad6555cf3367aa9c4e52895f69537a41" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24f040dee2588b4963afb4e420540439d126f73fdacf4a9c486a96d840bac3c9" dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.30", + "rustix 0.38.31", "tracing", "windows-sys 0.52.0", ] @@ -5908,7 +6426,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.4.0", + "universal-hash 0.4.1", ] [[package]] @@ -5988,7 +6506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -6046,30 +6564,6 @@ dependencies = [ "toml_edit 0.20.7", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro2" version = "1.0.78" @@ -6111,9 +6605,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510c4f1c9d81d556458f94c98f857748130ea9737bbd6053da497503b26ea63c" +checksum = "6f87c10af16e0af74010d2a123d202e8363c04db5acfa91d8747f64a8524da3a" dependencies = [ "dtoa", "itoa", @@ -6129,7 +6623,27 @@ checksum = 
"440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", +] + +[[package]] +name = "proptest" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.4.2", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax 0.8.2", + "rusty-fork", + "tempfile", + "unarray", ] [[package]] @@ -6187,10 +6701,24 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.3.1" -source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", + "bytes", + "quick-protobuf", + "thiserror", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "quick-protobuf-codec" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" +dependencies = [ + "asynchronous-codec 0.7.0", "bytes", "quick-protobuf", "thiserror", @@ -6231,7 +6759,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.10", + "rustls", "thiserror", "tokio", "tracing", @@ -6247,7 +6775,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.21.10", + "rustls", "slab", "thiserror", "tinyvec", @@ -6262,7 +6790,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.5", + "socket2 0.5.6", "tracing", "windows-sys 0.48.0", ] @@ -6350,9 +6878,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", @@ -6417,7 +6945,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.4", + "regex-automata 0.4.5", "regex-syntax 0.8.2", ] @@ -6432,9 +6960,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -6455,9 +6983,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.23" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ "base64 0.21.7", "bytes", @@ -6478,15 +7006,16 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.10", - "rustls-pemfile 1.0.4", + "rustls", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", 
"system-configuration", "tokio", "tokio-native-tls", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-util 0.7.10", "tower-service", "url", @@ -6546,16 +7075,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6610,6 +7140,36 @@ dependencies = [ "tokio", ] +[[package]] +name = "ruint" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49b1d9521f889713d1221270fdd63370feca7e5c71a18745343402fa86e4f04f" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint", + "num-traits", + "parity-scale-codec 3.6.9", + "primitive-types 0.12.2", + "proptest", + "rand", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" + [[package]] name = "rusqlite" version = "0.28.0" @@ -6642,13 +7202,22 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver", + "semver 1.0.22", ] [[package]] @@ -6676,9 +7245,23 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.30" +version = "0.37.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ "bitflags 2.4.2", "errno", @@ -6694,25 +7277,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.17.7", - "rustls-webpki 0.101.7", + "ring 0.17.8", + "rustls-webpki", "sct", ] -[[package]] -name = "rustls" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" -dependencies = [ - "log", - "ring 0.17.7", - "rustls-pki-types", - "rustls-webpki 0.102.1", - "subtle", - "zeroize", -] - [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -6722,40 +7291,13 @@ dependencies = [ "base64 0.21.7", ] -[[package]] -name = "rustls-pemfile" -version = "2.0.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" -dependencies = [ - "base64 0.21.7", - "rustls-pki-types", -] - -[[package]] -name = "rustls-pki-types" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" - [[package]] name = "rustls-webpki" version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.7", - "untrusted 0.9.0", -] - -[[package]] -name = "rustls-webpki" -version = "0.102.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" -dependencies = [ - "ring 0.17.7", - "rustls-pki-types", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -6765,10 +7307,23 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "rw-stream-sink" version = "0.4.0" -source = "git+https://github.com/sigp/rust-libp2p/?rev=cfa3275ca17e502799ed56e555b6c0611752e369#cfa3275ca17e502799ed56e555b6c0611752e369" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" dependencies = [ "futures", "pin-project", @@ -6777,9 +7332,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "safe_arith" @@ -6875,7 +7430,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -6932,13 +7487,31 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.6.0" @@ -6955,9 +7528,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.195" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -6984,20 +7557,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -7022,7 +7595,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -7070,11 +7643,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.30" +version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.5", "itoa", "ryu", "serde", @@ -7138,6 +7711,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bac61da6b35ad76b195eb4771210f947734321a8d81d7738e1580d953bc7a15e" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -7361,11 +7944,11 @@ dependencies = [ [[package]] name = "slog-term" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" +checksum = "b6e022d0b998abfe5c3782c1f03551a596269450ccd677ea51c56f8b214610e8" dependencies = [ - "atty", + "is-terminal", "slog", "term", "thread_local", @@ -7374,9 +7957,9 @@ dependencies = [ [[package]] name = "sloggers" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a0a4d8569a69ee56f277bffc2f6eee637b98ed468448e8a5a84fa63efe4de9d" +checksum = "75062c2738b82cd45ae633623caae3393f43eb00aada1dc2d3ebe88db6b0db9b" dependencies = [ "chrono", "libc", @@ -7420,17 +8003,17 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "snow" -version = "0.9.4" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ "aes-gcm 0.10.3", "blake2", "chacha20poly1305", "curve25519-dalek", "rand_core", - "ring 0.17.7", - "rustc_version", + "ring 0.17.8", + "rustc_version 0.4.0", "sha2 0.10.8", "subtle", ] @@ -7447,12 +8030,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = 
"05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -7621,9 +8204,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" @@ -7661,9 +8244,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -7765,7 +8348,7 @@ checksum = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" name = "task_executor" version = "0.1.0" dependencies = [ - "exit-future", + "async-channel 1.9.0", "futures", "lazy_static", "lighthouse_metrics", @@ -7776,14 +8359,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand", - "redox_syscall 0.4.1", - "rustix 0.38.30", + "fastrand 2.0.1", + "rustix 0.38.31", "windows-sys 0.52.0", ] @@ -7851,29 +8433,29 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -7890,14 +8472,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.31" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", - "libc", - "num_threads", + "num-conv", "powerfmt", "serde", "time-core", @@ -7912,10 +8493,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" dependencies = [ + 
"num-conv", "time-core", ] @@ -7985,9 +8567,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -7996,7 +8578,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -8019,7 +8601,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -8052,7 +8634,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tokio-util 0.7.10", "whoami", @@ -8064,18 +8646,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.10", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" -dependencies = [ - "rustls 0.22.2", - "rustls-pki-types", + "rustls", "tokio", ] @@ -8158,7 +8729,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", @@ -8171,7 +8742,7 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.5", "toml_datetime", "winnow", ] @@ -8236,7 +8807,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -8403,6 +8974,12 @@ dependencies = [ "tree_hash_derive", ] +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -8416,6 +8993,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unescape" version = "0.1.0" @@ -8445,9 +9028,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -8466,9 +9049,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ "generic-array", "subtle", @@ -8505,16 +9088,16 @@ name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" +dependencies = [ + "asynchronous-codec 0.6.2", + "bytes", +] [[package]] name = "unsigned-varint" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" -dependencies = [ - "asynchronous-codec", - "bytes", -] [[package]] name = "untrusted" @@ -8574,11 +9157,10 @@ dependencies = [ "eth2", "eth2_keystore", "ethereum_serde_utils", - "exit-future", "filesystem", "futures", "hex", - "hyper 1.1.0", + "hyper 1.2.0", "itertools", "lazy_static", "libsecp256k1", @@ -8663,6 +9245,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "value-bag" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126e423afe2dd9ac52142e7e9d5ce4135d7e13776c529d27fd6bc49f19e3280b" + [[package]] name = "vcpkg" version = "0.2.15" @@ -8688,10 +9276,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] -name = "walkdir" -version = "2.4.0" +name = "wait-timeout" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -8709,7 +9312,8 @@ dependencies = [ [[package]] name = "warp" version = "0.3.6" -source = "git+https://github.com/seanmonstar/warp.git#7b07043cee0ca24e912155db4e8f6d9ab7c049ed" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" dependencies = [ "bytes", "futures-channel", @@ -8722,13 +9326,14 @@ dependencies = [ "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile 2.0.0", + "rustls-pemfile", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls", + "tokio-stream", "tokio-util 0.7.10", "tower-service", "tracing", @@ -8739,6 +9344,7 @@ name = "warp_utils" version = "0.1.0" dependencies = [ "beacon_chain", + "bytes", "eth2", "headers", "lazy_static", @@ -8746,6 +9352,7 @@ dependencies = [ "safe_arith", "serde", "serde_array_query", + "serde_json", "state_processing", "tokio", "types", @@ -8759,10 +9366,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] -name = "wasm-bindgen" -version = "0.2.90" +name = "wasite" +version = 
"0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + +[[package]] +name = "wasm-bindgen" +version = "0.2.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -8770,24 +9383,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.40" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" +checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" dependencies = [ "cfg-if", "js-sys", @@ -8797,9 +9410,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8807,28 +9420,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "wasm-streams" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" dependencies = [ "futures-util", "js-sys", @@ -8868,7 +9481,7 @@ dependencies = [ "eth2", "hex", "http_api", - "hyper 1.1.0", + "hyper 1.2.0", "log", "logging", "network", @@ -8889,9 +9502,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" +checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" dependencies = [ "js-sys", "wasm-bindgen", @@ -8902,10 +9515,10 @@ name = "web3signer_tests" version = "0.1.0" dependencies = [ "account_utils", + "async-channel 1.9.0", "environment", "eth2_keystore", "eth2_network_config", - "exit-future", "futures", "lazy_static", "parking_lot 
0.12.1", @@ -8925,9 +9538,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.3" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "which" @@ -8938,16 +9551,17 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.30", + "rustix 0.38.31", ] [[package]] name = "whoami" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" dependencies = [ - "wasm-bindgen", + "redox_syscall 0.4.1", + "wasite", "web-sys", ] @@ -8963,12 +9577,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" -[[package]] -name = "wildmatch" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f44b95f62d34113cf558c93511ac93027e03e9c29a60dd0fd70e6e025c7270a" - [[package]] name = "winapi" version = "0.3.9" @@ -9037,7 +9645,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -9064,7 +9672,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -9099,17 +9707,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -9126,9 +9734,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -9144,9 +9752,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ 
-9162,9 +9770,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -9180,9 +9788,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -9198,9 +9806,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -9216,9 +9824,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -9234,15 +9842,15 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" -version = "0.5.34" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7cf47b659b318dccbd69cc4797a39ae128f533dce7902a1096044d1967b9c16" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] @@ -9268,7 +9876,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version", + "rustc_version 0.4.0", "send_wrapper", "thiserror", "wasm-bindgen", @@ -9293,9 +9901,9 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ "curve25519-dalek", "rand_core", @@ -9362,8 +9970,7 @@ dependencies = [ [[package]] name = "yamux" version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1d0148b89300047e72994bee99ecdabd15a9166a7b70c8b8c37c314dcc9002" +source = "git+https://github.com/sigp/rust-yamux.git#12a23aa0e34b7807c0c5f87f06b3438f7d6c2ed0" dependencies = [ "futures", "instant", @@ -9401,7 +10008,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.48", + "syn 2.0.52", ] [[package]] @@ -9421,7 +10028,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -9430,7 +10037,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ - "aes 0.8.3", + "aes 0.8.4", "byteorder", "bzip2", "constant_time_eq", diff --git a/Cargo.toml b/Cargo.toml index ca55d00d4..d3465a9e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,7 +94,9 @@ resolver = "2" edition = "2021" [workspace.dependencies] +anyhow = "1" arbitrary = { version = "1", features = ["derive"] } +async-channel = "1.9.0" bincode = "1" bitvec = "1" byteorder = "1" @@ -105,12 +107,13 @@ criterion = "0.3" delay_map = "0.3" derivative = "2" dirs = "3" -discv5 = { git="https://github.com/sigp/discv5", rev="e30a2c31b7ac0c57876458b971164654dfa4513b", features = ["libp2p"] } +either = "1.9" +discv5 = { version = "0.4.1", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" ethereum-types = "0.14" ethereum_hashing = "1.0.0-beta.2" -ethereum_serde_utils = "0.5" +ethereum_serde_utils = "0.5.2" ethereum_ssz = "0.5" ethereum_ssz_derive = "0.5" ethers-core = "1" @@ -160,6 +163,7 @@ tempfile = "3" tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.6", features = ["codec", "compat", "time"] } +tracing = "0.1.40" tracing-appender = "0.2" tracing-core = "0.1" tracing-log = "0.2" @@ -168,8 +172,7 @@ tree_hash = "0.5" tree_hash_derive = "0.5" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } -# TODO update to warp 0.3.6 after released. -warp = { git = "https://github.com/seanmonstar/warp.git", default-features = false, features = ["tls"] } +warp = { version = "0.3.6", default-features = false, features = ["tls"] } zeroize = { version = "1", features = ["zeroize_derive"] } zip = "0.6" @@ -228,6 +231,9 @@ validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } +[patch.crates-io] +yamux = { git = "https://github.com/sigp/rust-yamux.git" } + [profile.maxperf] inherits = "release" lto = "fat" diff --git a/Dockerfile b/Dockerfile index a8dadf2ad..901c1b83d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.73.0-bullseye AS builder +FROM rust:1.75.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG FEATURES diff --git a/Makefile b/Makefile index 8392d0017..6b6418cb8 100644 --- a/Makefile +++ b/Makefile @@ -143,7 +143,6 @@ run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" - cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Runs EF test vectors with nextest @@ -151,7 +150,6 @@ nextest-run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" - cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 8428a30a3..60a9f95a2 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.6.0" +version = "5.1.1" authors = [ "Paul Hauner ", "Age Manning IndexedAggregatedAttestation<'a, T> { Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)), }; - let indexed_attestation = - match map_attestation_committee(chain, attestation, |(committee, _)| { + let get_indexed_attestation_with_committee = + |(committee, _): (BeaconCommittee, CommitteesPerSlot)| { // Note: this clones the signature which is known to be a relatively slow operation. // // Future optimizations should remove this clone. 
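The hunks above and below extract the closure that was previously passed inline to map_attestation_committee into a named binding, get_indexed_attestation_with_committee, so the match on the result stays short and readable. A minimal, self-contained sketch of that pattern, using hypothetical stand-in types rather than Lighthouse's real BeaconCommittee/CommitteesPerSlot:

// Hypothetical stand-in for (BeaconCommittee, CommitteesPerSlot).
type Committee = (u64, u64);

fn map_committee<R>(
    committee: Committee,
    f: impl Fn(Committee) -> Result<R, String>,
) -> Result<R, String> {
    f(committee)
}

fn main() {
    // After the refactor: the closure gets a name (and explicit argument
    // types) before being handed to the higher-order function.
    let get_indexed = |(index, _count): Committee| -> Result<u64, String> { Ok(index * 2) };

    let indexed = match map_committee((21, 64), get_indexed) {
        Ok(v) => v,
        Err(e) => return eprintln!("error: {e}"),
    };
    assert_eq!(indexed, 42);
}

Naming the closure also gives the compiler a single inference site for its argument types, which the diff makes explicit with the (BeaconCommittee, CommitteesPerSlot) annotation.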
@@ -561,11 +561,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { get_indexed_attestation(committee.committee, attestation) .map_err(|e| BeaconChainError::from(e).into()) - }) { - Ok(indexed_attestation) => indexed_attestation, - Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)), }; + let indexed_attestation = match map_attestation_committee( + chain, + attestation, + get_indexed_attestation_with_committee, + ) { + Ok(indexed_attestation) => indexed_attestation, + Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)), + }; + Ok(IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 9312d4511..4f4f8ed1f 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -19,7 +19,7 @@ use types::{ }; #[derive(PartialEq)] -pub enum CheckEarlyAttesterCache { +pub enum CheckCaches { Yes, No, } @@ -385,14 +385,14 @@ impl EngineRequest { pub struct BeaconBlockStreamer { execution_layer: ExecutionLayer, - check_early_attester_cache: CheckEarlyAttesterCache, + check_caches: CheckCaches, beacon_chain: Arc>, } impl BeaconBlockStreamer { pub fn new( beacon_chain: &Arc>, - check_early_attester_cache: CheckEarlyAttesterCache, + check_caches: CheckCaches, ) -> Result { let execution_layer = beacon_chain .execution_layer @@ -402,17 +402,17 @@ impl BeaconBlockStreamer { Ok(Self { execution_layer, - check_early_attester_cache, + check_caches, beacon_chain: beacon_chain.clone(), }) } - fn check_early_attester_cache( - &self, - root: Hash256, - ) -> Option>> { - if self.check_early_attester_cache == CheckEarlyAttesterCache::Yes { - self.beacon_chain.early_attester_cache.get_block(root) + fn check_caches(&self, root: Hash256) -> Option>> { + if self.check_caches == CheckCaches::Yes { + self.beacon_chain + .data_availability_checker + .get_block(&root) + .or(self.beacon_chain.early_attester_cache.get_block(root)) } else { None } @@ -422,10 +422,7 @@ impl BeaconBlockStreamer { let mut db_blocks = Vec::new(); for root in block_roots { - if let Some(cached_block) = self - .check_early_attester_cache(root) - .map(LoadedBeaconBlock::Full) - { + if let Some(cached_block) = self.check_caches(root).map(LoadedBeaconBlock::Full) { db_blocks.push((root, Ok(Some(cached_block)))); continue; } @@ -554,7 +551,7 @@ impl BeaconBlockStreamer { "Using slower fallback method of eth_getBlockByHash()" ); for root in block_roots { - let cached_block = self.check_early_attester_cache(root); + let cached_block = self.check_caches(root); let block_result = if cached_block.is_some() { Ok(cached_block) } else { @@ -682,7 +679,7 @@ impl From for BeaconChainError { #[cfg(test)] mod tests { - use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; + use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; use execution_layer::test_utils::{Block, DEFAULT_ENGINE_CAPABILITIES}; use execution_layer::EngineCapabilities; @@ -804,7 +801,7 @@ mod tests { let start = epoch * slots_per_epoch; let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch]; epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]); - let streamer = BeaconBlockStreamer::new(&harness.chain, CheckEarlyAttesterCache::No) + let streamer = 
BeaconBlockStreamer::new(&harness.chain, CheckCaches::No) .expect("should create streamer"); let (block_tx, mut block_rx) = mpsc::unbounded_channel(); streamer.stream(epoch_roots.clone(), block_tx).await; @@ -945,7 +942,7 @@ mod tests { let start = epoch * slots_per_epoch; let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch]; epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]); - let streamer = BeaconBlockStreamer::new(&harness.chain, CheckEarlyAttesterCache::No) + let streamer = BeaconBlockStreamer::new(&harness.chain, CheckCaches::No) .expect("should create streamer"); let (block_tx, mut block_rx) = mpsc::unbounded_channel(); streamer.stream(epoch_roots.clone(), block_tx).await; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index fcd7be791..ea1c5089a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,7 +4,7 @@ use crate::attestation_verification::{ VerifiedUnaggregatedAttestation, }; use crate::attester_cache::{AttesterCache, AttesterCacheKey}; -use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; +use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; @@ -38,6 +38,7 @@ use crate::light_client_finality_update_verification::{ use crate::light_client_optimistic_update_verification::{ Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate, }; +use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::BackgroundMigrator; use crate::naive_aggregation_pool::{ AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, @@ -339,6 +340,8 @@ struct PartialBeaconBlock { bls_to_execution_changes: Vec, } +pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); + pub type BeaconForkChoice = ForkChoice< BeaconForkChoiceStore< ::EthSpec, @@ -420,10 +423,6 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen BLS to execution changes for. pub(crate) observed_bls_to_execution_changes: Mutex>, - /// The most recently validated light client finality update received on gossip. - pub latest_seen_finality_update: Mutex>>, - /// The most recently validated light client optimistic update received on gossip. - pub latest_seen_optimistic_update: Mutex>>, /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Interfaces with the execution client. @@ -466,6 +465,10 @@ pub struct BeaconChain { pub block_times_cache: Arc>, /// A cache used to track pre-finalization block roots for quick rejection. pub pre_finalization_block_cache: PreFinalizationBlockCache, + /// A cache used to produce light_client server messages + pub light_client_server_cache: LightClientServerCache, + /// Sender to signal the light_client server to produce new updates + pub light_client_server_tx: Option>>, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, @@ -1128,7 +1131,7 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. 
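The `light_client_server_tx` field added above is one half of a producer/consumer pair: block import sends a `LightClientProducerEvent` of `(parent_root, slot, sync_aggregate)`, and a separate long-lived task is expected to drain those events and recompute the cached updates (cf. `recompute_and_cache_light_client_updates` below). A hypothetical sketch of that flow using the `async-channel` crate added to the workspace; whether the real plumbing uses this crate or another channel type is not shown in this hunk, and all names here are illustrative:

    use async_channel::{Receiver, Sender};

    // Simplified stand-in for LightClientProducerEvent<T>:
    // (parent_root, slot, sync_aggregate).
    type Event = ([u8; 32], u64, Vec<u8>);

    /// Producer side (block import): `try_send` never blocks; if the queue is
    /// full the event is dropped rather than stalling import.
    fn on_block_imported(tx: &Sender<Event>, parent_root: [u8; 32], slot: u64) {
        let _ = tx.try_send((parent_root, slot, vec![]));
    }

    /// Consumer side: drains events and recomputes the cached updates.
    async fn light_client_producer_task(rx: Receiver<Event>) {
        while let Ok((_parent_root, slot, _sync_aggregate)) = rx.recv().await {
            // A real implementation would recompute and cache the
            // finality/optimistic updates for `slot` here.
            let _ = slot;
        }
    }

The pair would be created once at startup with `async_channel::bounded(capacity)`, with the two ends handed to the beacon chain and the consumer task respectively.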
- pub fn get_blocks_checking_early_attester_cache( + pub fn get_blocks_checking_caches( self: &Arc, block_roots: Vec, executor: &TaskExecutor, ) -> Result< >, Error, > { - Ok( - BeaconBlockStreamer::::new(self, CheckEarlyAttesterCache::Yes)? - .launch_stream(block_roots, executor), - ) + Ok(BeaconBlockStreamer::::new(self, CheckCaches::Yes)? + .launch_stream(block_roots, executor)) } pub fn get_blocks( @@ -1160,10 +1161,8 @@ impl BeaconChain { >, Error, > { - Ok( - BeaconBlockStreamer::::new(self, CheckEarlyAttesterCache::No)? - .launch_stream(block_roots, executor), - ) + Ok(BeaconBlockStreamer::::new(self, CheckCaches::No)? + .launch_stream(block_roots, executor)) } pub fn get_blobs_checking_early_attester_cache( @@ -1344,6 +1343,19 @@ impl BeaconChain { self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots) } + pub fn recompute_and_cache_light_client_updates( + &self, + (parent_root, slot, sync_aggregate): LightClientProducerEvent, + ) -> Result<(), Error> { + self.light_client_server_cache.recompute_and_cache_updates( + &self.log, + self.store.clone(), + &parent_root, + slot, + &sync_aggregate, + ) + } + /// Returns the current heads of the `BeaconChain`. For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. @@ -2944,18 +2956,8 @@ impl BeaconChain { unverified_block: B, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { - if let Ok(commitments) = unverified_block - .block() - .message() - .body() - .blob_kzg_commitments() - { - self.data_availability_checker.notify_block_commitments( - unverified_block.block().slot(), - block_root, - commitments.clone(), - ); - }; + self.data_availability_checker + .notify_block(block_root, unverified_block.block_cloned()); let r = self .process_block(block_root, unverified_block, notify_execution_layer, || { Ok(()) @@ -3521,6 +3523,20 @@ impl BeaconChain { }; let current_finalized_checkpoint = state.finalized_checkpoint(); + // Compute state proofs for light client updates before inserting the state into the + // snapshot cache. + if self.config.enable_light_client_server { + self.light_client_server_cache + .cache_state_data( + &self.spec, block, block_root, + // mutable reference on the state is needed to compute merkle proofs + &mut state, + ) + .unwrap_or_else(|e| { + error!(self.log, "error caching light_client data {:?}", e); + }); + } + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .ok_or(Error::SnapshotCacheLockTimeout) @@ -3893,6 +3909,28 @@ impl BeaconChain { })); } } + + // Do not trigger the light_client server update producer for old blocks, to avoid + // extra work during sync. + if self.config.enable_light_client_server + && block_delay_total < self.slot_clock.slot_duration() * 32 + { + if let Some(mut light_client_server_tx) = self.light_client_server_tx.clone() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + if let Err(e) = light_client_server_tx.try_send(( + block.parent_root(), + block.slot(), + sync_aggregate.clone(), + )) { + warn!( + self.log, + "Failed to send light_client server event"; + "error" => ?e + ); + } + } + } + } } // For the current and next epoch of this state, ensure we have the shuffling from this @@ -4070,7 +4108,7 @@ impl BeaconChain { .task_executor .spawn_blocking_handle( move || chain.load_state_for_block_production(slot), - "produce_partial_beacon_block", + "load_state_for_block_production", ) .ok_or(BlockProductionError::ShuttingDown)?
.await @@ -4128,21 +4166,14 @@ impl BeaconChain { (re_org_state.pre_state, re_org_state.state_root) } // Normal case: proposing a block atop the current head using the cache. - else if let Some((_, cached_state)) = self - .block_production_state - .lock() - .take() - .filter(|(cached_block_root, _)| *cached_block_root == head_block_root) + else if let Some((_, cached_state)) = + self.get_state_from_block_production_cache(head_block_root) { (cached_state.pre_state, cached_state.state_root) } // Fall back to a direct read of the snapshot cache. - else if let Some(pre_state) = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(head_block_root) - }) + else if let Some(pre_state) = + self.get_state_from_snapshot_cache_for_block_production(head_block_root) { warn!( self.log, @@ -4183,6 +4214,40 @@ impl BeaconChain { Ok((state, state_root_opt)) } + /// Get the state cached for block production *if* it matches `head_block_root`. + /// + /// This will clear the cache regardless of whether the block root matches, so only call this if + /// you think the `head_block_root` is likely to match! + fn get_state_from_block_production_cache( + &self, + head_block_root: Hash256, + ) -> Option<(Hash256, BlockProductionPreState)> { + // Take care to drop the lock as quickly as possible. + let mut lock = self.block_production_state.lock(); + let result = lock + .take() + .filter(|(cached_block_root, _)| *cached_block_root == head_block_root); + drop(lock); + result + } + + /// Get a state for block production from the snapshot cache. + fn get_state_from_snapshot_cache_for_block_production( + &self, + head_block_root: Hash256, + ) -> Option> { + if let Some(lock) = self + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + { + let result = lock.get_state_for_block_production(head_block_root); + drop(lock); + result + } else { + None + } + } + /// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable. /// /// This function will return `None` if proposer re-orgs are disabled. @@ -4275,12 +4340,8 @@ impl BeaconChain { // Only attempt a re-org if we hit the block production cache or snapshot cache. 
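Both helpers above exist to keep lock hold times minimal: take the lock, move the value out, and drop the guard before doing any further work. A generic sketch of the same take-if-matching pattern (illustrative types, not the Lighthouse API):

    use parking_lot::Mutex;

    struct ProductionCache {
        slot: Mutex<Option<(u64, String)>>, // (cached_key, cached_state)
    }

    impl ProductionCache {
        /// Clears the slot and returns the value only if the key matches,
        /// releasing the lock before the caller does any real work.
        fn take_if(&self, key: u64) -> Option<String> {
            let mut guard = self.slot.lock();
            let result = guard.take().filter(|(k, _)| *k == key).map(|(_, v)| v);
            drop(guard); // drop eagerly; `result` outlives the critical section
            result
        }
    }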
let pre_state = self - .block_production_state - .lock() - .take() - .and_then(|(cached_block_root, state)| { - (cached_block_root == re_org_parent_block).then_some(state) - }) + .get_state_from_block_production_cache(re_org_parent_block) + .map(|(_, state)| state) .or_else(|| { warn!( self.log, @@ -4289,11 +4350,7 @@ impl BeaconChain { "slot" => slot, "block_root" => ?re_org_parent_block ); - self.snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(re_org_parent_block) - }) + self.get_state_from_snapshot_cache_for_block_production(re_org_parent_block) }) .or_else(|| { debug!( diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index e8df5b811..ac3d3e3ab 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -764,6 +764,7 @@ pub trait IntoExecutionPendingBlock: Sized { ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; + fn block_cloned(&self) -> Arc>; } impl GossipVerifiedBlock { @@ -1017,6 +1018,10 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock &SignedBeaconBlock { self.block.as_block() } + + fn block_cloned(&self) -> Arc> { + self.block.clone() + } } impl SignatureVerifiedBlock { @@ -1168,6 +1173,10 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc fn block(&self) -> &SignedBeaconBlock { self.block.as_block() } + + fn block_cloned(&self) -> Arc> { + self.block.block_cloned() + } } impl IntoExecutionPendingBlock for Arc> { @@ -1198,6 +1207,10 @@ impl IntoExecutionPendingBlock for Arc &SignedBeaconBlock { self } + + fn block_cloned(&self) -> Arc> { + self.clone() + } } impl IntoExecutionPendingBlock for RpcBlock { @@ -1228,6 +1241,10 @@ impl IntoExecutionPendingBlock for RpcBlock fn block(&self) -> &SignedBeaconBlock { self.as_block() } + + fn block_cloned(&self) -> Arc> { + self.block_cloned() + } } impl ExecutionPendingBlock { diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index a6840ed76..edba7a211 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -46,6 +46,13 @@ impl RpcBlock { } } + pub fn block_cloned(&self) -> Arc> { + match &self.block { + RpcBlockInner::Block(block) => block.clone(), + RpcBlockInner::BlockAndBlobs(block, _) => block.clone(), + } + } + pub fn blobs(&self) -> Option<&BlobSidecarList> { match &self.block { RpcBlockInner::Block(_) => None, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 330036d43..dd4b612f6 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,6 @@ -use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_chain::{ + CanonicalHead, LightClientProducerEvent, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY, +}; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::data_availability_checker::DataAvailabilityChecker; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; @@ -6,10 +8,11 @@ use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; +use 
crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; +use crate::snapshot_cache::SnapshotCache; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -36,8 +39,8 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, Signature, - SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, + Hash256, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -87,6 +90,7 @@ pub struct BeaconChainBuilder { event_handler: Option>, slot_clock: Option, shutdown_sender: Option>, + light_client_server_tx: Option>>, head_tracker: Option, validator_pubkey_cache: Option>, spec: ChainSpec, @@ -129,6 +133,7 @@ where event_handler: None, slot_clock: None, shutdown_sender: None, + light_client_server_tx: None, head_tracker: None, validator_pubkey_cache: None, spec: TEthSpec::default_spec(), @@ -427,6 +432,7 @@ where mut self, mut weak_subj_state: BeaconState, weak_subj_block: SignedBeaconBlock, + weak_subj_blobs: Option>, genesis_state: BeaconState, ) -> Result { let store = self @@ -485,6 +491,29 @@ where )); } + // Verify that blobs (if provided) match the block. + if let Some(blobs) = &weak_subj_blobs { + let commitments = weak_subj_block + .message() + .body() + .blob_kzg_commitments() + .map_err(|e| format!("Blobs provided but block does not reference them: {e:?}"))?; + if blobs.len() != commitments.len() { + return Err(format!( + "Wrong number of blobs, expected: {}, got: {}", + commitments.len(), + blobs.len() + )); + } + if commitments + .iter() + .zip(blobs.iter()) + .any(|(commitment, blob)| *commitment != blob.kzg_commitment) + { + return Err("Checkpoint blob does not match block commitment".into()); + } + } + // Set the store's split point *before* storing genesis so that genesis is stored // immediately in the freezer DB. store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root); @@ -506,14 +535,19 @@ where .do_atomically(block_root_batch) .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; - // Write the state and block non-atomically, it doesn't matter if they're forgotten + // Write the state, block and blobs non-atomically, it doesn't matter if they're forgotten // about on a crash restart. 
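The checkpoint blob verification added above reduces to a length check plus a pairwise commitment comparison. A standalone sketch of the same validation, with types simplified to fixed byte arrays rather than the real KZG types:

    /// Returns Ok(()) iff each blob's commitment matches the block's KZG
    /// commitments, in order.
    fn verify_blobs_match(
        block_commitments: &[[u8; 48]],
        blob_commitments: &[[u8; 48]],
    ) -> Result<(), String> {
        if block_commitments.len() != blob_commitments.len() {
            return Err(format!(
                "Wrong number of blobs, expected: {}, got: {}",
                block_commitments.len(),
                blob_commitments.len()
            ));
        }
        if block_commitments
            .iter()
            .zip(blob_commitments.iter())
            .any(|(commitment, blob)| commitment != blob)
        {
            return Err("Checkpoint blob does not match block commitment".into());
        }
        Ok(())
    }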
store .put_state(&weak_subj_state_root, &weak_subj_state) - .map_err(|e| format!("Failed to store weak subjectivity state: {:?}", e))?; + .map_err(|e| format!("Failed to store weak subjectivity state: {e:?}"))?; store .put_block(&weak_subj_block_root, weak_subj_block.clone()) - .map_err(|e| format!("Failed to store weak subjectivity block: {:?}", e))?; + .map_err(|e| format!("Failed to store weak subjectivity block: {e:?}"))?; + if let Some(blobs) = weak_subj_blobs { + store + .put_blobs(&weak_subj_block_root, blobs) + .map_err(|e| format!("Failed to store weak subjectivity blobs: {e:?}"))?; + } // Stage the database's metadata fields for atomic storage when `build` is called. // This prevents the database from restarting in an inconsistent state if the anchor @@ -603,6 +637,15 @@ where self } + /// Sets a `Sender` to allow the beacon chain to trigger light_client update production. + pub fn light_client_server_tx( + mut self, + sender: Sender>, + ) -> Self { + self.light_client_server_tx = Some(sender); + self + } + /// Creates a new, empty operation pool. fn empty_op_pool(mut self) -> Self { self.op_pool = Some(OperationPool::new()); @@ -827,15 +870,20 @@ where let head_for_snapshot_cache = head_snapshot.clone(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; + let snapshot_cache_size = self.chain_config.snapshot_cache_size; // Calculate the weak subjectivity point in which to backfill blocks to. let genesis_backfill_slot = if self.chain_config.genesis_backfill { Slot::new(0) } else { - let backfill_epoch_range = (self.spec.min_validator_withdrawability_delay - + self.spec.churn_limit_quotient) - .as_u64() - / 2; + let backfill_epoch_range = if cfg!(feature = "test_backfill") { + 3 + } else { + (self.spec.min_validator_withdrawability_delay + self.spec.churn_limit_quotient) + .as_u64() + / 2 + }; + match slot_clock.now() { Some(current_slot) => { let genesis_backfill_epoch = current_slot @@ -887,8 +935,6 @@ where observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), observed_bls_to_execution_changes: <_>::default(), - latest_seen_finality_update: <_>::default(), - latest_seen_optimistic_update: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, genesis_validators_root, @@ -901,7 +947,7 @@ where event_handler: self.event_handler, head_tracker, snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( - DEFAULT_SNAPSHOT_CACHE_SIZE, + snapshot_cache_size, head_for_snapshot_cache, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new( @@ -916,6 +962,8 @@ where validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), + light_client_server_cache: LightClientServerCache::new(), + light_client_server_tx: self.light_client_server_tx, shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 7bcb764ab..36481b4dc 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -72,6 +72,8 @@ pub struct ChainConfig { pub optimistic_finalized_sync: bool, /// The size of the shuffling cache, pub shuffling_cache_size: usize, + /// The size of the snapshot cache. 
+ pub snapshot_cache_size: usize, /// If using a weak-subjectivity sync, whether we should download blocks all the way back to /// genesis. pub genesis_backfill: bool, @@ -83,6 +85,8 @@ pub struct ChainConfig { pub progressive_balances_mode: ProgressiveBalancesMode, /// Number of epochs between each migration of data from the hot database to the freezer. pub epochs_per_migration: u64, + /// When set to true, the light client server computes and caches state proofs for serving updates. + pub enable_light_client_server: bool, } impl Default for ChainConfig { @@ -110,10 +114,12 @@ impl Default for ChainConfig { // This value isn't actually read except in tests. optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, + snapshot_cache_size: crate::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE, genesis_backfill: false, always_prepare_payload: false, progressive_balances_mode: ProgressiveBalancesMode::Fast, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, + enable_light_client_server: false, } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 21cac9a26..f906032ec 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -20,7 +20,7 @@ use std::fmt::Debug; use std::num::NonZeroUsize; use std::sync::Arc; use task_executor::TaskExecutor; -use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; +use types::beacon_block_body::KzgCommitmentOpts; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -192,6 +192,14 @@ impl DataAvailabilityChecker { self.availability_cache.peek_blob(blob_id) } + /// Get a block from the availability cache. Includes any blocks we are currently processing. + pub fn get_block(&self, block_root: &Hash256) -> Option>> { + self.processing_cache + .read() + .get(block_root) + .and_then(|cached| cached.block.clone()) + } + /// Put a list of blobs received via RPC into the availability cache. This performs KZG /// verification on the blobs in the list. pub fn put_rpc_blobs( @@ -344,20 +352,16 @@ impl DataAvailabilityChecker { block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch()) } - /// Adds block commitments to the processing cache. These commitments are unverified but caching + /// Adds a block to the processing cache. This block's commitments are unverified but caching /// them here is useful to avoid duplicate downloads of blocks, as well as understanding - /// our blob download requirements. - pub fn notify_block_commitments( - &self, - slot: Slot, - block_root: Hash256, - commitments: KzgCommitments, - ) { + /// our blob download requirements. We will also serve this over RPC. + pub fn notify_block(&self, block_root: Hash256, block: Arc>) { + let slot = block.slot(); self.processing_cache .write() .entry(block_root) .or_insert_with(|| ProcessingComponents::new(slot)) - .merge_block(commitments); + .merge_block(block); } /// Add a single blob commitment to the processing cache. This commitment is unverified but caching /// it here is useful to avoid duplicate downloads of blobs, as well as understanding /// our block and blob download requirements. @@ -450,6 +454,24 @@ impl DataAvailabilityChecker { pub fn persist_all(&self) -> Result<(), AvailabilityCheckError> { self.availability_cache.write_all_to_disk() } + + /// Collects metrics from the data availability checker.
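The accessor that follows returns a plain snapshot struct so the metrics scraper (`scrape_for_metrics`, later in this diff) can read all cache sizes in one call and export them as gauges. A minimal sketch of the same snapshot pattern, with illustrative names:

    /// A point-in-time snapshot of cache sizes.
    struct DaCheckerMetrics {
        processing_cache_size: usize,
        block_cache_size: usize,
        state_cache_size: usize,
        num_store_entries: usize,
    }

    fn export(m: &DaCheckerMetrics) {
        // Stand-in for the set_gauge_by_usize calls in the real scraper.
        println!(
            "processing={} blocks={} states={} on_disk={}",
            m.processing_cache_size, m.block_cache_size, m.state_cache_size, m.num_store_entries
        );
    }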
+ pub fn metrics(&self) -> DataAvailabilityCheckerMetrics { + DataAvailabilityCheckerMetrics { + processing_cache_size: self.processing_cache.read().len(), + num_store_entries: self.availability_cache.num_store_entries(), + state_cache_size: self.availability_cache.state_cache_size(), + block_cache_size: self.availability_cache.block_cache_size(), + } + } +} + +/// Helper struct to group data availability checker metrics. +pub struct DataAvailabilityCheckerMetrics { + pub processing_cache_size: usize, + pub num_store_entries: usize, + pub state_cache_size: usize, + pub block_cache_size: usize, } pub fn start_availability_cache_maintenance_service( @@ -545,6 +567,18 @@ pub struct AvailableBlock { } impl AvailableBlock { + pub fn __new_for_testing( + block_root: Hash256, + block: Arc>, + blobs: Option>, + ) -> Self { + Self { + block_root, + block, + blobs, + } + } + pub fn block(&self) -> &SignedBeaconBlock { &self.block } @@ -585,6 +619,15 @@ pub enum MaybeAvailableBlock { }, } +impl MaybeAvailableBlock { + pub fn block_cloned(&self) -> Arc> { + match self { + Self::Available(block) => block.block_cloned(), + Self::AvailabilityPending { block, .. } => block.clone(), + } + } +} + #[derive(Debug, Clone)] pub enum MissingBlobs { /// We know for certain these blobs are missing. diff --git a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs index 776f81ee5..65093db26 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs @@ -182,9 +182,9 @@ macro_rules! impl_availability_view { impl_availability_view!( ProcessingComponents, - KzgCommitments, + Arc>, KzgCommitment, - block_commitments, + block, blob_commitments ); @@ -212,12 +212,6 @@ pub trait GetCommitment { fn get_commitment(&self) -> &KzgCommitment; } -// These implementations are required to implement `AvailabilityView` for `ProcessingView`. -impl GetCommitments for KzgCommitments { - fn get_commitments(&self) -> KzgCommitments { - self.clone() - } -} impl GetCommitment for KzgCommitment { fn get_commitment(&self) -> &KzgCommitment { self @@ -310,7 +304,7 @@ pub mod tests { } type ProcessingViewSetup = ( - KzgCommitments, + Arc>, FixedVector, ::MaxBlobsPerBlock>, FixedVector, ::MaxBlobsPerBlock>, ); @@ -320,12 +314,6 @@ pub mod tests { valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, ) -> ProcessingViewSetup { - let commitments = block - .message() - .body() - .blob_kzg_commitments() - .unwrap() - .clone(); let blobs = FixedVector::from( valid_blobs .iter() @@ -338,7 +326,7 @@ pub mod tests { .map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment)) .collect::>(), ); - (commitments, blobs, invalid_blobs) + (Arc::new(block), blobs, invalid_blobs) } type PendingComponentsSetup = ( diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 34c9bc76f..80cbc6c89 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -363,6 +363,16 @@ impl Critical { } } } + + /// Returns the number of pending component entries in memory. 
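The size accessors that follow hold a read lock only long enough to read a length, so metrics scraping barely contends with writers. A generic sketch of the pattern (stand-in types, not the Lighthouse structs):

    use parking_lot::RwLock;

    struct Critical {
        in_memory: Vec<u8>,  // stand-in for the in-memory LRU of pending components
        store_keys: Vec<u8>, // stand-in for the keys overflowed to disk
    }

    struct Cache {
        critical: RwLock<Critical>,
    }

    impl Cache {
        fn block_cache_size(&self) -> usize {
            self.critical.read().in_memory.len() // lock held only for `len()`
        }
        fn num_store_entries(&self) -> usize {
            self.critical.read().store_keys.len()
        }
    }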
+ pub fn num_blocks(&self) -> usize { + self.in_memory.len() + } + + /// Returns the number of entries that have overflowed to disk. + pub fn num_store_entries(&self) -> usize { + self.store_keys.len() + } } /// This is the main struct for this module. Outside methods should @@ -671,6 +681,21 @@ impl OverflowLRUCache { pub fn state_lru_cache(&self) -> &StateLRUCache { &self.state_cache } + + /// Number of states stored in memory in the cache. + pub fn state_cache_size(&self) -> usize { + self.state_cache.lru_cache().read().len() + } + + /// Number of pending component entries in memory in the cache. + pub fn block_cache_size(&self) -> usize { + self.critical.read().num_blocks() + } + + /// Returns the number of entries in the cache that have overflowed to disk. + pub fn num_store_entries(&self) -> usize { + self.critical.read().num_store_entries() + } } impl ssz::Encode for OverflowKey { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs index 969034c65..af94803dc 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs @@ -1,8 +1,9 @@ use crate::data_availability_checker::AvailabilityView; use std::collections::hash_map::Entry; use std::collections::HashMap; -use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; -use types::{EthSpec, Hash256, Slot}; +use std::sync::Arc; +use types::beacon_block_body::KzgCommitmentOpts; +use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; /// This cache is used only for gossip blocks/blobs and single block/blob lookups, to give req/resp /// a view of what we have and what we require. This cache serves a slightly different purpose than @@ -37,6 +38,9 @@ impl ProcessingCache { } roots_missing_components } + pub fn len(&self) -> usize { + self.processing_cache.len() + } } #[derive(Debug, Clone)] @@ -45,7 +49,7 @@ pub struct ProcessingComponents { /// Blobs required for a block can only be known if we have seen the block. So `Some` here /// means we've seen it, a `None` means we haven't. The `kzg_commitments` value helps us figure /// out whether incoming blobs actually match the block. - pub block_commitments: Option>, + pub block: Option>>, /// `KzgCommitments` for blobs are always known, even if we haven't seen the block. See /// `AvailabilityView`'s trait definition for more details. 
pub blob_commitments: KzgCommitmentOpts, @@ -55,7 +59,7 @@ impl ProcessingComponents { pub fn new(slot: Slot) -> Self { Self { slot, - block_commitments: None, + block: None, blob_commitments: KzgCommitmentOpts::::default(), } } @@ -67,7 +71,7 @@ impl ProcessingComponents { pub fn empty(_block_root: Hash256) -> Self { Self { slot: Slot::new(0), - block_commitments: None, + block: None, blob_commitments: KzgCommitmentOpts::::default(), } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index bd125a7f4..35c114db5 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -190,8 +190,7 @@ impl StateLRUCache { }) } - /// returns the state cache for inspection in tests - #[cfg(test)] + /// returns the state cache for inspection pub fn lru_cache(&self) -> &RwLock>> { &self.states } diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 8b6c6b374..563c29659 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -967,7 +967,7 @@ mod test { let spec = &E::default_spec(); let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - let blocks = vec![]; + let blocks = []; assert_eq!( get_votes_to_consider( diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index e25976c2a..fd790c884 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -81,14 +81,10 @@ impl PayloadNotifier { match notify_execution_layer { NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => { - // Verify the block hash here in Lighthouse and immediately mark the block as - // optimistically imported. This saves a lot of roundtrips to the EL. 
- let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - - if let Err(e) = execution_layer.verify_payload_block_hash(block_message) { + // Create a NewPayloadRequest (no clones required) and check optimistic sync verifications + let new_payload_request: NewPayloadRequest = + block_message.try_into()?; + if let Err(e) = new_payload_request.perform_optimistic_sync_verifications() { warn!( chain.log, "Falling back to slow block hash verification"; @@ -143,11 +139,8 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let new_payload_request: NewPayloadRequest = block.try_into()?; - let execution_block_hash = new_payload_request.block_hash(); - let new_payload_response = execution_layer - .notify_new_payload(new_payload_request) - .await; + let execution_block_hash = block.execution_payload()?.block_hash(); + let new_payload_response = execution_layer.notify_new_payload(block.try_into()?).await; match new_payload_response { Ok(status) => match status { diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index b5b42fcfc..85208c8ad 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -135,20 +135,20 @@ impl BeaconChain { prev_block_slot = block.slot(); expected_block_root = block.message().parent_root(); + signed_blocks.push(block); // If we've reached genesis, add the genesis block root to the batch for all slots // between 0 and the first block slot, and set the anchor slot to 0 to indicate // completion. if expected_block_root == self.genesis_block_root { let genesis_slot = self.spec.genesis_slot; - for slot in genesis_slot.as_usize()..block.slot().as_usize() { + for slot in genesis_slot.as_usize()..prev_block_slot.as_usize() { chunk_writer.set(slot, self.genesis_block_root, &mut cold_batch)?; } prev_block_slot = genesis_slot; expected_block_root = Hash256::zero(); break; } - signed_blocks.push(block); } chunk_writer.write(&mut cold_batch)?; // these were pushed in reverse order so we reverse again diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index ce841b106..529f269be 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -32,6 +32,7 @@ pub mod historical_blocks; pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; +mod light_client_server_cache; pub mod merge_readiness; pub mod metrics; pub mod migrate; @@ -49,7 +50,7 @@ mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; pub mod shuffling_cache; -mod snapshot_cache; +pub mod snapshot_cache; pub mod state_advance_timer; pub mod sync_committee_rewards; pub mod sync_committee_verification; @@ -61,8 +62,8 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, - WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, + StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, 
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 791d63ccf..35863aa05 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -1,11 +1,9 @@ -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainTypes}; use derivative::Derivative; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; -use types::{ - light_client_update::Error as LightClientUpdateError, LightClientFinalityUpdate, Slot, -}; +use types::LightClientFinalityUpdate; /// Returned when a light client finality update was not successfully verified. It might not have been verified for /// two reasons: @@ -16,8 +14,6 @@ use types::{ /// (the `BeaconChainError` variant) #[derive(Debug, AsRefStr)] pub enum Error { - /// Light client finality update message with a lower or equal finalized_header slot already forwarded. - FinalityUpdateAlreadySeen, /// The light client finality message was received is prior to one-third of slot duration passage. (with /// respect to the gossip clock disparity and slot clock duration). /// @@ -26,29 +22,11 @@ pub enum Error { /// Assuming the local clock is correct, the peer has sent an invalid message. TooEarly, /// Light client finality update message does not match the locally constructed one. - /// - /// ## Peer Scoring - /// InvalidLightClientFinalityUpdate, /// Signature slot start time is none. SigSlotStartIsNone, /// Failed to construct a LightClientFinalityUpdate from state. FailedConstructingUpdate, - /// Beacon chain error occurred. - BeaconChainError(BeaconChainError), - LightClientUpdateError(LightClientUpdateError), -} - -impl From for Error { - fn from(e: BeaconChainError) -> Self { - Error::BeaconChainError(e) - } -} - -impl From for Error { - fn from(e: LightClientUpdateError) -> Self { - Error::LightClientUpdateError(e) - } } /// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network. @@ -63,71 +41,34 @@ impl VerifiedLightClientFinalityUpdate { /// Returns `Ok(Self)` if the `light_client_finality_update` is valid to be (re)published on the gossip /// network. pub fn verify( - light_client_finality_update: LightClientFinalityUpdate, + rcv_finality_update: LightClientFinalityUpdate, chain: &BeaconChain, seen_timestamp: Duration, ) -> Result { - let gossiped_finality_slot = light_client_finality_update.finalized_header.beacon.slot; - let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); - let signature_slot = light_client_finality_update.signature_slot; - let start_time = chain.slot_clock.start_of(signature_slot); - let mut latest_seen_finality_update = chain.latest_seen_finality_update.lock(); - - let head = chain.canonical_head.cached_head(); - let head_block = &head.snapshot.beacon_block; - let attested_block_root = head_block.message().parent_root(); - let attested_block = chain - .get_blinded_block(&attested_block_root)? - .ok_or(Error::FailedConstructingUpdate)?; - let mut attested_state = chain - .get_state(&attested_block.state_root(), Some(attested_block.slot()))? 
- .ok_or(Error::FailedConstructingUpdate)?; - - let finalized_block_root = attested_state.finalized_checkpoint().root; - let finalized_block = chain - .get_blinded_block(&finalized_block_root)? - .ok_or(Error::FailedConstructingUpdate)?; - let latest_seen_finality_update_slot = match latest_seen_finality_update.as_ref() { - Some(update) => update.finalized_header.beacon.slot, - None => Slot::new(0), - }; - - // verify that no other finality_update with a lower or equal - // finalized_header.slot was already forwarded on the network - if gossiped_finality_slot <= latest_seen_finality_update_slot { - return Err(Error::FinalityUpdateAlreadySeen); - } - // verify that enough time has passed for the block to have been propagated - match start_time { - Some(time) => { - if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() - < time + one_third_slot_duration - { - return Err(Error::TooEarly); - } - } - None => return Err(Error::SigSlotStartIsNone), + let start_time = chain + .slot_clock + .start_of(rcv_finality_update.signature_slot) + .ok_or(Error::SigSlotStartIsNone)?; + let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); + if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() + < start_time + one_third_slot_duration + { + return Err(Error::TooEarly); } - let head_state = &head.snapshot.beacon_state; - let finality_update = LightClientFinalityUpdate::new( - &chain.spec, - head_state, - head_block, - &mut attested_state, - &finalized_block, - )?; + let latest_finality_update = chain + .light_client_server_cache + .get_latest_finality_update() + .ok_or(Error::FailedConstructingUpdate)?; // verify that the gossiped finality update is the same as the locally constructed one. - if finality_update != light_client_finality_update { + if latest_finality_update != rcv_finality_update { return Err(Error::InvalidLightClientFinalityUpdate); } - *latest_seen_finality_update = Some(light_client_finality_update.clone()); - Ok(Self { - light_client_finality_update, + light_client_finality_update: rcv_finality_update, seen_timestamp, }) } diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 374cc9a77..813b112db 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -1,12 +1,10 @@ -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainTypes}; use derivative::Derivative; use eth2::types::Hash256; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; -use types::{ - light_client_update::Error as LightClientUpdateError, LightClientOptimisticUpdate, Slot, -}; +use types::LightClientOptimisticUpdate; /// Returned when a light client optimistic update was not successfully verified. It might not have been verified for /// two reasons: @@ -17,8 +15,6 @@ use types::{ /// (the `BeaconChainError` variant) #[derive(Debug, AsRefStr)] pub enum Error { - /// Light client optimistic update message with a lower or equal optimistic_header slot already forwarded. - OptimisticUpdateAlreadySeen, /// The light client optimistic message was received is prior to one-third of slot duration passage. (with /// respect to the gossip clock disparity and slot clock duration). /// @@ -27,9 +23,6 @@ pub enum Error { /// Assuming the local clock is correct, the peer has sent an invalid message. 
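Both verifiers now reduce `TooEarly` to the same arithmetic: a gossiped update is rejected unless it was seen at least one-third of a slot after the start of its signature slot, after allowing for clock disparity. A minimal sketch of that rule with plain `Duration`s (function and parameter names are illustrative):

    use std::time::Duration;

    /// True if a gossiped update arrived too early to be (re)published.
    fn is_too_early(
        seen_timestamp: Duration,       // when we received the message
        signature_slot_start: Duration, // start of the update's signature slot
        seconds_per_slot: u64,
        max_clock_disparity: Duration,
    ) -> bool {
        let one_third_slot = Duration::new(seconds_per_slot / 3, 0);
        seen_timestamp + max_clock_disparity < signature_slot_start + one_third_slot
    }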
TooEarly, /// Light client optimistic update message does not match the locally constructed one. - /// - /// ## Peer Scoring - /// InvalidLightClientOptimisticUpdate, /// Signature slot start time is none. SigSlotStartIsNone, @@ -37,21 +30,6 @@ pub enum Error { FailedConstructingUpdate, /// Unknown block with parent root. UnknownBlockParentRoot(Hash256), - /// Beacon chain error occurred. - BeaconChainError(BeaconChainError), - LightClientUpdateError(LightClientUpdateError), -} - -impl From for Error { - fn from(e: BeaconChainError) -> Self { - Error::BeaconChainError(e) - } -} - -impl From for Error { - fn from(e: LightClientUpdateError) -> Self { - Error::LightClientUpdateError(e) - } } /// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network. @@ -67,52 +45,27 @@ impl VerifiedLightClientOptimisticUpdate { /// Returns `Ok(Self)` if the `light_client_optimistic_update` is valid to be (re)published on the gossip /// network. pub fn verify( - light_client_optimistic_update: LightClientOptimisticUpdate, + rcv_optimistic_update: LightClientOptimisticUpdate, chain: &BeaconChain, seen_timestamp: Duration, ) -> Result { - let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.beacon.slot; + // verify that enough time has passed for the block to have been propagated + let start_time = chain + .slot_clock + .start_of(rcv_optimistic_update.signature_slot) + .ok_or(Error::SigSlotStartIsNone)?; let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); - let signature_slot = light_client_optimistic_update.signature_slot; - let start_time = chain.slot_clock.start_of(signature_slot); - let mut latest_seen_optimistic_update = chain.latest_seen_optimistic_update.lock(); + if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() + < start_time + one_third_slot_duration + { + return Err(Error::TooEarly); + } let head = chain.canonical_head.cached_head(); let head_block = &head.snapshot.beacon_block; - let attested_block_root = head_block.message().parent_root(); - let attested_block = chain - .get_blinded_block(&attested_block_root)? - .ok_or(Error::FailedConstructingUpdate)?; - - let attested_state = chain - .get_state(&attested_block.state_root(), Some(attested_block.slot()))? 
- .ok_or(Error::FailedConstructingUpdate)?; - let latest_seen_optimistic_update_slot = match latest_seen_optimistic_update.as_ref() { - Some(update) => update.attested_header.beacon.slot, - None => Slot::new(0), - }; - - // verify that no other optimistic_update with a lower or equal - // optimistic_header.slot was already forwarded on the network - if gossiped_optimistic_slot <= latest_seen_optimistic_update_slot { - return Err(Error::OptimisticUpdateAlreadySeen); - } - - // verify that enough time has passed for the block to have been propagated - match start_time { - Some(time) => { - if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() - < time + one_third_slot_duration - { - return Err(Error::TooEarly); - } - } - None => return Err(Error::SigSlotStartIsNone), - } - // check if we can process the optimistic update immediately // otherwise queue - let canonical_root = light_client_optimistic_update + let canonical_root = rcv_optimistic_update .attested_header .beacon .canonical_root(); @@ -121,19 +74,20 @@ impl VerifiedLightClientOptimisticUpdate { return Err(Error::UnknownBlockParentRoot(canonical_root)); } - let optimistic_update = - LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?; + let latest_optimistic_update = chain + .light_client_server_cache + .get_latest_optimistic_update() + .ok_or(Error::FailedConstructingUpdate)?; // verify that the gossiped optimistic update is the same as the locally constructed one. - if optimistic_update != light_client_optimistic_update { + if latest_optimistic_update != rcv_optimistic_update { return Err(Error::InvalidLightClientOptimisticUpdate); } - *latest_seen_optimistic_update = Some(light_client_optimistic_update.clone()); - + let parent_root = rcv_optimistic_update.attested_header.beacon.parent_root; Ok(Self { - light_client_optimistic_update, - parent_root: canonical_root, + light_client_optimistic_update: rcv_optimistic_update, + parent_root, seen_timestamp, }) } diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs new file mode 100644 index 000000000..1397e3fc9 --- /dev/null +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -0,0 +1,256 @@ +use crate::errors::BeaconChainError; +use crate::{metrics, BeaconChainTypes, BeaconStore}; +use parking_lot::{Mutex, RwLock}; +use slog::{debug, Logger}; +use ssz_types::FixedVector; +use std::num::NonZeroUsize; +use types::light_client_update::{FinalizedRootProofLen, FINALIZED_ROOT_INDEX}; +use types::non_zero_usize::new_non_zero_usize; +use types::{ + BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate, + LightClientHeader, LightClientOptimisticUpdate, Slot, SyncAggregate, +}; + +/// A prev block cache miss requires re-generating the state of the post-parent block. Items in the +/// prev block cache are very small: 32 * (6 + 1) = 224 bytes. 32 is an arbitrary number that +/// represents unlikely re-orgs, while keeping the cache very small. +const PREV_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(32); + +/// This cache computes light client messages ahead of time, required to satisfy p2p and API +/// requests. These messages include proofs on historical states, so on-demand computation is +/// expensive. +/// +pub struct LightClientServerCache { + /// Tracks a single global latest finality update out of all imported blocks.
+ /// + /// TODO: Active discussion with @etan-status if this cache should be fork aware to return + /// latest canonical (update with highest signature slot, where its attested header is part of + /// the head chain) instead of global latest (update with highest signature slot, out of all + /// branches). + latest_finality_update: RwLock>>, + /// Tracks a single global latest optimistic update out of all imported blocks. + latest_optimistic_update: RwLock>>, + /// Caches state proofs by block root. + prev_block_cache: Mutex>, +} + +impl LightClientServerCache { + pub fn new() -> Self { + Self { + latest_finality_update: None.into(), + latest_optimistic_update: None.into(), + prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(), + } + } + + /// Compute and cache state proofs for later production of light-client messages. Does not + /// trigger block replay. + pub fn cache_state_data( + &self, + spec: &ChainSpec, + block: BeaconBlockRef, + block_root: Hash256, + block_post_state: &mut BeaconState, + ) -> Result<(), BeaconChainError> { + let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES); + + // Only post-altair + if spec.fork_name_at_slot::(block.slot()) == ForkName::Base { + return Ok(()); + } + + // Persist in the memory cache for a descendant block + + let cached_data = LightClientCachedData::from_state(block_post_state)?; + self.prev_block_cache.lock().put(block_root, cached_data); + + Ok(()) + } + + /// Given a block with a SyncAggregate, computes better or more recent light client updates. + /// The results are cached either on disk or in memory to be served via the p2p and REST APIs. + pub fn recompute_and_cache_updates( + &self, + log: &Logger, + store: BeaconStore, + block_parent_root: &Hash256, + block_slot: Slot, + sync_aggregate: &SyncAggregate, + ) -> Result<(), BeaconChainError> { + let _timer = + metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES); + + let signature_slot = block_slot; + let attested_block_root = block_parent_root; + + let attested_block = store.get_blinded_block(attested_block_root)?.ok_or( + BeaconChainError::DBInconsistent(format!( + "Block not available {:?}", + attested_block_root + )), + )?; + + let cached_parts = self.get_or_compute_prev_block_cache( + store.clone(), + attested_block_root, + &attested_block.state_root(), + attested_block.slot(), + )?; + + let attested_slot = attested_block.slot(); + + // Spec: Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest + // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice + let is_latest_optimistic = match &self.latest_optimistic_update.read().clone() { + Some(latest_optimistic_update) => { + is_latest_optimistic_update(latest_optimistic_update, attested_slot, signature_slot) + } + None => true, + }; + if is_latest_optimistic { + // We can create a more recent optimistic update + *self.latest_optimistic_update.write() = Some(LightClientOptimisticUpdate { + attested_header: block_to_light_client_header(attested_block.message()), + sync_aggregate: sync_aggregate.clone(), + signature_slot, + }); + }; + + // Spec: Full nodes SHOULD provide the LightClientFinalityUpdate with the highest + // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice + let is_latest_finality = match &self.latest_finality_update.read().clone() { + Some(latest_finality_update) => { + is_latest_finality_update(latest_finality_update, attested_slot, signature_slot) +
} + None => true, + }; + if is_latest_finality && !cached_parts.finalized_block_root.is_zero() { + // Immediately after checkpoint sync the finalized block may not be available yet. + if let Some(finalized_block) = + store.get_blinded_block(&cached_parts.finalized_block_root)? + { + *self.latest_finality_update.write() = Some(LightClientFinalityUpdate { + // TODO: may want to cache this result from latest_optimistic_update if producing a + // light_client header becomes expensive + attested_header: block_to_light_client_header(attested_block.message()), + finalized_header: block_to_light_client_header(finalized_block.message()), + finality_branch: cached_parts.finality_branch.clone(), + sync_aggregate: sync_aggregate.clone(), + signature_slot, + }); + } else { + debug!( + log, + "Finalized block not available in store for light_client server"; + "finalized_block_root" => format!("{}", cached_parts.finalized_block_root), + ); + } + } + + Ok(()) + } + + /// Retrieves the cached data for a previous block. If it is not present, re-computes it by + /// retrieving the parent state and inserts an entry into the cache. + /// + /// Kept as a separate function since the FnOnce passed to get_or_insert cannot be fallible. + fn get_or_compute_prev_block_cache( + &self, + store: BeaconStore, + block_root: &Hash256, + block_state_root: &Hash256, + block_slot: Slot, + ) -> Result { + // Attempt to get the value from the cache first. + if let Some(cached_parts) = self.prev_block_cache.lock().get(block_root) { + return Ok(cached_parts.clone()); + } + metrics::inc_counter(&metrics::LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS); + + // Compute the value, handling potential errors. + let mut state = store + .get_state(block_state_root, Some(block_slot))? + .ok_or_else(|| { + BeaconChainError::DBInconsistent(format!("Missing state {:?}", block_state_root)) + })?; + let new_value = LightClientCachedData::from_state(&mut state)?; + + // Insert the value and return an owned copy + self.prev_block_cache + .lock() + .put(*block_root, new_value.clone()); + Ok(new_value) + } + + pub fn get_latest_finality_update(&self) -> Option> { + self.latest_finality_update.read().clone() + } + + pub fn get_latest_optimistic_update(&self) -> Option> { + self.latest_optimistic_update.read().clone() + } +} + +impl Default for LightClientServerCache { + fn default() -> Self { + Self::new() + } +} + +type FinalityBranch = FixedVector; + +#[derive(Clone)] +struct LightClientCachedData { + finality_branch: FinalityBranch, + finalized_block_root: Hash256, +} + +impl LightClientCachedData { + fn from_state(state: &mut BeaconState) -> Result { + Ok(Self { + finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), + finalized_block_root: state.finalized_checkpoint().root, + }) + } +} + +// Implements the spec prioritization rules: +// > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) +// +// ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update +fn is_latest_finality_update( + prev: &LightClientFinalityUpdate, + attested_slot: Slot, + signature_slot: Slot, +) -> bool { + if attested_slot > prev.attested_header.beacon.slot { + true + } else { + attested_slot == prev.attested_header.beacon.slot && signature_slot > prev.signature_slot + } +} + +// Implements the spec prioritization rules: +// > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest
diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs
index ad095b37b..abac2c80e 100644
--- a/beacon_node/beacon_chain/src/metrics.rs
+++ b/beacon_node/beacon_chain/src/metrics.rs
@@ -1128,6 +1128,47 @@ lazy_static! {
         // Create a custom bucket list for greater granularity in block delay
         Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
     );
+
+
+    /*
+     * Data Availability cache metrics
+     */
+    pub static ref DATA_AVAILABILITY_PROCESSING_CACHE_SIZE: Result<IntGauge> =
+        try_create_int_gauge(
+            "data_availability_processing_cache_size",
+            "Number of entries in the data availability processing cache."
+        );
+    pub static ref DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE: Result<IntGauge> =
+        try_create_int_gauge(
+            "data_availability_overflow_memory_block_cache_size",
+            "Number of entries in the data availability overflow block memory cache."
+        );
+    pub static ref DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE: Result<IntGauge> =
+        try_create_int_gauge(
+            "data_availability_overflow_memory_state_cache_size",
+            "Number of entries in the data availability overflow state memory cache."
+        );
+    pub static ref DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: Result<IntGauge> =
+        try_create_int_gauge(
+            "data_availability_overflow_store_cache_size",
+            "Number of entries in the data availability overflow store cache."
+        );
+
+    /*
+     * light_client server metrics
+     */
+    pub static ref LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES: Result<Histogram> = try_create_histogram(
+        "beacon_light_client_server_cache_state_data_seconds",
+        "Time taken to produce and cache state data",
+    );
+    pub static ref LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES: Result<Histogram> = try_create_histogram(
+        "beacon_light_client_server_cache_recompute_updates_seconds",
+        "Time taken to recompute and cache updates",
+    );
+    pub static ref LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS: Result<IntCounter> = try_create_int_counter(
+        "beacon_light_client_server_cache_prev_block_cache_miss",
+        "Count of prev block cache misses",
+    );
 }
 
 /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
@@ -1155,6 +1196,24 @@ pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
         )
     }
 
+    let da_checker_metrics = beacon_chain.data_availability_checker.metrics();
+    set_gauge_by_usize(
+        &DATA_AVAILABILITY_PROCESSING_CACHE_SIZE,
+        da_checker_metrics.processing_cache_size,
+    );
+    set_gauge_by_usize(
+        &DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE,
+        da_checker_metrics.block_cache_size,
+    );
+    set_gauge_by_usize(
+        &DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE,
+        da_checker_metrics.state_cache_size,
+    );
+    set_gauge_by_usize(
+        &DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE,
+        da_checker_metrics.num_store_entries,
+    );
+
     if let Some((size, num_lookups)) = beacon_chain.pre_finalization_block_cache.metrics() {
         set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_CACHE_SIZE, size);
         set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_LOOKUP_COUNT, num_lookups);
diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs
index 18a761e29..aa513da54 100644
--- a/beacon_node/beacon_chain/src/observed_aggregates.rs
+++ b/beacon_node/beacon_chain/src/observed_aggregates.rs
@@ -43,7 +43,7 @@ impl<T: EthSpec> Consts for Attestation<T> {
 
     /// We need to keep attestations for each slot of the current epoch.
     fn max_slot_capacity() -> usize {
-        T::slots_per_epoch() as usize
+        2 * T::slots_per_epoch() as usize
     }
 
     /// As a DoS protection measure, the maximum number of distinct `Attestations` or
diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs
index 605a13432..a1c6adc3e 100644
--- a/beacon_node/beacon_chain/src/observed_attesters.rs
+++ b/beacon_node/beacon_chain/src/observed_attesters.rs
@@ -24,18 +24,16 @@ use types::{Epoch, EthSpec, Hash256, Slot, Unsigned};
 
 /// The maximum capacity of the `AutoPruningEpochContainer`.
 ///
-/// Fits the next, current and previous epochs. We require the next epoch due to the
-/// `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. We require the previous epoch since the specification
-/// declares:
+/// If the current epoch is N, this fits epochs N + 1, N, N - 1, and N - 2. We require the next epoch due
+/// to the `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. We require the N - 2 epoch since the specification declares:
 ///
 /// ```ignore
-/// aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE
-/// >= current_slot >= aggregate.data.slot
+/// the epoch of `aggregate.data.slot` is either the current or previous epoch
 /// ```
 ///
-/// This means that during the current epoch we will always accept an attestation
-/// from at least one slot in the previous epoch.
+/// This means that during the current epoch we will always accept an attestation from
+/// at least one slot in the epoch prior to the previous epoch.
+pub const MAX_CACHED_EPOCHS: u64 = 4;
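For intuition, with `MAX_CACHED_EPOCHS = 4` the retained window for a current epoch `N` is `[N - 2, N + 1]`. A small illustrative helper (hypothetical, not part of the codebase):

```rust
const MAX_CACHED_EPOCHS: u64 = 4;

/// Lowest epoch still retained when the wall-clock epoch is `current_epoch`.
fn lowest_cached_epoch(current_epoch: u64) -> u64 {
    // The window ends at N + 1 (gossip clock disparity) and spans
    // MAX_CACHED_EPOCHS epochs in total.
    (current_epoch + 1).saturating_sub(MAX_CACHED_EPOCHS - 1)
}

fn main() {
    // Current epoch 10: epochs 8, 9, 10 and 11 are retained.
    assert_eq!(lowest_cached_epoch(10), 8);
    // Near genesis the window saturates at epoch 0.
    assert_eq!(lowest_cached_epoch(0), 0);
}
```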
 
 pub type ObservedAttesters = AutoPruningEpochContainer;
 pub type ObservedSyncContributors =
diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs
index d2846c085..765ed0cb2 100644
--- a/beacon_node/beacon_chain/src/snapshot_cache.rs
+++ b/beacon_node/beacon_chain/src/snapshot_cache.rs
@@ -9,7 +9,7 @@ use types::{
 };
 
 /// The default size of the cache.
-pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4;
+pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 3;
 
 /// The minimum block delay to clone the state in the cache instead of removing it.
 /// This helps keep block processing fast during re-orgs from late blocks.
@@ -174,6 +174,7 @@ impl<T: EthSpec> SnapshotCache<T> {
         self.snapshots.iter().map(|s| s.beacon_block_root).collect()
     }
 
+    #[allow(clippy::len_without_is_empty)]
     /// The number of snapshots contained in `self`.
     pub fn len(&self) -> usize {
         self.snapshots.len()
diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs
index c04815ebc..39d35f811 100644
--- a/beacon_node/beacon_chain/src/state_advance_timer.rs
+++ b/beacon_node/beacon_chain/src/state_advance_timer.rs
@@ -51,7 +51,8 @@ const MAX_BLOCK_PRODUCTION_CACHE_DISTANCE: u64 = 4;
 #[derive(Debug)]
 enum Error {
     BeaconChain(BeaconChainError),
-    HeadMissingFromSnapshotCache(Hash256),
+    // We don't use the inner value directly, but it's used in the Debug impl.
+    HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256),
     MaxDistanceExceeded {
         current_slot: Slot,
         head_slot: Slot,
diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs
index 541e97436..9b89ee094 100644
--- a/beacon_node/beacon_chain/tests/block_verification.rs
+++ b/beacon_node/beacon_chain/tests/block_verification.rs
@@ -1,4 +1,4 @@
-// #![cfg(not(debug_assertions))]
+#![cfg(not(debug_assertions))]
 
 use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock};
 use beacon_chain::{
diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs
index a0b7fbd36..597d53fdd 100644
--- a/beacon_node/beacon_chain/tests/payload_invalidation.rs
+++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs
@@ -1820,81 +1820,94 @@ struct InvalidHeadSetup {
 }
 
 impl InvalidHeadSetup {
+    /// This function aims to produce two things:
+    ///
+    /// 1. A chain where the only viable head block has an invalid execution payload.
+    /// 2. A block (`fork_block`) which will become the head of the chain when
+    ///    it is imported.
     async fn new() -> InvalidHeadSetup {
+        let slots_per_epoch = E::slots_per_epoch();
         let mut rig = InvalidPayloadRig::new().enable_attestations();
         rig.move_to_terminal_block();
         rig.import_block(Payload::Valid).await; // Import a valid transition block.
-        // Import blocks until the first time the chain finalizes.
+        // Import blocks until the first time the chain finalizes. This avoids
+        // some edge-cases around genesis.
         while rig.cached_head().finalized_checkpoint().epoch == 0 {
             rig.import_block(Payload::Syncing).await;
         }
 
-        let slots_per_epoch = E::slots_per_epoch();
-        let start_slot = rig.cached_head().head_slot() + 1;
-        let mut opt_fork_block = None;
 
+        // Define a helper function.
+ let chain = rig.harness.chain.clone(); + let get_unrealized_justified_epoch = move || { + chain + .canonical_head + .fork_choice_read_lock() + .unrealized_justified_checkpoint() + .epoch + }; - assert_eq!(start_slot % slots_per_epoch, 1); - for i in 0..slots_per_epoch - 1 { - let slot = start_slot + i; - let slot_offset = slot.as_u64() % slots_per_epoch; - - rig.harness.set_current_slot(slot); - - if slot_offset == slots_per_epoch - 1 { - // Optimistic head block right before epoch boundary. - let is_valid = Payload::Syncing; - rig.import_block_parametric(is_valid, is_valid, Some(slot), |error| { - matches!( - error, - BlockError::ExecutionPayloadError( - ExecutionPayloadError::RejectedByExecutionEngine { .. } - ) - ) - }) - .await; - } else if 3 * slot_offset < 2 * slots_per_epoch { - // Valid block in previous epoch. - rig.import_block(Payload::Valid).await; - } else if slot_offset == slots_per_epoch - 2 { - // Fork block one slot prior to invalid head, not applied immediately. - let parent_state = rig - .harness - .chain - .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) - .unwrap(); - let (fork_block_tuple, _) = rig.harness.make_block(parent_state, slot).await; - opt_fork_block = Some(fork_block_tuple.0); - } else { - // Skipped slot. - }; + // Import more blocks until there is a new and higher unrealized + // justified checkpoint. + // + // The result will be a single chain where the head block has a higher + // unrealized justified checkpoint than all other blocks in the chain. + let initial_unrealized_justified = get_unrealized_justified_epoch(); + while get_unrealized_justified_epoch() == initial_unrealized_justified { + rig.import_block(Payload::Syncing).await; } - let invalid_head = rig.cached_head(); - assert_eq!( - invalid_head.head_slot() % slots_per_epoch, - slots_per_epoch - 1 - ); + // Create a forked block that competes with the head block. Both the + // head block and this fork block will share the same parent. + // + // The fork block and head block will both have an unrealized justified + // checkpoint at epoch `N` whilst their parent is at `N - 1`. + let head_slot = rig.cached_head().head_slot(); + let parent_slot = head_slot - 1; + let fork_block_slot = head_slot + 1; + let parent_state = rig + .harness + .chain + .state_at_slot(parent_slot, StateSkipConfig::WithStateRoots) + .unwrap(); + let (fork_block_tuple, _) = rig.harness.make_block(parent_state, fork_block_slot).await; + let fork_block = fork_block_tuple.0; - // Advance clock to new epoch to realize the justification of soon-to-be-invalid head block. - rig.harness.set_current_slot(invalid_head.head_slot() + 1); + let invalid_head = rig.cached_head(); + + // Advance the chain forward two epochs past the current head block. + // + // This ensures that `voting_source.epoch + 2 >= current_epoch` is + // `false` in the `node_is_viable_for_head` function. In effect, this + // ensures that no other block but the current head block is viable as a + // head block. + let invalid_head_epoch = invalid_head.head_slot().epoch(slots_per_epoch); + let new_wall_clock_epoch = invalid_head_epoch + 2; + rig.harness + .set_current_slot(new_wall_clock_epoch.start_slot(slots_per_epoch)); // Invalidate the head block. rig.invalidate_manually(invalid_head.head_block_root()) .await; + // Since our setup ensures that there is only a single, invalid block + // that's viable for head (according to FFG filtering), setting the + // head block as invalid should not result in another head being chosen. 
+ // Rather, it should fail to run fork choice and leave the invalid block as + // the head. assert!(rig .canonical_head() .head_execution_status() .unwrap() .is_invalid()); - // Finding a new head should fail since the only possible head is not valid. + // Ensure that we're getting the correct error when trying to find a new + // head. rig.assert_get_head_error_contains("InvalidBestNode"); Self { rig, - fork_block: opt_fork_block.unwrap(), + fork_block, invalid_head, } } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 9b832bd76..ff2017298 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3,6 +3,7 @@ use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; +use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::{ mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, @@ -2395,6 +2396,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .get_full_block(&wss_block_root) .unwrap() .unwrap(); + let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); let wss_state = full_store .get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() @@ -2437,7 +2439,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .custom_spec(test_spec::()) .task_executor(harness.chain.task_executor.clone()) .logger(log.clone()) - .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) + .weak_subjectivity_state( + wss_state, + wss_block.clone(), + wss_blobs_opt.clone(), + genesis_state, + ) .unwrap() .store_migrator_config(MigratorConfig::default().blocking()) .dummy_eth1_backend() @@ -2455,6 +2462,17 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .expect("should build"); let beacon_chain = Arc::new(beacon_chain); + let wss_block_root = wss_block.canonical_root(); + let store_wss_block = harness + .chain + .get_block(&wss_block_root) + .await + .unwrap() + .unwrap(); + let store_wss_blobs_opt = beacon_chain.store.get_blobs(&wss_block_root).unwrap(); + + assert_eq!(store_wss_block, wss_block); + assert_eq!(store_wss_blobs_opt, wss_blobs_opt); // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); @@ -2547,6 +2565,25 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { } } + // Corrupt the signature on the 1st block to ensure that the backfill processor is checking + // signatures correctly. Regression test for https://github.com/sigp/lighthouse/pull/5120. + let mut batch_with_invalid_first_block = available_blocks.clone(); + batch_with_invalid_first_block[0] = { + let (block_root, block, blobs) = available_blocks[0].clone().deconstruct(); + let mut corrupt_block = (*block).clone(); + *corrupt_block.signature_mut() = Signature::empty(); + AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), blobs) + }; + + // Importing the invalid batch should error. + assert!(matches!( + beacon_chain + .import_historical_block_batch(batch_with_invalid_first_block) + .unwrap_err(), + BeaconChainError::HistoricalBlockError(HistoricalBlockError::InvalidSignature) + )); + + // Importing the batch with valid signatures should succeed. 
         beacon_chain
             .import_historical_block_batch(available_blocks.clone())
             .unwrap();
diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs
index 9191509d3..20f3e21d0 100644
--- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs
+++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs
@@ -82,12 +82,15 @@ pub enum ReprocessQueueMessage<T: BeaconChainTypes> {
     /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same
     /// hash until the gossip block is imported.
     RpcBlock(QueuedRpcBlock<T::EthSpec>),
-    /// A block that was successfully processed. We use this to handle attestations and light client updates
+    /// A block that was successfully processed. We use this to handle attestation updates
     /// for unknown blocks.
     BlockImported {
         block_root: Hash256,
         parent_root: Hash256,
     },
+    /// A new `LightClientOptimisticUpdate` has been produced. We use this to handle light client
+    /// updates for unknown parent blocks.
+    NewLightClientOptimisticUpdate { parent_root: Hash256 },
     /// An unaggregated attestation that references an unknown block.
     UnknownBlockUnaggregate(QueuedUnaggregate<T::EthSpec>),
     /// An aggregated attestation that references an unknown block.
@@ -688,6 +691,8 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
                     );
                 }
             }
+            }
+            InboundEvent::Msg(NewLightClientOptimisticUpdate { parent_root }) => {
                 // Unqueue the light client optimistic updates we have for this root, if any.
                 if let Some(queued_lc_id) = self
                     .awaiting_lc_updates_per_parent_root
diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml
index 26c53154e..03cbcc9ff 100644
--- a/beacon_node/client/Cargo.toml
+++ b/beacon_node/client/Cargo.toml
@@ -25,6 +25,7 @@ serde = { workspace = true }
 error-chain = { workspace = true }
 slog = { workspace = true }
 tokio = { workspace = true }
+futures = { workspace = true }
 dirs = { workspace = true }
 eth1 = { workspace = true }
 eth2 = { workspace = true }
@@ -44,3 +45,4 @@ monitoring_api = { workspace = true }
 execution_layer = { workspace = true }
 beacon_processor = { workspace = true }
 num_cpus = { workspace = true }
+ethereum_ssz = { workspace = true }
diff --git a/beacon_node/client/src/address_change_broadcast.rs b/beacon_node/client/src/address_change_broadcast.rs
deleted file mode 100644
index 69614159f..000000000
--- a/beacon_node/client/src/address_change_broadcast.rs
+++ /dev/null
@@ -1,322 +0,0 @@
-use crate::*;
-use lighthouse_network::PubsubMessage;
-use network::NetworkMessage;
-use slog::{debug, info, warn, Logger};
-use slot_clock::SlotClock;
-use std::cmp;
-use std::collections::HashSet;
-use std::mem;
-use std::time::Duration;
-use tokio::sync::mpsc::UnboundedSender;
-use tokio::time::sleep;
-use types::EthSpec;
-
-/// The size of each chunk of addresses changes to be broadcast at the Capella
-/// fork.
-const BROADCAST_CHUNK_SIZE: usize = 128;
-/// The delay between broadcasting each chunk.
-const BROADCAST_CHUNK_DELAY: Duration = Duration::from_millis(500);
-
-/// If the Capella fork has already been reached, `broadcast_address_changes` is
-/// called immediately.
-///
-/// If the Capella fork has not been reached, waits until the start of the fork
-/// epoch and then calls `broadcast_address_changes`.
-pub async fn broadcast_address_changes_at_capella( - chain: &BeaconChain, - network_send: UnboundedSender>, - log: &Logger, -) { - let spec = &chain.spec; - let slot_clock = &chain.slot_clock; - - let capella_fork_slot = if let Some(epoch) = spec.capella_fork_epoch { - epoch.start_slot(T::EthSpec::slots_per_epoch()) - } else { - // Exit now if Capella is not defined. - return; - }; - - // Wait until the Capella fork epoch. - while chain.slot().map_or(true, |slot| slot < capella_fork_slot) { - match slot_clock.duration_to_slot(capella_fork_slot) { - Some(duration) => { - // Sleep until the Capella fork. - sleep(duration).await; - break; - } - None => { - // We were unable to read the slot clock wait another slot - // and then try again. - sleep(slot_clock.slot_duration()).await; - } - } - } - - // The following function will be called in two scenarios: - // - // 1. The node has been running for some time and the Capella fork has just - // been reached. - // 2. The node has just started and it is *after* the Capella fork. - broadcast_address_changes(chain, network_send, log).await -} - -/// Broadcasts any address changes that are flagged for broadcasting at the -/// Capella fork epoch. -/// -/// Address changes are published in chunks, with a delay between each chunk. -/// This helps reduce the load on the P2P network and also helps prevent us from -/// clogging our `network_send` channel and being late to publish -/// blocks, attestations, etc. -pub async fn broadcast_address_changes( - chain: &BeaconChain, - network_send: UnboundedSender>, - log: &Logger, -) { - let head = chain.head_snapshot(); - let mut changes = chain - .op_pool - .get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec); - - while !changes.is_empty() { - // This `split_off` approach is to allow us to have owned chunks of the - // `changes` vec. The `std::slice::Chunks` method uses references and - // the `itertools` iterator that achives this isn't `Send` so it doesn't - // work well with the `sleep` at the end of the loop. - let tail = changes.split_off(cmp::min(BROADCAST_CHUNK_SIZE, changes.len())); - let chunk = mem::replace(&mut changes, tail); - - let mut published_indices = HashSet::with_capacity(BROADCAST_CHUNK_SIZE); - let mut num_ok = 0; - let mut num_err = 0; - - // Publish each individual address change. - for address_change in chunk { - let validator_index = address_change.message.validator_index; - - let pubsub_message = PubsubMessage::BlsToExecutionChange(Box::new(address_change)); - let message = NetworkMessage::Publish { - messages: vec![pubsub_message], - }; - // It seems highly unlikely that this unbounded send will fail, but - // we handle the result nonetheless. - if let Err(e) = network_send.send(message) { - debug!( - log, - "Failed to publish change message"; - "error" => ?e, - "validator_index" => validator_index - ); - num_err += 1; - } else { - debug!( - log, - "Published address change message"; - "validator_index" => validator_index - ); - num_ok += 1; - published_indices.insert(validator_index); - } - } - - // Remove any published indices from the list of indices that need to be - // published. 
- chain - .op_pool - .register_indices_broadcasted_at_capella(&published_indices); - - info!( - log, - "Published address change messages"; - "num_published" => num_ok, - ); - - if num_err > 0 { - warn!( - log, - "Failed to publish address changes"; - "info" => "failed messages will be retried", - "num_unable_to_publish" => num_err, - ); - } - - sleep(BROADCAST_CHUNK_DELAY).await; - } - - debug!( - log, - "Address change routine complete"; - ); -} - -#[cfg(not(debug_assertions))] // Tests run too slow in debug. -#[cfg(test)] -mod tests { - use super::*; - use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use operation_pool::ReceivedPreCapella; - use state_processing::{SigVerifiedOp, VerifyOperation}; - use std::collections::HashSet; - use tokio::sync::mpsc; - use types::*; - - type E = MainnetEthSpec; - - pub const VALIDATOR_COUNT: usize = BROADCAST_CHUNK_SIZE * 3; - pub const EXECUTION_ADDRESS: Address = Address::repeat_byte(42); - - struct Tester { - harness: BeaconChainHarness>, - /// Changes which should be broadcast at the Capella fork. - received_pre_capella_changes: Vec>, - /// Changes which should *not* be broadcast at the Capella fork. - not_received_pre_capella_changes: Vec>, - } - - impl Tester { - fn new() -> Self { - let altair_fork_epoch = Epoch::new(0); - let bellatrix_fork_epoch = Epoch::new(0); - let capella_fork_epoch = Epoch::new(2); - - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - spec.capella_fork_epoch = Some(capella_fork_epoch); - - let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) - .logger(logging::test_logger()) - .deterministic_keypairs(VALIDATOR_COUNT) - .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); - - Self { - harness, - received_pre_capella_changes: <_>::default(), - not_received_pre_capella_changes: <_>::default(), - } - } - - fn produce_verified_address_change( - &self, - validator_index: u64, - ) -> SigVerifiedOp { - let change = self - .harness - .make_bls_to_execution_change(validator_index, EXECUTION_ADDRESS); - let head = self.harness.chain.head_snapshot(); - - change - .validate(&head.beacon_state, &self.harness.spec) - .unwrap() - } - - fn produce_received_pre_capella_changes(mut self, indices: Vec) -> Self { - for validator_index in indices { - self.received_pre_capella_changes - .push(self.produce_verified_address_change(validator_index)); - } - self - } - - fn produce_not_received_pre_capella_changes(mut self, indices: Vec) -> Self { - for validator_index in indices { - self.not_received_pre_capella_changes - .push(self.produce_verified_address_change(validator_index)); - } - self - } - - async fn run(self) { - let harness = self.harness; - let chain = harness.chain.clone(); - - let mut broadcast_indices = HashSet::new(); - for change in self.received_pre_capella_changes { - broadcast_indices.insert(change.as_inner().message.validator_index); - chain - .op_pool - .insert_bls_to_execution_change(change, ReceivedPreCapella::Yes); - } - - let mut non_broadcast_indices = HashSet::new(); - for change in self.not_received_pre_capella_changes { - non_broadcast_indices.insert(change.as_inner().message.validator_index); - chain - .op_pool - .insert_bls_to_execution_change(change, ReceivedPreCapella::No); - } - - harness.set_current_slot( - chain - .spec - .capella_fork_epoch - .unwrap() - .start_slot(E::slots_per_epoch()), - ); - - let 
(sender, mut receiver) = mpsc::unbounded_channel(); - - broadcast_address_changes_at_capella(&chain, sender, &logging::test_logger()).await; - - let mut broadcasted_changes = vec![]; - while let Some(NetworkMessage::Publish { mut messages }) = receiver.recv().await { - match messages.pop().unwrap() { - PubsubMessage::BlsToExecutionChange(change) => broadcasted_changes.push(change), - _ => panic!("unexpected message"), - } - } - - assert_eq!( - broadcasted_changes.len(), - broadcast_indices.len(), - "all expected changes should have been broadcast" - ); - - for broadcasted in &broadcasted_changes { - assert!( - !non_broadcast_indices.contains(&broadcasted.message.validator_index), - "messages not flagged for broadcast should not have been broadcast" - ); - } - - let head = chain.head_snapshot(); - assert!( - chain - .op_pool - .get_bls_to_execution_changes_received_pre_capella( - &head.beacon_state, - &chain.spec, - ) - .is_empty(), - "there shouldn't be any capella broadcast changes left in the op pool" - ); - } - } - - // Useful for generating even-numbered indices. Required since only even - // numbered genesis validators have BLS credentials. - fn even_indices(start: u64, count: usize) -> Vec { - (start..).filter(|i| i % 2 == 0).take(count).collect() - } - - #[tokio::test] - async fn one_chunk() { - Tester::new() - .produce_received_pre_capella_changes(even_indices(0, 4)) - .produce_not_received_pre_capella_changes(even_indices(10, 4)) - .run() - .await; - } - - #[tokio::test] - async fn multiple_chunks() { - Tester::new() - .produce_received_pre_capella_changes(even_indices(0, BROADCAST_CHUNK_SIZE * 3 / 2)) - .run() - .await; - } -} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 9c88eccc7..243dd1324 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,4 +1,6 @@ -use crate::address_change_broadcast::broadcast_address_changes_at_capella; +use crate::compute_light_client_updates::{ + compute_light_client_updates, LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY, +}; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; @@ -7,6 +9,7 @@ use beacon_chain::data_availability_checker::start_availability_cache_maintenanc use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; +use beacon_chain::LightClientProducerEvent; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::{CachingEth1Backend, Eth1Chain}, @@ -24,6 +27,7 @@ use eth2::{ BeaconNodeHttpClient, Error as ApiError, Timeouts, }; use execution_layer::ExecutionLayer; +use futures::channel::mpsc::Receiver; use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; @@ -31,6 +35,7 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; +use ssz::Decode; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -39,7 +44,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use timer::spawn_timer; use tokio::sync::oneshot; use types::{ - test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, + test_utils::generate_deterministic_keypairs, BeaconState, 
BlobSidecarList, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, }; @@ -83,6 +88,7 @@ pub struct ClientBuilder { slasher: Option>>, beacon_processor_config: Option, beacon_processor_channels: Option>, + light_client_server_rv: Option>>, eth_spec_instance: T::EthSpec, } @@ -118,6 +124,7 @@ where eth_spec_instance, beacon_processor_config: None, beacon_processor_channels: None, + light_client_server_rv: None, } } @@ -206,6 +213,16 @@ where builder }; + let builder = if config.network.enable_light_client_server { + let (tx, rv) = futures::channel::mpsc::channel::>( + LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY, + ); + self.light_client_server_rv = Some(rv); + builder.light_client_server_tx(tx) + } else { + builder + }; + let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false); // If the client is expect to resume but there's no beacon chain in the database, @@ -302,6 +319,7 @@ where ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, anchor_block_bytes, + anchor_blobs_bytes, } => { info!(context.log(), "Starting checkpoint sync"); if config.chain.genesis_backfill { @@ -315,10 +333,25 @@ where .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; + let anchor_blobs = if anchor_block.message().body().has_blobs() { + let anchor_blobs_bytes = anchor_blobs_bytes + .ok_or("Blobs for checkpoint must be provided using --checkpoint-blobs")?; + Some( + BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes) + .map_err(|e| format!("Unable to parse weak subj blobs SSZ: {e:?}"))?, + ) + } else { + None + }; let genesis_state = genesis_state(&runtime_context, &config, log).await?; builder - .weak_subjectivity_state(anchor_state, anchor_block, genesis_state) + .weak_subjectivity_state( + anchor_state, + anchor_block, + anchor_blobs, + genesis_state, + ) .map(|v| (v, None))? } ClientGenesis::CheckpointSyncUrl { url } => { @@ -413,9 +446,33 @@ where e => format!("Error fetching finalized block from remote: {:?}", e), })? .ok_or("Finalized block missing from remote, it returned 404")?; + let block_root = block.canonical_root(); debug!(context.log(), "Downloaded finalized block"); + let blobs = if block.message().body().has_blobs() { + debug!(context.log(), "Downloading finalized blobs"); + if let Some(response) = remote + .get_blobs::(BlockId::Root(block_root), None) + .await + .map_err(|e| format!("Error fetching finalized blobs from remote: {e:?}"))? + { + debug!(context.log(), "Downloaded finalized blobs"); + Some(response.data) + } else { + warn!( + context.log(), + "Checkpoint server is missing blobs"; + "block_root" => %block_root, + "hint" => "use a different URL or ask the provider to update", + "impact" => "db will be slightly corrupt until these blobs are pruned", + ); + None + } + } else { + None + }; + let genesis_state = genesis_state(&runtime_context, &config, log).await?; info!( @@ -423,7 +480,7 @@ where "Loaded checkpoint block and state"; "block_slot" => block.slot(), "state_slot" => state.slot(), - "block_root" => ?block.canonical_root(), + "block_root" => ?block_root, ); let service = @@ -451,7 +508,7 @@ where }); builder - .weak_subjectivity_state(state, block, genesis_state) + .weak_subjectivity_state(state, block, blobs, genesis_state) .map(|v| (v, service))? 
} ClientGenesis::DepositContract => { @@ -488,6 +545,7 @@ where network_senders: None, network_globals: None, beacon_processor_send: None, + beacon_processor_reprocess_send: None, eth1_service: Some(genesis_service.eth1_service.clone()), log: context.log().clone(), sse_logging_components: runtime_context.sse_logging_components.clone(), @@ -730,6 +788,9 @@ where network_globals: self.network_globals.clone(), eth1_service: self.eth1_service.clone(), beacon_processor_send: Some(beacon_processor_channels.beacon_processor_tx.clone()), + beacon_processor_reprocess_send: Some( + beacon_processor_channels.work_reprocessing_tx.clone(), + ), sse_logging_components: runtime_context.sse_logging_components.clone(), log: log.clone(), }); @@ -797,7 +858,7 @@ where } .spawn_manager( beacon_processor_channels.beacon_processor_rx, - beacon_processor_channels.work_reprocessing_tx, + beacon_processor_channels.work_reprocessing_tx.clone(), beacon_processor_channels.work_reprocessing_rx, None, beacon_chain.slot_clock.clone(), @@ -858,25 +919,26 @@ where beacon_chain.slot_clock.clone(), ); } + } - // Spawn a service to publish BLS to execution changes at the Capella fork. - if let Some(network_senders) = self.network_senders { - let inner_chain = beacon_chain.clone(); - let broadcast_context = - runtime_context.service_context("addr_bcast".to_string()); - let log = broadcast_context.log().clone(); - broadcast_context.executor.spawn( - async move { - broadcast_address_changes_at_capella( - &inner_chain, - network_senders.network_send(), - &log, - ) - .await - }, - "addr_broadcast", - ); - } + // Spawn service to publish light_client updates at some interval into the slot. + if let Some(light_client_server_rv) = self.light_client_server_rv { + let inner_chain = beacon_chain.clone(); + let light_client_update_context = + runtime_context.service_context("lc_update".to_string()); + let log = light_client_update_context.log().clone(); + light_client_update_context.executor.spawn( + async move { + compute_light_client_updates( + &inner_chain, + light_client_server_rv, + beacon_processor_channels.work_reprocessing_tx, + &log, + ) + .await + }, + "lc_update", + ); } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); diff --git a/beacon_node/client/src/compute_light_client_updates.rs b/beacon_node/client/src/compute_light_client_updates.rs new file mode 100644 index 000000000..1eb977d42 --- /dev/null +++ b/beacon_node/client/src/compute_light_client_updates.rs @@ -0,0 +1,39 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes, LightClientProducerEvent}; +use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; +use futures::channel::mpsc::Receiver; +use futures::StreamExt; +use slog::{error, Logger}; +use tokio::sync::mpsc::Sender; + +// Each `LightClientProducerEvent` is ~200 bytes. With the light_client server producing only recent +// updates it is okay to drop some events in case of overloading. In normal network conditions +// there's one event emitted per block at most every 12 seconds, while consuming the event should +// take a few milliseconds. 32 is a small enough arbitrary number. +pub(crate) const LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY: usize = 32; + +pub async fn compute_light_client_updates( + chain: &BeaconChain, + mut light_client_server_rv: Receiver>, + reprocess_tx: Sender, + log: &Logger, +) { + // Should only receive events for recent blocks, import_block filters by blocks close to clock. 
+    //
+    // Intends to process the SyncAggregates of all recent blocks sequentially, without skipping.
+    // Uses a bounded receiver, so it may drop some SyncAggregates if very overloaded. This is okay
+    // since only the most recent updates have value.
+    while let Some(event) = light_client_server_rv.next().await {
+        let parent_root = event.0;
+
+        chain
+            .recompute_and_cache_light_client_updates(event)
+            .unwrap_or_else(|e| {
+                error!(log, "error computing light_client updates {:?}", e);
+            });
+
+        let msg = ReprocessQueueMessage::NewLightClientOptimisticUpdate { parent_root };
+        if reprocess_tx.try_send(msg).is_err() {
+            error!(log, "Failed to inform light client update"; "parent_root" => %parent_root)
+        };
+    }
+}
diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs
index 275f99986..197f21c64 100644
--- a/beacon_node/client/src/config.rs
+++ b/beacon_node/client/src/config.rs
@@ -35,6 +35,7 @@ pub enum ClientGenesis {
     WeakSubjSszBytes {
         anchor_state_bytes: Vec<u8>,
         anchor_block_bytes: Vec<u8>,
+        anchor_blobs_bytes: Option<Vec<u8>>,
     },
     CheckpointSyncUrl {
         url: SensitiveUrl,
diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs
index 399aa0651..fd92c2825 100644
--- a/beacon_node/client/src/lib.rs
+++ b/beacon_node/client/src/lib.rs
@@ -1,6 +1,6 @@
 extern crate slog;
 
-mod address_change_broadcast;
+mod compute_light_client_updates;
 pub mod config;
 mod metrics;
 mod notifier;
diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml
index 7f6526898..ace8e24a8 100644
--- a/beacon_node/execution_layer/Cargo.toml
+++ b/beacon_node/execution_layer/Cargo.toml
@@ -29,7 +29,6 @@ kzg = { workspace = true }
 state_processing = { workspace = true }
 superstruct = { workspace = true }
 lru = { workspace = true }
-exit-future = { workspace = true }
 tree_hash = { workspace = true }
 tree_hash_derive = { workspace = true }
 parking_lot = { workspace = true }
@@ -51,3 +50,5 @@ hash-db = "0.15.2"
 pretty_reqwest_error = { workspace = true }
 arc-swap = "1.6.0"
 eth2_network_config = { workspace = true }
+alloy-rlp = "0.3"
+alloy-consensus = { git = "https://github.com/alloy-rs/alloy.git", rev = "974d488bab5e21e9f17452a39a4bfa56677367b2" }
diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs
index 5ba61beaf..074ef8b0c 100644
--- a/beacon_node/execution_layer/src/block_hash.rs
+++ b/beacon_node/execution_layer/src/block_hash.rs
@@ -1,92 +1,61 @@
 use crate::{
     json_structures::JsonWithdrawal,
     keccak::{keccak256, KeccakHasher},
-    metrics, Error, ExecutionLayer,
+    metrics, Error,
 };
 use ethers_core::utils::rlp::RlpStream;
 use keccak_hash::KECCAK_EMPTY_LIST_RLP;
 use triehash::ordered_trie_root;
 use types::{
-    map_execution_block_header_fields_base, Address, BeaconBlockRef, EthSpec, ExecutionBlockHash,
+    map_execution_block_header_fields_base, Address, EthSpec, ExecutionBlockHash,
     ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256,
 };
 
-impl<T: EthSpec> ExecutionLayer<T> {
-    /// Calculate the block hash of an execution block.
-    ///
-    /// Return `(block_hash, transactions_root)`, where `transactions_root` is the root of the RLP
-    /// transactions.
-    pub fn calculate_execution_block_hash(
-        payload: ExecutionPayloadRef<T>,
-        parent_beacon_block_root: Hash256,
-    ) -> (ExecutionBlockHash, Hash256) {
-        // Calculate the transactions root.
-        // We're currently using a deprecated Parity library for this. We should move to a
-        // better alternative when one appears, possibly following Reth.
-        let rlp_transactions_root = ordered_trie_root::<KeccakHasher, _>(
-            payload.transactions().iter().map(|txn_bytes| &**txn_bytes),
-        );
+/// Calculate the block hash of an execution block.
+///
+/// Return `(block_hash, transactions_root)`, where `transactions_root` is the root of the RLP
+/// transactions.
+pub fn calculate_execution_block_hash<E: EthSpec>(
+    payload: ExecutionPayloadRef<E>,
+    parent_beacon_block_root: Option<Hash256>,
+) -> (ExecutionBlockHash, Hash256) {
+    // Calculate the transactions root.
+    // We're currently using a deprecated Parity library for this. We should move to a
+    // better alternative when one appears, possibly following Reth.
+    let rlp_transactions_root = ordered_trie_root::<KeccakHasher, _>(
+        payload.transactions().iter().map(|txn_bytes| &**txn_bytes),
+    );
 
-        // Calculate withdrawals root (post-Capella).
-        let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() {
-            Some(ordered_trie_root::<KeccakHasher, _>(
-                withdrawals.iter().map(|withdrawal| {
-                    rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone()))
-                }),
-            ))
-        } else {
-            None
-        };
+    // Calculate withdrawals root (post-Capella).
+    let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() {
+        Some(ordered_trie_root::<KeccakHasher, _>(
+            withdrawals
+                .iter()
+                .map(|withdrawal| rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone()))),
+        ))
+    } else {
+        None
+    };
 
-        let rlp_blob_gas_used = payload.blob_gas_used().ok();
-        let rlp_excess_blob_gas = payload.excess_blob_gas().ok();
+    let rlp_blob_gas_used = payload.blob_gas_used().ok();
+    let rlp_excess_blob_gas = payload.excess_blob_gas().ok();
 
-        // Calculate parent beacon block root (post-Deneb).
-        let rlp_parent_beacon_block_root = rlp_excess_blob_gas
-            .as_ref()
-            .map(|_| parent_beacon_block_root);
+    // Construct the block header.
+    let exec_block_header = ExecutionBlockHeader::from_payload(
+        payload,
+        KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(),
+        rlp_transactions_root,
+        rlp_withdrawals_root,
+        rlp_blob_gas_used,
+        rlp_excess_blob_gas,
+        parent_beacon_block_root,
+    );
 
-        // Construct the block header.
-        let exec_block_header = ExecutionBlockHeader::from_payload(
-            payload,
-            KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(),
-            rlp_transactions_root,
-            rlp_withdrawals_root,
-            rlp_blob_gas_used,
-            rlp_excess_blob_gas,
-            rlp_parent_beacon_block_root,
-        );
-
-        // Hash the RLP encoding of the block header.
-        let rlp_block_header = rlp_encode_block_header(&exec_block_header);
-        (
-            ExecutionBlockHash::from_root(keccak256(&rlp_block_header)),
-            rlp_transactions_root,
-        )
-    }
-
-    /// Verify `payload.block_hash` locally within Lighthouse.
-    ///
-    /// No remote calls to the execution client will be made, so this is quite a cheap check.
-    pub fn verify_payload_block_hash(&self, block: BeaconBlockRef<T>) -> Result<(), Error> {
-        let payload = block.execution_payload()?.execution_payload_ref();
-        let parent_beacon_block_root = block.parent_root();
-
-        let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH);
-
-        let (header_hash, rlp_transactions_root) =
-            Self::calculate_execution_block_hash(payload, parent_beacon_block_root);
-
-        if header_hash != payload.block_hash() {
-            return Err(Error::BlockHashMismatch {
-                computed: header_hash,
-                payload: payload.block_hash(),
-                transactions_root: rlp_transactions_root,
-            });
-        }
-
-        Ok(())
-    }
+    // Hash the RLP encoding of the block header.
+    let rlp_block_header = rlp_encode_block_header(&exec_block_header);
+    (
+        ExecutionBlockHash::from_root(keccak256(&rlp_block_header)),
+        rlp_transactions_root,
+    )
 }
 
 /// RLP encode a withdrawal.
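With the hash computation extracted to a free function, a caller verifies a payload's self-declared hash by recomputing it and comparing, which is what `verify_payload_block_hash` on `NewPayloadRequest` does later in this diff. A hedged sketch of that flow; the generic parameter name and module path are assumptions, not the exact API:

```rust
use types::{EthSpec, ExecutionPayloadRef, Hash256};

fn check_block_hash<E: EthSpec>(
    payload: ExecutionPayloadRef<E>,
    parent_beacon_block_root: Option<Hash256>,
) -> bool {
    // Re-derive the hash from the RLP-encoded header and compare it to the
    // hash the payload declares for itself.
    let (computed, _transactions_root) =
        crate::block_hash::calculate_execution_block_hash(payload, parent_beacon_block_root);
    computed == payload.block_hash()
}
```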
diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 19b9a58eb..e20009e28 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -17,7 +17,6 @@ pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; use pretty_reqwest_error::PrettyReqwestError; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; -use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::convert::TryFrom; use strum::IntoStaticStr; use superstruct::superstruct; @@ -26,14 +25,16 @@ pub use types::{ ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, Withdrawal, Withdrawals, }; -use types::{ - BeaconStateError, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, - KzgProofs, VersionedHash, -}; +use types::{ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, KzgProofs}; pub mod auth; pub mod http; pub mod json_structures; +mod new_payload_request; + +pub use new_payload_request::{ + NewPayloadRequest, NewPayloadRequestCapella, NewPayloadRequestDeneb, NewPayloadRequestMerge, +}; pub const LATEST_TAG: &str = "latest"; @@ -571,110 +572,6 @@ impl ExecutionPayloadBodyV1 { } } -#[superstruct( - variants(Merge, Capella, Deneb), - variant_attributes(derive(Clone, Debug, PartialEq),), - map_into(ExecutionPayload), - map_ref_into(ExecutionPayloadRef), - cast_error( - ty = "BeaconStateError", - expr = "BeaconStateError::IncorrectStateVariant" - ), - partial_getter_error( - ty = "BeaconStateError", - expr = "BeaconStateError::IncorrectStateVariant" - ) -)] -#[derive(Clone, Debug, PartialEq)] -pub struct NewPayloadRequest { - #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] - pub execution_payload: ExecutionPayloadMerge, - #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] - pub execution_payload: ExecutionPayloadCapella, - #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] - pub execution_payload: ExecutionPayloadDeneb, - #[superstruct(only(Deneb))] - pub versioned_hashes: Vec, - #[superstruct(only(Deneb))] - pub parent_beacon_block_root: Hash256, -} - -impl NewPayloadRequest { - pub fn parent_hash(&self) -> ExecutionBlockHash { - match self { - Self::Merge(payload) => payload.execution_payload.parent_hash, - Self::Capella(payload) => payload.execution_payload.parent_hash, - Self::Deneb(payload) => payload.execution_payload.parent_hash, - } - } - - pub fn block_hash(&self) -> ExecutionBlockHash { - match self { - Self::Merge(payload) => payload.execution_payload.block_hash, - Self::Capella(payload) => payload.execution_payload.block_hash, - Self::Deneb(payload) => payload.execution_payload.block_hash, - } - } - - pub fn block_number(&self) -> u64 { - match self { - Self::Merge(payload) => payload.execution_payload.block_number, - Self::Capella(payload) => payload.execution_payload.block_number, - Self::Deneb(payload) => payload.execution_payload.block_number, - } - } - - pub fn into_execution_payload(self) -> ExecutionPayload { - map_new_payload_request_into_execution_payload!(self, |request, cons| { - cons(request.execution_payload) - }) - } -} - -impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest { - type Error = BeaconStateError; - - fn try_from(block: BeaconBlockRef<'a, E>) -> Result { - match block { - BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => { - 
Err(Self::Error::IncorrectStateVariant) - } - BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge { - execution_payload: block_ref.body.execution_payload.execution_payload.clone(), - })), - BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella { - execution_payload: block_ref.body.execution_payload.execution_payload.clone(), - })), - BeaconBlockRef::Deneb(block_ref) => Ok(Self::Deneb(NewPayloadRequestDeneb { - execution_payload: block_ref.body.execution_payload.execution_payload.clone(), - versioned_hashes: block_ref - .body - .blob_kzg_commitments - .iter() - .map(kzg_commitment_to_versioned_hash) - .collect(), - parent_beacon_block_root: block_ref.parent_root, - })), - } - } -} - -impl TryFrom> for NewPayloadRequest { - type Error = BeaconStateError; - - fn try_from(payload: ExecutionPayload) -> Result { - match payload { - ExecutionPayload::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge { - execution_payload: payload, - })), - ExecutionPayload::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella { - execution_payload: payload, - })), - ExecutionPayload::Deneb(_) => Err(Self::Error::IncorrectStateVariant), - } - } -} - #[derive(Clone, Copy, Debug)] pub struct EngineCapabilities { pub new_payload_v1: bool, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index ac7dfa57e..df0f79c61 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -803,10 +803,10 @@ impl HttpJsonRpc { pub async fn new_payload_v3( &self, - new_payload_request_deneb: NewPayloadRequestDeneb, + new_payload_request_deneb: NewPayloadRequestDeneb<'_, T>, ) -> Result { let params = json!([ - JsonExecutionPayload::V3(new_payload_request_deneb.execution_payload.into()), + JsonExecutionPayload::V3(new_payload_request_deneb.execution_payload.clone().into()), new_payload_request_deneb.versioned_hashes, new_payload_request_deneb.parent_beacon_block_root, ]); @@ -1079,7 +1079,7 @@ impl HttpJsonRpc { // new_payload that the execution engine supports pub async fn new_payload( &self, - new_payload_request: NewPayloadRequest, + new_payload_request: NewPayloadRequest<'_, T>, ) -> Result { let engine_capabilities = self.get_engine_capabilities(None).await?; match new_payload_request { diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs new file mode 100644 index 000000000..b9527ed09 --- /dev/null +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -0,0 +1,332 @@ +use crate::{block_hash::calculate_execution_block_hash, metrics, Error}; + +use crate::versioned_hashes::verify_versioned_hashes; +use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; +use superstruct::superstruct; +use types::{ + BeaconBlockRef, BeaconStateError, EthSpec, ExecutionBlockHash, ExecutionPayload, + ExecutionPayloadRef, Hash256, VersionedHash, +}; +use types::{ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge}; + +#[superstruct( + variants(Merge, Capella, Deneb), + variant_attributes(derive(Clone, Debug, PartialEq),), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) +)] 
+#[derive(Clone, Debug, PartialEq)] +pub struct NewPayloadRequest<'block, E: EthSpec> { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: &'block ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: &'block ExecutionPayloadCapella, + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload: &'block ExecutionPayloadDeneb, + #[superstruct(only(Deneb))] + pub versioned_hashes: Vec, + #[superstruct(only(Deneb))] + pub parent_beacon_block_root: Hash256, +} + +impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { + pub fn parent_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload.parent_hash, + Self::Capella(payload) => payload.execution_payload.parent_hash, + Self::Deneb(payload) => payload.execution_payload.parent_hash, + } + } + + pub fn block_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload.block_hash, + Self::Capella(payload) => payload.execution_payload.block_hash, + Self::Deneb(payload) => payload.execution_payload.block_hash, + } + } + + pub fn block_number(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload.block_number, + Self::Capella(payload) => payload.execution_payload.block_number, + Self::Deneb(payload) => payload.execution_payload.block_number, + } + } + + pub fn execution_payload_ref(&self) -> ExecutionPayloadRef<'block, E> { + match self { + Self::Merge(request) => ExecutionPayloadRef::Merge(request.execution_payload), + Self::Capella(request) => ExecutionPayloadRef::Capella(request.execution_payload), + Self::Deneb(request) => ExecutionPayloadRef::Deneb(request.execution_payload), + } + } + + pub fn into_execution_payload(self) -> ExecutionPayload { + match self { + Self::Merge(request) => ExecutionPayload::Merge(request.execution_payload.clone()), + Self::Capella(request) => ExecutionPayload::Capella(request.execution_payload.clone()), + Self::Deneb(request) => ExecutionPayload::Deneb(request.execution_payload.clone()), + } + } + + /// Performs the required verifications of the payload when the chain is optimistically syncing. + /// + /// ## Specification + /// + /// Performs the verifications in the `verify_and_notify_new_payload` function: + /// + /// https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload + pub fn perform_optimistic_sync_verifications(&self) -> Result<(), Error> { + self.verify_payload_block_hash()?; + self.verify_versioned_hashes()?; + + Ok(()) + } + + /// Verify the block hash is consistent locally within Lighthouse. 
+ /// + /// ## Specification + /// + /// Equivalent to `is_valid_block_hash` in the spec: + /// https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/beacon-chain.md#is_valid_block_hash + pub fn verify_payload_block_hash(&self) -> Result<(), Error> { + let payload = self.execution_payload_ref(); + let parent_beacon_block_root = self.parent_beacon_block_root().ok().cloned(); + + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); + + let (header_hash, rlp_transactions_root) = + calculate_execution_block_hash(payload, parent_beacon_block_root); + + if header_hash != self.block_hash() { + return Err(Error::BlockHashMismatch { + computed: header_hash, + payload: payload.block_hash(), + transactions_root: rlp_transactions_root, + }); + } + + Ok(()) + } + + /// Verify the versioned hashes computed by the blob transactions match the versioned hashes computed from the commitments. + /// + /// ## Specification + /// + /// Equivalent to `is_valid_versioned_hashes` in the spec: + /// https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/beacon-chain.md#is_valid_versioned_hashes + pub fn verify_versioned_hashes(&self) -> Result<(), Error> { + if let Ok(versioned_hashes) = self.versioned_hashes() { + verify_versioned_hashes(self.execution_payload_ref(), versioned_hashes) + .map_err(Error::VerifyingVersionedHashes)?; + } + Ok(()) + } +} + +impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> { + type Error = BeaconStateError; + + fn try_from(block: BeaconBlockRef<'a, E>) -> Result { + match block { + BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => { + Err(Self::Error::IncorrectStateVariant) + } + BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge { + execution_payload: &block_ref.body.execution_payload.execution_payload, + })), + BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella { + execution_payload: &block_ref.body.execution_payload.execution_payload, + })), + BeaconBlockRef::Deneb(block_ref) => Ok(Self::Deneb(NewPayloadRequestDeneb { + execution_payload: &block_ref.body.execution_payload.execution_payload, + versioned_hashes: block_ref + .body + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(), + parent_beacon_block_root: block_ref.parent_root, + })), + } + } +} + +impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> { + type Error = BeaconStateError; + + fn try_from(payload: ExecutionPayloadRef<'a, E>) -> Result { + match payload { + ExecutionPayloadRef::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge { + execution_payload: payload, + })), + ExecutionPayloadRef::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella { + execution_payload: payload, + })), + ExecutionPayloadRef::Deneb(_) => Err(Self::Error::IncorrectStateVariant), + } + } +} + +#[cfg(test)] +mod test { + use crate::versioned_hashes::Error as VersionedHashError; + use crate::{Error, NewPayloadRequest}; + use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; + use types::{BeaconBlock, ExecPayload, ExecutionBlockHash, Hash256, MainnetEthSpec}; + + #[test] + fn test_optimistic_sync_verifications_valid_block() { + let beacon_block = get_valid_beacon_block(); + let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref()) + .expect("should create new payload request"); + + assert!( + new_payload_request + .perform_optimistic_sync_verifications() + .is_ok(), + "validations should pass" + ); + } + + #[test] 
+#[cfg(test)]
+mod test {
+    use crate::versioned_hashes::Error as VersionedHashError;
+    use crate::{Error, NewPayloadRequest};
+    use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
+    use types::{BeaconBlock, ExecPayload, ExecutionBlockHash, Hash256, MainnetEthSpec};
+
+    #[test]
+    fn test_optimistic_sync_verifications_valid_block() {
+        let beacon_block = get_valid_beacon_block();
+        let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref())
+            .expect("should create new payload request");
+
+        assert!(
+            new_payload_request
+                .perform_optimistic_sync_verifications()
+                .is_ok(),
+            "validations should pass"
+        );
+    }
+
+    #[test]
+    fn test_optimistic_sync_verifications_bad_block_hash() {
+        let mut beacon_block = get_valid_beacon_block();
+        let correct_block_hash = beacon_block
+            .body()
+            .execution_payload()
+            .expect("should get payload")
+            .block_hash();
+        let invalid_block_hash = ExecutionBlockHash(Hash256::repeat_byte(0x42));
+
+        // now mutate the block hash
+        beacon_block
+            .body_mut()
+            .execution_payload_deneb_mut()
+            .expect("should get payload")
+            .execution_payload
+            .block_hash = invalid_block_hash;
+
+        let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref())
+            .expect("should create new payload request");
+        let verification_result = new_payload_request.perform_optimistic_sync_verifications();
+        println!("verification_result: {:?}", verification_result);
+        let got_expected_result = match verification_result {
+            Err(Error::BlockHashMismatch {
+                computed, payload, ..
+            }) => computed == correct_block_hash && payload == invalid_block_hash,
+            _ => false,
+        };
+        assert!(got_expected_result, "should return expected error");
+    }
+
+    #[test]
+    fn test_optimistic_sync_verifications_bad_versioned_hashes() {
+        let mut beacon_block = get_valid_beacon_block();
+
+        let mut commitments: Vec<_> = beacon_block
+            .body()
+            .blob_kzg_commitments()
+            .expect("should get commitments")
+            .clone()
+            .into();
+
+        let correct_versioned_hash = kzg_commitment_to_versioned_hash(
+            commitments.last().expect("should get last commitment"),
+        );
+
+        // mutate the last commitment
+        commitments
+            .last_mut()
+            .expect("should get last commitment")
+            .0[0] = 0x42;
+
+        // calculate versioned hash from mutated commitment
+        let bad_versioned_hash = kzg_commitment_to_versioned_hash(
+            commitments.last().expect("should get last commitment"),
+        );
+
+        *beacon_block
+            .body_mut()
+            .blob_kzg_commitments_mut()
+            .expect("should get commitments") = commitments.into();
+
+        let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref())
+            .expect("should create new payload request");
+        let verification_result = new_payload_request.perform_optimistic_sync_verifications();
+        println!("verification_result: {:?}", verification_result);
+
+        let got_expected_result = match verification_result {
+            Err(Error::VerifyingVersionedHashes(VersionedHashError::VersionHashMismatch {
+                expected,
+                found,
+            })) => expected == bad_versioned_hash && found == correct_versioned_hash,
+            _ => false,
+        };
+        assert!(got_expected_result, "should return expected error");
+    }
+
+    fn get_valid_beacon_block() -> BeaconBlock<MainnetEthSpec> {
+        BeaconBlock::Deneb(serde_json::from_str(r#"{
+            "slot": "88160",
+            "proposer_index": "583",
+            "parent_root": "0x60770cd86a497ca3aa2e91f1687aa3ebafac87af52c30a920b5f40bd9e930eb6",
+            "state_root": "0x4a0e0abbcbcf576f2cb7387c4289ab13b8a128e32127642f056143d6164941a6",
+            "body": {
+              "randao_reveal": "0xb5253d5739496abc4f67c7c92e39e46cca452c2fdfc5275e3e0426a012aa62df82f47f7dece348e28db4bb212f0e793d187120bbd47b8031ed79344116eb4128f0ce0b05ba18cd615bb13966c1bd7d89e23cc769c8e4d8e4a63755f623ac3bed",
+              "eth1_data": {
+                "deposit_root": "0xe4785ac914d8673797f886e3151ce2647f81ae070c7ddb6845e65fd1c47d1222",
+                "deposit_count": "1181",
+                "block_hash": "0x010671bdfbfce6b0071984a06a7ded6deef13b4f8fdbae402c606a7a0c8780d1"
+              },
+              "graffiti": "0x6c6f6465737461722f6765746800000000000000000000000000000000000000",
+              "proposer_slashings": [],
+              "attester_slashings": [],
+              "attestations": [],
+              "deposits": [],
+              "voluntary_exits": [],
+              "sync_aggregate": {
+                "sync_committee_bits":
"0xfebffffffebfff7fff7f7fffbbefffff6affffffffbfffffefffebfffdbf77fff7fd77ffffefffdff7ffffeffffffe7e5ffffffdefffff7ffbffff7fffffffff", + "sync_committee_signature": "0x91939b5baf2a6f52d405b6dd396f5346ec435eca7d25912c91cc6a2f7030d870d68bebe4f2b21872a06929ff4cf3e5e9191053cb43eb24ebe34b9a75fb88a3acd06baf329c87f68bd664b49891260c698d7bca0f5365870b5b2b3a76f582156c" + }, + "execution_payload": { + "parent_hash": "0xa6f3ed782a992f79ad38da2af91b3e8923c71b801c50bc9033bb35a2e1da885f", + "fee_recipient": "0xf97e180c050e5ab072211ad2c213eb5aee4df134", + "state_root": "0x3bfd1a7f309ed35048c349a8daf01815bdc09a6d5df86ea77d1056f248ba2017", + "receipts_root": "0xcb5b8ffea57cd0fa87194d49bc8bb7fad08c93c9934b886489503c328d15fd36", + "logs_bloom": "0x002000000000000000000000800000000000000000001040000000000000000000000001000000000000000000000000000000000000100000000020000c0800000000000000008000000008000000200000800000000000000000000000000000000000000000008000000000008000000000000000000002000010000000000000000000000000000000000000000000000000000000080000004000000000800000000000000000000100000000000000000000000000000000000800000000000102000000000000000000000000000000080000001000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prev_randao": "0xb2693020177d99ffbd4c267023be172d759e7306ff51b0e7d677d3148fbd7f1d", + "block_number": "74807", + "gas_limit": "30000000", + "gas_used": "128393", + "timestamp": "1697039520", + "extra_data": "0xd883010d03846765746888676f312e32312e31856c696e7578", + "base_fee_per_gas": "7", + "block_hash": "0xc64f3a43c64aeb98518a237f6279fa03095b9f95ca673c860ad7f16fb9340062", + "transactions": [ + "0x02f9017a8501a1f0ff4382317585012a05f2008512a05f2000830249f094c1b0bc605e2c808aa0867bfc98e51a1fe3e9867f80b901040cc7326300000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000036e534e16b8920d000000000000000000000000fb3e9c7cb92443931ee6b5b9728598d4eb9618c1000000000000000000000000fc7360b3b28cf4204268a8354dbec60720d155d2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000009a054a063f0fe7b9c68de8df91aaa5e96c15ab540000000000000000000000000c8d41b8fcc066cdabaf074d78e5153e8ce018a9c080a07dd9be0d014ffcd5b6883d0917c66b74ba51f0d976c8fc5674af192af6fa9450a02dad2c660974c125f5f22b1e6e8862a292e08cc2b4cafda35af650ee62868a43", + "0x03f8db8501a1f0ff430d84773594008504a817c8008252089454e594b6de0aa4b0188cd1549dd7ba715a455d078080c08504a817c800f863a001253ce00f525e3495cffa0b865eadb90a4c5ee812185cc796af74b6ec0a5dd7a0010720372a4d7dcab84413ed0cfc164fb91fb6ef1562ec2f7a82e912a1d9e129a0015a73e97950397896ed2c47dcab7c0360220bcfb413a8f210a7b6e6264e698880a04402cb0f13c17ef41dca106b1e1520c7aadcbe62984d81171e29914f587d67c1a02db62a8edb581917958e4a3884e7eececbaec114c5ee496e238033e896f997ac" + ], + "withdrawals": [], + "blob_gas_used": "393216", + "excess_blob_gas": "58720256" + }, + "bls_to_execution_changes": [], + "blob_kzg_commitments": [ + "0xa7accb7a25224a8c2e0cee9cd569fc1798665bfbfe780e08945fa9098ec61da4061f5b04e750a88d3340a801850a54fa", + "0xac7b47f99836510ae9076dc5f5da1f370679dea1d47073307a14cbb125cdc7822ae619637135777cb40e13d897fd00a7", + "0x997794110b9655833a88ad5a4ec40a3dc7964877bfbeb04ca1abe1d51bdc43e20e4c5757028896d298d7da954a6f14a1" + ] + } + }"#).expect("should decode")) + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 868d81944..69b84adbb 100644 --- 
a/beacon_node/execution_layer/src/lib.rs
+++ b/beacon_node/execution_layer/src/lib.rs
@@ -7,6 +7,7 @@ use crate::payload_cache::PayloadCache;
 use arc_swap::ArcSwapOption;
 use auth::{strip_prefix, Auth, JwtKey};
+pub use block_hash::calculate_execution_block_hash;
 use builder_client::BuilderHttpClient;
 pub use engine_api::EngineCapabilities;
 use engine_api::Error as ApiError;
@@ -61,6 +62,7 @@ mod metrics;
 pub mod payload_cache;
 mod payload_status;
 pub mod test_utils;
+mod versioned_hashes;
 
 /// Indicates the default jwt authenticated execution endpoint.
 pub const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/";
@@ -141,6 +143,7 @@ pub enum Error {
     InvalidBlobConversion(String),
     BeaconStateError(BeaconStateError),
     PayloadTypeMismatch,
+    VerifyingVersionedHashes(versioned_hashes::Error),
 }
 
 impl From<ApiError> for Error {
@@ -1321,7 +1324,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
     /// Maps to the `engine_newPayload` JSON-RPC call.
     pub async fn notify_new_payload(
         &self,
-        new_payload_request: NewPayloadRequest<T>,
+        new_payload_request: NewPayloadRequest<'_, T>,
     ) -> Result<PayloadStatus, Error> {
         let _timer = metrics::start_timer_vec(
             &metrics::EXECUTION_LAYER_REQUEST_TIMES,
diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
index 182cad50f..6af988fa8 100644
--- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
+++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
@@ -699,7 +699,7 @@ pub fn generate_blobs(
     Ok((bundle, transactions.into()))
 }
 
-fn static_valid_tx<T: EthSpec>() -> Result<Transaction<T::MaxBytesPerTransaction>, String> {
+pub fn static_valid_tx<T: EthSpec>() -> Result<Transaction<T::MaxBytesPerTransaction>, String> {
     // This is a real transaction hex encoded, but we don't care about the contents of the transaction.
     let transaction: EthersTransaction = serde_json::from_str(
         r#"{
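Since `NewPayloadRequest` now borrows the payload (note the added `'_` lifetime on `notify_new_payload`), call sites convert from a reference rather than consuming the value. A hedged sketch of the new call shape, mirroring the `mock_execution_layer.rs` hunk below (`payload: ExecutionPayload<E>` and `el: ExecutionLayer<E>` are assumed in scope):

```rust
// The request borrows `payload` via `to_ref()`, so it must be used before
// `payload` is moved or dropped.
let request = NewPayloadRequest::try_from(payload.to_ref())?;
let status = el.notify_new_payload(request).await?;
```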
diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs
index 3d4ea51f4..2c5bde55e 100644
--- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs
+++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs
@@ -54,7 +54,8 @@ impl Operation {
 }
 
 #[derive(Debug)]
-struct Custom(String);
+// We don't use the string value directly, but it's used in the Debug impl which is required by `warp::reject::Reject`.
+struct Custom(#[allow(dead_code)] String);
 
 impl warp::reject::Reject for Custom {}
diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs
index 7afeafc32..77c2410ab 100644
--- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs
+++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs
@@ -244,7 +244,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
         // TODO: again consider forks
         let status = self
             .el
-            .notify_new_payload(payload.try_into().unwrap())
+            .notify_new_payload(payload.to_ref().try_into().unwrap())
             .await
             .unwrap();
         assert_eq!(status, PayloadStatus::Valid);
diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs
index f0be51114..425329a52 100644
--- a/beacon_node/execution_layer/src/test_utils/mod.rs
+++ b/beacon_node/execution_layer/src/test_utils/mod.rs
@@ -25,8 +25,8 @@ use warp::{http::StatusCode, Filter, Rejection};
 use crate::EngineCapabilities;
 
 pub use execution_block_generator::{
-    generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, Block,
-    ExecutionBlockGenerator,
+    generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block,
+    static_valid_tx, Block, ExecutionBlockGenerator,
 };
 pub use hook::Hook;
 pub use mock_builder::{MockBuilder, Operation};
@@ -599,8 +599,8 @@ async fn handle_rejection(err: Rejection) -> Result<impl warp::Reply, Infallible> {
-    if let Some(e) = err.find::<AuthError>() {
-        message = format!("Authorization error: {:?}", e);
+    if let Some(AuthError(e)) = err.find::<AuthError>() {
+        message = format!("Authorization error: {}", e);
         code = StatusCode::UNAUTHORIZED;
     } else {
         message = "BAD_REQUEST".to_string();
diff --git a/beacon_node/execution_layer/src/versioned_hashes.rs b/beacon_node/execution_layer/src/versioned_hashes.rs
new file mode 100644
index 000000000..37bd35646
--- /dev/null
+++ b/beacon_node/execution_layer/src/versioned_hashes.rs
@@ -0,0 +1,135 @@
+extern crate alloy_consensus;
+extern crate alloy_rlp;
+use alloy_consensus::TxEnvelope;
+use alloy_rlp::Decodable;
+use types::{EthSpec, ExecutionPayloadRef, Hash256, Unsigned, VersionedHash};
+
+#[derive(Debug)]
+pub enum Error {
+    DecodingTransaction(String),
+    LengthMismatch { expected: usize, found: usize },
+    VersionHashMismatch { expected: Hash256, found: Hash256 },
+}
+
+pub fn verify_versioned_hashes<E: EthSpec>(
+    execution_payload: ExecutionPayloadRef<E>,
+    expected_versioned_hashes: &[VersionedHash],
+) -> Result<(), Error> {
+    let versioned_hashes =
+        extract_versioned_hashes_from_transactions::<E>(execution_payload.transactions())?;
+    if versioned_hashes.len() != expected_versioned_hashes.len() {
+        return Err(Error::LengthMismatch {
+            expected: expected_versioned_hashes.len(),
+            found: versioned_hashes.len(),
+        });
+    }
+    for (found, expected) in versioned_hashes
+        .iter()
+        .zip(expected_versioned_hashes.iter())
+    {
+        if found != expected {
+            return Err(Error::VersionHashMismatch {
+                expected: *expected,
+                found: *found,
+            });
+        }
+    }
+
+    Ok(())
+}
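For context on what these versioned hashes are: under EIP-4844, a versioned hash is the SHA-256 of a KZG commitment with the first byte overwritten by the version `0x01`. A self-contained sketch of the derivation (using the `sha2` crate for illustration; Lighthouse's own helper, `kzg_commitment_to_versioned_hash`, appears elsewhere in this diff):

```rust
use sha2::{Digest, Sha256};

const VERSIONED_HASH_VERSION_KZG: u8 = 0x01;

/// Derive an EIP-4844 versioned hash from a 48-byte KZG commitment.
fn versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
    let mut hash: [u8; 32] = Sha256::digest(commitment).into();
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    hash
}
```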
+
+pub fn extract_versioned_hashes_from_transactions<E: EthSpec>(
+    transactions: &types::Transactions<E>,
+) -> Result<Vec<VersionedHash>, Error> {
+    let mut versioned_hashes = Vec::new();
+
+    for tx in transactions {
+        match beacon_tx_to_tx_envelope(tx)? {
+            TxEnvelope::Eip4844(signed_tx_eip4844) => {
+                versioned_hashes.extend(
+                    signed_tx_eip4844
+                        .tx()
+                        .blob_versioned_hashes
+                        .iter()
+                        .map(|fb| Hash256::from(fb.0)),
+                );
+            }
+            // Enumerate all variants explicitly to keep the pattern irrefutable, in case new
+            // transaction types which also carry blobs are added in the future.
+            TxEnvelope::Legacy(_)
+            | TxEnvelope::TaggedLegacy(_)
+            | TxEnvelope::Eip2930(_)
+            | TxEnvelope::Eip1559(_) => {}
+        }
+    }
+
+    Ok(versioned_hashes)
+}
+
+pub fn beacon_tx_to_tx_envelope<N: Unsigned>(
+    tx: &types::Transaction<N>,
+) -> Result<TxEnvelope, Error> {
+    let tx_bytes = Vec::from(tx.clone());
+    TxEnvelope::decode(&mut tx_bytes.as_slice())
+        .map_err(|e| Error::DecodingTransaction(e.to_string()))
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::test_utils::static_valid_tx;
+    use alloy_consensus::{TxKind, TxLegacy};
+
+    type E = types::MainnetEthSpec;
+
+    #[test]
+    fn test_decode_static_transaction() {
+        let valid_tx = static_valid_tx::<E>().expect("should give me known valid transaction");
+        let tx_envelope = beacon_tx_to_tx_envelope(&valid_tx).expect("should decode tx");
+        let TxEnvelope::Legacy(signed_tx) = tx_envelope else {
+            panic!("should decode to legacy transaction");
+        };
+
+        assert!(matches!(
+            signed_tx.tx(),
+            TxLegacy {
+                chain_id: Some(0x01),
+                nonce: 0x15,
+                gas_price: 0x4a817c800,
+                to: TxKind::Call(..),
+                ..
+            }
+        ));
+    }
+
+    #[test]
+    fn test_extract_versioned_hashes() {
+        use serde::Deserialize;
+
+        #[derive(Deserialize)]
+        #[serde(transparent)]
+        struct TestTransactions<E: EthSpec>(
+            #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] types::Transactions<E>,
+        );
+
+        let TestTransactions(raw_transactions): TestTransactions<E> = serde_json::from_str(r#"[
+            "0x03f901388501a1f0ff430f843b9aca00843b9aca0082520894e7249813d8ccf6fa95a2203f46a64166073d58878080c002f8c6a0012e98362c814f1724262c0d211a1463418a5f6382a8d457b37a2698afbe7b5ea00100ef985761395dfa8ed5ce91f3f2180b612401909e4cb8f33b90c8a454d9baa0013d45411623b90d90f916e4025ada74b453dd4ca093c017c838367c9de0f801a001753e2af0b1e70e7ef80541355b2a035cc9b2c177418bb2a4402a9b346cf84da0011789b520a8068094a92aa0b04db8d8ef1c6c9818947c5210821732b8744049a0011c4c4f95597305daa5f62bf5f690e37fa11f5de05a95d05cac4e2119e394db80a0ccd86a742af0e042d08cbb35d910ddc24bbc6538f9e53be6620d4b6e1bb77662a01a8bacbc614940ac2f5c23ffc00a122c9f085046883de65c88ab0edb859acb99",
+            "0x02f9017a8501a1f0ff4382363485012a05f2008512a05f2000830249f094c1b0bc605e2c808aa0867bfc98e51a1fe3e9867f80b901040cc7326300000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000009445a285baa43e00000000000000000000000000c500931f24edb821cef6e28f7adb33b38578c82000000000000000000000000fc7360b3b28cf4204268a8354dbec60720d155d2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000009a054a063f0fe7b9c68de8df91aaa5e96c15ab540000000000000000000000000c8d41b8fcc066cdabaf074d78e5153e8ce018a9c080a008e14475c1173cd9f5740c24c08b793f9e16c36c08fa73769db95050e31e3396a019767dcdda26c4a774ca28c9df15d0c20e43bd07bd33ee0f84d6096cb5a1ebed"
+        ]"#).expect("should get raw transactions");
+        let expected_versioned_hashes = vec![
+            "0x012e98362c814f1724262c0d211a1463418a5f6382a8d457b37a2698afbe7b5e",
+            "0x0100ef985761395dfa8ed5ce91f3f2180b612401909e4cb8f33b90c8a454d9ba",
+            "0x013d45411623b90d90f916e4025ada74b453dd4ca093c017c838367c9de0f801",
+            "0x01753e2af0b1e70e7ef80541355b2a035cc9b2c177418bb2a4402a9b346cf84d",
+            "0x011789b520a8068094a92aa0b04db8d8ef1c6c9818947c5210821732b8744049",
"0x011c4c4f95597305daa5f62bf5f690e37fa11f5de05a95d05cac4e2119e394db", + ] + .into_iter() + .map(|tx| Hash256::from_slice(&hex::decode(&tx[2..]).expect("should decode hex"))) + .collect::>(); + + let versioned_hashes = extract_versioned_hashes_from_transactions::(&raw_transactions) + .expect("should get versioned hashes"); + assert_eq!(versioned_hashes, expected_versioned_hashes); + } +} diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 3e7d8d5e3..6e3ebccce 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -14,11 +14,12 @@ const MAX_REQUEST_RANGE_EPOCHS: usize = 100; const BLOCK_ROOT_CHUNK_SIZE: usize = 100; #[derive(Debug)] +// We don't use the inner values directly, but they're used in the Debug impl. enum AttestationPerformanceError { - BlockReplay(BlockReplayError), - BeaconState(BeaconStateError), - ParticipationCache(ParticipationCacheError), - UnableToFindValidator(usize), + BlockReplay(#[allow(dead_code)] BlockReplayError), + BeaconState(#[allow(dead_code)] BeaconStateError), + ParticipationCache(#[allow(dead_code)] ParticipationCacheError), + UnableToFindValidator(#[allow(dead_code)] usize), } impl From for AttestationPerformanceError { diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index e099e130a..c73dcb7e0 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -19,10 +19,11 @@ use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_e const BLOCK_ROOT_CHUNK_SIZE: usize = 100; #[derive(Debug)] +// We don't use the inner values directly, but they're used in the Debug impl. 
diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs
index e099e130a..c73dcb7e0 100644
--- a/beacon_node/http_api/src/block_packing_efficiency.rs
+++ b/beacon_node/http_api/src/block_packing_efficiency.rs
@@ -19,10 +19,11 @@ use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error};
 
 const BLOCK_ROOT_CHUNK_SIZE: usize = 100;
 
 #[derive(Debug)]
+// We don't use the inner values directly, but they're used in the Debug impl.
 enum PackingEfficiencyError {
-    BlockReplay(BlockReplayError),
-    BeaconState(BeaconStateError),
-    CommitteeStoreError(Slot),
+    BlockReplay(#[allow(dead_code)] BlockReplayError),
+    BeaconState(#[allow(dead_code)] BeaconStateError),
+    CommitteeStoreError(#[allow(dead_code)] Slot),
     InvalidAttestationError,
 }
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 1594668e5..5a8d5cae0 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -16,6 +16,7 @@ mod database;
 mod metrics;
 mod produce_block;
 mod proposer_duties;
+mod publish_attestations;
 mod publish_blocks;
 mod standard_block_rewards;
 mod state_id;
@@ -35,7 +36,7 @@ use beacon_chain::{
     validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError,
     BeaconChainTypes, WhenSlotSkipped,
 };
-use beacon_processor::BeaconProcessorSend;
+use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend};
 pub use block_id::BlockId;
 use builder_states::get_next_withdrawals;
 use bytes::Bytes;
@@ -67,7 +68,7 @@ use std::path::PathBuf;
 use std::pin::Pin;
 use std::sync::Arc;
 use sysinfo::{System, SystemExt};
-use system_health::observe_system_health_bn;
+use system_health::{observe_nat, observe_system_health_bn};
 use task_spawner::{Priority, TaskSpawner};
 use tokio::sync::{
     mpsc::{Sender, UnboundedSender},
@@ -129,6 +130,7 @@ pub struct Context<T: BeaconChainTypes> {
     pub network_senders: Option<NetworkSenders<T::EthSpec>>,
     pub network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
     pub beacon_processor_send: Option<BeaconProcessorSend<T::EthSpec>>,
+    pub beacon_processor_reprocess_send: Option<Sender<ReprocessQueueMessage>>,
     pub eth1_service: Option<eth1::Service>,
     pub sse_logging_components: Option<SSELoggingComponents>,
     pub log: Logger,
@@ -534,6 +536,11 @@ pub fn serve(
         .filter(|_| config.enable_beacon_processor);
     let task_spawner_filter =
         warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone()));
+    let beacon_processor_reprocess_send = ctx
+        .beacon_processor_reprocess_send
+        .clone()
+        .filter(|_| config.enable_beacon_processor);
+    let reprocess_send_filter = warp::any().map(move || beacon_processor_reprocess_send.clone());
 
     let duplicate_block_status_code = ctx.config.duplicate_block_status_code;
 
@@ -682,7 +689,7 @@ pub fn serve(
             .clone()
             .and(warp::path("validator_balances"))
             .and(warp::path::end())
-            .and(warp::body::json())
+            .and(warp_utils::json::json())
             .then(
                 |state_id: StateId,
                  task_spawner: TaskSpawner<T::EthSpec>,
@@ -726,7 +733,7 @@ pub fn serve(
             .clone()
             .and(warp::path("validators"))
             .and(warp::path::end())
-            .and(warp::body::json())
+            .and(warp_utils::json::json())
             .then(
                 |state_id: StateId,
                  task_spawner: TaskSpawner<T::EthSpec>,
@@ -1019,7 +1026,7 @@ pub fn serve(
                         Ok((
                             state
                                 .get_built_sync_committee(epoch, &chain.spec)
-                                .map(|committee| committee.clone())
+                                .cloned()
                                 .map_err(|e| match e {
                                     BeaconStateError::SyncCommitteeNotKnown { ..
} => { warp_utils::reject::custom_bad_request(format!( @@ -1257,7 +1264,7 @@ pub fn serve( .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -1327,7 +1334,7 @@ pub fn serve( .and(warp::path("blocks")) .and(warp::query::()) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -1404,7 +1411,7 @@ pub fn serve( .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -1472,7 +1479,7 @@ pub fn serve( .and(warp::path("blinded_blocks")) .and(warp::query::()) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -1754,142 +1761,28 @@ pub fn serve( .clone() .and(warp::path("attestations")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(network_tx_filter.clone()) + .and(reprocess_send_filter) .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, attestations: Vec>, network_tx: UnboundedSender>, - log: Logger| { - task_spawner.blocking_json_task(Priority::P0, move || { - let seen_timestamp = timestamp_now(); - let mut failures = Vec::new(); - let mut num_already_known = 0; - - for (index, attestation) in attestations.as_slice().iter().enumerate() { - let attestation = match chain - .verify_unaggregated_attestation_for_gossip(attestation, None) - { - Ok(attestation) => attestation, - Err(AttnError::PriorAttestationKnown { .. }) => { - num_already_known += 1; - - // Skip to the next attestation since an attestation for this - // validator is already known in this epoch. - // - // There's little value for the network in validating a second - // attestation for another validator since it is either: - // - // 1. A duplicate. - // 2. Slashable. - // 3. Invalid. - // - // We are likely to get duplicates in the case where a VC is using - // fallback BNs. If the first BN actually publishes some/all of a - // batch of attestations but fails to respond in a timely fashion, - // the VC is likely to try publishing the attestations on another - // BN. That second BN may have already seen the attestations from - // the first BN and therefore indicate that the attestations are - // "already seen". An attestation that has already been seen has - // been published on the network so there's no actual error from - // the perspective of the user. - // - // It's better to prevent slashable attestations from ever - // appearing on the network than trying to slash validators, - // especially those validators connected to the local API. - // - // There might be *some* value in determining that this attestation - // is invalid, but since a valid attestation already it exists it - // appears that this validator is capable of producing valid - // attestations and there's no immediate cause for concern. 
- continue; - } - Err(e) => { - error!(log, - "Failure verifying attestation for gossip"; - "error" => ?e, - "request_index" => index, - "committee_index" => attestation.data.index, - "attestation_slot" => attestation.data.slot, - ); - failures.push(api_types::Failure::new( - index, - format!("Verification: {:?}", e), - )); - // skip to the next attestation so we do not publish this one to gossip - continue; - } - }; - - // Notify the validator monitor. - chain - .validator_monitor - .read() - .register_api_unaggregated_attestation( - seen_timestamp, - attestation.indexed_attestation(), - &chain.slot_clock, - ); - - publish_pubsub_message( - &network_tx, - PubsubMessage::Attestation(Box::new(( - attestation.subnet_id(), - attestation.attestation().clone(), - ))), - )?; - - let committee_index = attestation.attestation().data.index; - let slot = attestation.attestation().data.slot; - - if let Err(e) = chain.apply_attestation_to_fork_choice(&attestation) { - error!(log, - "Failure applying verified attestation to fork choice"; - "error" => ?e, - "request_index" => index, - "committee_index" => committee_index, - "slot" => slot, - ); - failures.push(api_types::Failure::new( - index, - format!("Fork choice: {:?}", e), - )); - }; - - if let Err(e) = chain.add_to_naive_aggregation_pool(&attestation) { - error!(log, - "Failure adding verified attestation to the naive aggregation pool"; - "error" => ?e, - "request_index" => index, - "committee_index" => committee_index, - "slot" => slot, - ); - failures.push(api_types::Failure::new( - index, - format!("Naive aggregation pool: {:?}", e), - )); - } - } - - if num_already_known > 0 { - debug!( - log, - "Some unagg attestations already known"; - "count" => num_already_known - ); - } - - if failures.is_empty() { - Ok(()) - } else { - Err(warp_utils::reject::indexed_bad_request( - "error processing attestations".to_string(), - failures, - )) - } - }) + reprocess_tx: Option>, + log: Logger| async move { + let result = crate::publish_attestations::publish_attestations( + task_spawner, + chain, + attestations, + network_tx, + reprocess_tx, + log, + ) + .await + .map(|()| warp::reply::json(&())); + task_spawner::convert_rejection(result).await }, ); @@ -1930,7 +1823,7 @@ pub fn serve( .clone() .and(warp::path("attester_slashings")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .then( |task_spawner: TaskSpawner, @@ -1988,7 +1881,7 @@ pub fn serve( .clone() .and(warp::path("proposer_slashings")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .then( |task_spawner: TaskSpawner, @@ -2046,7 +1939,7 @@ pub fn serve( .clone() .and(warp::path("voluntary_exits")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .then( |task_spawner: TaskSpawner, @@ -2102,7 +1995,7 @@ pub fn serve( .clone() .and(warp::path("sync_committees")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( @@ -2139,7 +2032,7 @@ pub fn serve( .clone() .and(warp::path("bls_to_execution_changes")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( @@ -2434,9 +2327,8 @@ pub fn serve( accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { let update = chain - 
.latest_seen_optimistic_update - .lock() - .clone() + .light_client_server_cache + .get_latest_optimistic_update() .ok_or_else(|| { warp_utils::reject::custom_not_found( "No LightClientOptimisticUpdate is available".to_string(), @@ -2482,9 +2374,8 @@ pub fn serve( accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { let update = chain - .latest_seen_finality_update - .lock() - .clone() + .light_client_server_cache + .get_latest_finality_update() .ok_or_else(|| { warp_utils::reject::custom_not_found( "No LightClientFinalityUpdate is available".to_string(), @@ -2533,7 +2424,7 @@ pub fn serve( .and(warp::path("attestations")) .and(warp::path::param::()) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .then( |task_spawner: TaskSpawner, chain: Arc>, @@ -2583,7 +2474,7 @@ pub fn serve( .and(warp::path("sync_committee")) .and(block_id_or_err) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, @@ -2860,7 +2751,7 @@ pub fn serve( hex::encode( meta_data .syncnets() - .map(|x| x.clone()) + .cloned() .unwrap_or_default() .into_bytes() ) @@ -3326,7 +3217,7 @@ pub fn serve( })) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( @@ -3352,7 +3243,7 @@ pub fn serve( })) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( @@ -3406,7 +3297,7 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( @@ -3519,7 +3410,7 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(network_tx_filter) .and(log_filter.clone()) .then( @@ -3545,7 +3436,7 @@ pub fn serve( .and(warp::path("validator")) .and(warp::path("beacon_committee_subscriptions")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(validator_subscription_tx_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) @@ -3557,34 +3448,34 @@ pub fn serve( chain: Arc>, log: Logger| { task_spawner.blocking_json_task(Priority::P0, move || { - for subscription in &subscriptions { - chain - .validator_monitor - .write() - .auto_register_local_validator(subscription.validator_index); - - let validator_subscription = api_types::ValidatorSubscription { - validator_index: subscription.validator_index, - attestation_committee_index: subscription.committee_index, - slot: subscription.slot, - committee_count_at_slot: subscription.committees_at_slot, - is_aggregator: subscription.is_aggregator, - }; - - let message = ValidatorSubscriptionMessage::AttestationSubscribe { - subscriptions: vec![validator_subscription], - }; - if let Err(e) = validator_subscription_tx.try_send(message) { - warn!( - log, - "Unable to process committee subscriptions"; - "info" => "the host may be overloaded or resource-constrained", - "error" => ?e, - ); - return Err(warp_utils::reject::custom_server_error( - "unable to queue subscription, host may be overloaded or shutting down".to_string(), - )); - } 
+ let subscriptions: std::collections::BTreeSet<_> = subscriptions + .iter() + .map(|subscription| { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + api_types::ValidatorSubscription { + attestation_committee_index: subscription.committee_index, + slot: subscription.slot, + committee_count_at_slot: subscription.committees_at_slot, + is_aggregator: subscription.is_aggregator, + } + }) + .collect(); + let message = + ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + log, + "Unable to process committee subscriptions"; + "info" => "the host may be overloaded or resource-constrained", + "error" => ?e, + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down" + .to_string(), + )); } Ok(()) @@ -3601,7 +3492,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .then( |task_spawner: TaskSpawner, chain: Arc>, @@ -3652,7 +3543,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .then( |task_spawner: TaskSpawner, chain: Arc>, @@ -3826,7 +3717,7 @@ pub fn serve( .and(warp::path("validator")) .and(warp::path("sync_committee_subscriptions")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(validator_subscription_tx_filter) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) @@ -3866,18 +3757,18 @@ pub fn serve( }, ); - // POST vaidator/liveness/{epoch} + // POST validator/liveness/{epoch} let post_validator_liveness_epoch = eth_v1 .and(warp::path("validator")) .and(warp::path("liveness")) .and(warp::path::param::()) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( |epoch: Epoch, - indices: Vec, + indices: api_types::ValidatorIndexData, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { @@ -3896,6 +3787,7 @@ pub fn serve( } let liveness: Vec = indices + .0 .iter() .cloned() .map(|index| { @@ -3913,7 +3805,7 @@ pub fn serve( let post_lighthouse_liveness = warp::path("lighthouse") .and(warp::path("liveness")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( @@ -4016,7 +3908,7 @@ pub fn serve( .and(warp::path("ui")) .and(warp::path("validator_metrics")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( @@ -4035,7 +3927,7 @@ pub fn serve( .and(warp::path("ui")) .and(warp::path("validator_info")) .and(warp::path::end()) - .and(warp::body::json()) + .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( @@ -4073,13 +3965,7 @@ pub fn serve( .and(warp::path::end()) .then(|task_spawner: TaskSpawner| { task_spawner.blocking_json_task(Priority::P1, move || { - Ok(api_types::GenericResponse::from( - lighthouse_network::metrics::NAT_OPEN - .as_ref() - .map(|v| v.get()) - .unwrap_or(0) - != 0, - )) + Ok(api_types::GenericResponse::from(observe_nat())) }) }); @@ -4267,36 +4153,6 @@ pub fn serve( }, ); - // GET 
lighthouse/beacon/states/{state_id}/ssz
-    let get_lighthouse_beacon_states_ssz = warp::path("lighthouse")
-        .and(warp::path("beacon"))
-        .and(warp::path("states"))
-        .and(warp::path::param::<StateId>())
-        .and(warp::path("ssz"))
-        .and(warp::path::end())
-        .and(task_spawner_filter.clone())
-        .and(chain_filter.clone())
-        .then(
-            |state_id: StateId,
-             task_spawner: TaskSpawner<T::EthSpec>,
-             chain: Arc<BeaconChain<T>>| {
-                task_spawner.blocking_response_task(Priority::P1, move || {
-                    // This debug endpoint provides no indication of optimistic status.
-                    let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?;
-                    Response::builder()
-                        .status(200)
-                        .body(state.as_ssz_bytes().into())
-                        .map(|res: Response<Body>| add_ssz_content_type_header(res))
-                        .map_err(|e| {
-                            warp_utils::reject::custom_server_error(format!(
-                                "failed to create response: {}",
-                                e
-                            ))
-                        })
-                })
-            },
-        );
-
     // GET lighthouse/staking
     let get_lighthouse_staking = warp::path("lighthouse")
         .and(warp::path("staking"))
@@ -4368,7 +4224,7 @@ pub fn serve(
     let post_lighthouse_block_rewards = warp::path("lighthouse")
         .and(warp::path("analysis"))
         .and(warp::path("block_rewards"))
-        .and(warp::body::json())
+        .and(warp_utils::json::json())
         .and(warp::path::end())
         .and(task_spawner_filter.clone())
         .and(chain_filter.clone())
@@ -4631,7 +4487,6 @@ pub fn serve(
                 .uor(get_lighthouse_eth1_syncing)
                 .uor(get_lighthouse_eth1_block_cache)
                 .uor(get_lighthouse_eth1_deposit_cache)
-                .uor(get_lighthouse_beacon_states_ssz)
                 .uor(get_lighthouse_staking)
                 .uor(get_lighthouse_database_info)
                 .uor(get_lighthouse_block_rewards)
diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs
new file mode 100644
index 000000000..ed7f1ed17
--- /dev/null
+++ b/beacon_node/http_api/src/publish_attestations.rs
@@ -0,0 +1,319 @@
+//! Import attestations and publish them to the network.
+//!
+//! This module gracefully handles attestations to unknown blocks by requeuing them and then
+//! efficiently waiting for them to finish reprocessing (using an async yield).
+//!
+//! The following comments relate to the handling of duplicate attestations (relocated here
+//! during refactoring):
+//!
+//! Skip to the next attestation since an attestation for this
+//! validator is already known in this epoch.
+//!
+//! There's little value for the network in validating a second
+//! attestation for another validator since it is either:
+//!
+//! 1. A duplicate.
+//! 2. Slashable.
+//! 3. Invalid.
+//!
+//! We are likely to get duplicates in the case where a VC is using
+//! fallback BNs. If the first BN actually publishes some/all of a
+//! batch of attestations but fails to respond in a timely fashion,
+//! the VC is likely to try publishing the attestations on another
+//! BN. That second BN may have already seen the attestations from
+//! the first BN and therefore indicate that the attestations are
+//! "already seen". An attestation that has already been seen has
+//! been published on the network so there's no actual error from
+//! the perspective of the user.
+//!
+//! It's better to prevent slashable attestations from ever
+//! appearing on the network than trying to slash validators,
+//! especially those validators connected to the local API.
+//!
+//! There might be *some* value in determining that this attestation
+//! is invalid, but since a valid attestation already exists it
+//! appears that this validator is capable of producing valid
+//! attestations and there's no immediate cause for concern.
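The requeue-and-wait flow this module doc describes reduces to a oneshot handshake between the HTTP handler and the reprocess queue; a shape-only sketch (illustrative types, not this crate's):

```rust
use tokio::sync::oneshot;

// `enqueue` stands in for the reprocess queue: it stores the closure and runs
// it once the unknown block is imported.
async fn requeue_and_wait<F>(enqueue: F) -> Result<(), &'static str>
where
    F: FnOnce(Box<dyn FnOnce() + Send>),
{
    let (tx, rx) = oneshot::channel();
    enqueue(Box::new(move || {
        // Report the eventual (re)processing result; ignore send failure.
        let _ = tx.send(Ok(()));
    }));
    // Awaiting the oneshot yields to the executor instead of blocking a worker.
    rx.await.map_err(|_| "dropped: timed out in the reprocess queue")?
}
```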
+use crate::task_spawner::{Priority, TaskSpawner};
+use beacon_chain::{
+    validator_monitor::timestamp_now, AttestationError, BeaconChain, BeaconChainError,
+    BeaconChainTypes,
+};
+use beacon_processor::work_reprocessing_queue::{QueuedUnaggregate, ReprocessQueueMessage};
+use eth2::types::Failure;
+use lighthouse_network::PubsubMessage;
+use network::NetworkMessage;
+use slog::{debug, error, warn, Logger};
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::sync::{
+    mpsc::{Sender, UnboundedSender},
+    oneshot,
+};
+use types::Attestation;
+
+// Error variants are only used in `Debug` and considered `dead_code` by the compiler.
+#[derive(Debug)]
+enum Error {
+    Validation(AttestationError),
+    Publication,
+    ForkChoice(#[allow(dead_code)] BeaconChainError),
+    AggregationPool(#[allow(dead_code)] AttestationError),
+    ReprocessDisabled,
+    ReprocessFull,
+    ReprocessTimeout,
+}
+
+enum PublishAttestationResult {
+    Success,
+    AlreadyKnown,
+    Reprocessing(oneshot::Receiver<Result<(), Error>>),
+    Failure(Error),
+}
+
+fn verify_and_publish_attestation<T: BeaconChainTypes>(
+    chain: &Arc<BeaconChain<T>>,
+    attestation: &Attestation<T::EthSpec>,
+    seen_timestamp: Duration,
+    network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
+    log: &Logger,
+) -> Result<(), Error> {
+    let attestation = chain
+        .verify_unaggregated_attestation_for_gossip(attestation, None)
+        .map_err(Error::Validation)?;
+
+    // Publish.
+    network_tx
+        .send(NetworkMessage::Publish {
+            messages: vec![PubsubMessage::Attestation(Box::new((
+                attestation.subnet_id(),
+                attestation.attestation().clone(),
+            )))],
+        })
+        .map_err(|_| Error::Publication)?;
+
+    // Notify the validator monitor.
+    chain
+        .validator_monitor
+        .read()
+        .register_api_unaggregated_attestation(
+            seen_timestamp,
+            attestation.indexed_attestation(),
+            &chain.slot_clock,
+        );
+
+    let fc_result = chain.apply_attestation_to_fork_choice(&attestation);
+    let naive_aggregation_result = chain.add_to_naive_aggregation_pool(&attestation);
+
+    if let Err(e) = &fc_result {
+        warn!(
+            log,
+            "Attestation invalid for fork choice";
+            "err" => ?e,
+        );
+    }
+    if let Err(e) = &naive_aggregation_result {
+        warn!(
+            log,
+            "Attestation invalid for aggregation";
+            "err" => ?e
+        );
+    }
+
+    if let Err(e) = fc_result {
+        Err(Error::ForkChoice(e))
+    } else if let Err(e) = naive_aggregation_result {
+        Err(Error::AggregationPool(e))
+    } else {
+        Ok(())
+    }
+}
+
+pub async fn publish_attestations<T: BeaconChainTypes>(
+    task_spawner: TaskSpawner<T::EthSpec>,
+    chain: Arc<BeaconChain<T>>,
+    attestations: Vec<Attestation<T::EthSpec>>,
+    network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+    reprocess_send: Option<Sender<ReprocessQueueMessage>>,
+    log: Logger,
+) -> Result<(), warp::Rejection> {
+    // Collect metadata about attestations which we'll use to report failures. We need to
+    // move the `attestations` vec into the blocking task, so this small overhead is unavoidable.
+    let attestation_metadata = attestations
+        .iter()
+        .map(|att| (att.data.slot, att.data.index))
+        .collect::<Vec<_>>();
+
+    // Gossip validate and publish attestations that can be immediately processed.
+    let seen_timestamp = timestamp_now();
+    let inner_log = log.clone();
+    let mut prelim_results = task_spawner
+        .blocking_task(Priority::P0, move || {
+            Ok(attestations
+                .into_iter()
+                .map(|attestation| {
+                    match verify_and_publish_attestation(
+                        &chain,
+                        &attestation,
+                        seen_timestamp,
+                        &network_tx,
+                        &inner_log,
+                    ) {
+                        Ok(()) => PublishAttestationResult::Success,
+                        Err(Error::Validation(AttestationError::UnknownHeadBlock {
+                            beacon_block_root,
+                        })) => {
+                            let Some(reprocess_tx) = &reprocess_send else {
+                                return PublishAttestationResult::Failure(Error::ReprocessDisabled);
+                            };
+                            // Re-process.
+                            let (tx, rx) = oneshot::channel();
+                            let reprocess_chain = chain.clone();
+                            let reprocess_network_tx = network_tx.clone();
+                            let reprocess_log = inner_log.clone();
+                            let reprocess_fn = move || {
+                                let result = verify_and_publish_attestation(
+                                    &reprocess_chain,
+                                    &attestation,
+                                    seen_timestamp,
+                                    &reprocess_network_tx,
+                                    &reprocess_log,
+                                );
+                                // Ignore failure on the oneshot that reports the result. This
+                                // shouldn't happen unless some catastrophe befalls the waiting
+                                // thread which causes it to drop.
+                                let _ = tx.send(result);
+                            };
+                            let reprocess_msg =
+                                ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate {
+                                    beacon_block_root,
+                                    process_fn: Box::new(reprocess_fn),
+                                });
+                            if reprocess_tx.try_send(reprocess_msg).is_err() {
+                                PublishAttestationResult::Failure(Error::ReprocessFull)
+                            } else {
+                                PublishAttestationResult::Reprocessing(rx)
+                            }
+                        }
+                        Err(Error::Validation(AttestationError::PriorAttestationKnown {
+                            ..
+                        })) => PublishAttestationResult::AlreadyKnown,
+                        Err(e) => PublishAttestationResult::Failure(e),
+                    }
+                })
+                .map(Some)
+                .collect::<Vec<_>>())
+        })
+        .await?;
+
+    // Asynchronously wait for re-processing of attestations to unknown blocks. This avoids
+    // blocking any of the beacon processor workers while we wait for reprocessing.
+    let (reprocess_indices, reprocess_futures): (Vec<_>, Vec<_>) = prelim_results
+        .iter_mut()
+        .enumerate()
+        .filter_map(|(i, opt_result)| {
+            if let Some(PublishAttestationResult::Reprocessing(..)) = &opt_result {
+                let PublishAttestationResult::Reprocessing(rx) = opt_result.take()? else {
+                    // Unreachable.
+                    return None;
+                };
+                Some((i, rx))
+            } else {
+                None
+            }
+        })
+        .unzip();
+    let reprocess_results = futures::future::join_all(reprocess_futures).await;
+
+    // Join everything back together and construct a response.
+    // This part should be quick so we just stay in the Tokio executor's async task.
+    for (i, reprocess_result) in reprocess_indices.into_iter().zip(reprocess_results) {
+        let Some(result_entry) = prelim_results.get_mut(i) else {
+            error!(
+                log,
+                "Unreachable case in attestation publishing";
+                "case" => "prelim out of bounds",
+                "request_index" => i,
+            );
+            continue;
+        };
+        *result_entry = Some(match reprocess_result {
+            Ok(Ok(())) => PublishAttestationResult::Success,
+            // Attestation failed processing on re-process.
+            Ok(Err(Error::Validation(AttestationError::PriorAttestationKnown { .. }))) => {
+                PublishAttestationResult::AlreadyKnown
+            }
+            Ok(Err(e)) => PublishAttestationResult::Failure(e),
+            // Oneshot was dropped, indicating that the attestation either timed out in the
+            // reprocess queue or was dropped due to some error.
+            Err(_) => PublishAttestationResult::Failure(Error::ReprocessTimeout),
+        });
+    }
+
+    // Construct the response.
+    let mut failures = vec![];
+    let mut num_already_known = 0;
+
+    for (index, result) in prelim_results.iter().enumerate() {
+        match result {
+            Some(PublishAttestationResult::Success) => {}
+            Some(PublishAttestationResult::AlreadyKnown) => num_already_known += 1,
+            Some(PublishAttestationResult::Failure(e)) => {
+                if let Some((slot, committee_index)) = attestation_metadata.get(index) {
+                    error!(
+                        log,
+                        "Failure verifying attestation for gossip";
+                        "error" => ?e,
+                        "request_index" => index,
+                        "committee_index" => committee_index,
+                        "attestation_slot" => slot,
+                    );
+                    failures.push(Failure::new(index, format!("{e:?}")));
+                } else {
+                    error!(
+                        log,
+                        "Unreachable case in attestation publishing";
+                        "case" => "out of bounds",
+                        "request_index" => index
+                    );
+                    failures.push(Failure::new(index, "metadata logic error".into()));
+                }
+            }
+            Some(PublishAttestationResult::Reprocessing(_)) => {
+                error!(
+                    log,
+                    "Unreachable case in attestation publishing";
+                    "case" => "reprocessing",
+                    "request_index" => index
+                );
+                failures.push(Failure::new(index, "reprocess logic error".into()));
+            }
+            None => {
+                error!(
+                    log,
+                    "Unreachable case in attestation publishing";
+                    "case" => "result is None",
+                    "request_index" => index
+                );
+                failures.push(Failure::new(index, "result logic error".into()));
+            }
+        }
+    }
+
+    if num_already_known > 0 {
+        debug!(
+            log,
+            "Some unagg attestations already known";
+            "count" => num_already_known
+        );
+    }
+
+    if failures.is_empty() {
+        Ok(())
+    } else {
+        Err(warp_utils::reject::indexed_bad_request(
+            "error processing attestations".to_string(),
+            failures,
+        ))
+    }
+}
diff --git a/beacon_node/http_api/src/task_spawner.rs b/beacon_node/http_api/src/task_spawner.rs
index 8768e057d..cfee5e01c 100644
--- a/beacon_node/http_api/src/task_spawner.rs
+++ b/beacon_node/http_api/src/task_spawner.rs
@@ -60,11 +60,15 @@ impl<E: EthSpec> TaskSpawner<E> {
         }
     }
 
-    /// Executes a "blocking" (non-async) task which returns a `Response`.
-    pub async fn blocking_response_task<F, T>(self, priority: Priority, func: F) -> Response
+    /// Executes a "blocking" (non-async) task which returns an arbitrary value.
+    pub async fn blocking_task<F, T>(
+        self,
+        priority: Priority,
+        func: F,
+    ) -> Result<T, warp::Rejection>
     where
         F: FnOnce() -> Result<T, warp::Rejection> + Send + Sync + 'static,
-        T: Reply + Send + 'static,
+        T: Send + 'static,
     {
         if let Some(beacon_processor_send) = &self.beacon_processor_send {
             // Create a closure that will execute `func` and send the result to
@@ -79,22 +83,31 @@ impl<E: EthSpec> TaskSpawner<E> {
             };
 
             // Send the function to the beacon processor for execution at some arbitrary time.
-            let result = send_to_beacon_processor(
+            send_to_beacon_processor(
                 beacon_processor_send,
                 priority,
                 BlockingOrAsync::Blocking(Box::new(process_fn)),
                 rx,
             )
             .await
-            .and_then(|x| x);
-            convert_rejection(result).await
+            .and_then(|x| x)
         } else {
             // There is no beacon processor so spawn a task directly on the
            // tokio executor.
-            convert_rejection(warp_utils::task::blocking_response_task(func).await).await
+            warp_utils::task::blocking_task(func).await
         }
     }
 
+    /// Executes a "blocking" (non-async) task which returns a `Response`.
+    pub async fn blocking_response_task<F, T>(self, priority: Priority, func: F) -> Response
+    where
+        F: FnOnce() -> Result<T, warp::Rejection> + Send + Sync + 'static,
+        T: Reply + Send + 'static,
+    {
+        let result = self.blocking_task(priority, func).await;
+        convert_rejection(result).await
+    }
+
     /// Executes a "blocking" (non-async) task which returns a JSON-serializable
     /// object.
pub async fn blocking_json_task(self, priority: Priority, func: F) -> Response diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index b87fdf608..c1313168b 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -35,6 +35,7 @@ pub const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000"; /// HTTP API tester that allows interaction with the underlying beacon chain harness. pub struct InteractiveTester { + pub ctx: Arc>>, pub harness: BeaconChainHarness>, pub client: BeaconNodeHttpClient, pub network_rx: NetworkReceivers, @@ -43,10 +44,11 @@ pub struct InteractiveTester { /// The result of calling `create_api_server`. /// /// Glue-type between `tests::ApiTester` and `InteractiveTester`. -pub struct ApiServer> { +pub struct ApiServer> { + pub ctx: Arc>, pub server: SFut, pub listening_socket: SocketAddr, - pub network_rx: NetworkReceivers, + pub network_rx: NetworkReceivers, pub local_enr: Enr, pub external_peer_id: PeerId, } @@ -90,6 +92,7 @@ impl InteractiveTester { let harness = harness_builder.build(); let ApiServer { + ctx, server, listening_socket, network_rx, @@ -114,6 +117,7 @@ impl InteractiveTester { ); Self { + ctx, harness, client, network_rx, @@ -125,7 +129,7 @@ pub async fn create_api_server( chain: Arc>, test_runtime: &TestRuntime, log: Logger, -) -> ApiServer> { +) -> ApiServer> { // Use port 0 to allocate a new unused port. let port = 0; @@ -187,6 +191,7 @@ pub async fn create_api_server( } = BeaconProcessorChannels::new(&beacon_processor_config); let beacon_processor_send = beacon_processor_tx; + let reprocess_send = work_reprocessing_tx.clone(); BeaconProcessor { network_globals: network_globals.clone(), executor: test_runtime.task_executor.clone(), @@ -216,14 +221,17 @@ pub async fn create_api_server( network_senders: Some(network_senders), network_globals: Some(network_globals), beacon_processor_send: Some(beacon_processor_send), + beacon_processor_reprocess_send: Some(reprocess_send), eth1_service: Some(eth1_service), sse_logging_components: None, log, }); - let (listening_socket, server) = crate::serve(ctx, test_runtime.task_executor.exit()).unwrap(); + let (listening_socket, server) = + crate::serve(ctx.clone(), test_runtime.task_executor.exit()).unwrap(); ApiServer { + ctx, server, listening_socket, network_rx: network_receivers, diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 6fb197b41..d63d04fce 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -4,6 +4,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, ChainConfig, }; +use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; @@ -840,3 +841,78 @@ pub async fn fork_choice_before_proposal() { // D's parent is B. assert_eq!(block_d.parent_root(), block_root_b.into()); } + +// Test that attestations to unknown blocks are requeued and processed when their block arrives. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn queue_attestations_from_http() {
+    let validator_count = 128;
+    let all_validators = (0..validator_count).collect::<Vec<_>>();
+
+    let tester = InteractiveTester::<E>::new(None, validator_count).await;
+    let harness = &tester.harness;
+    let client = tester.client.clone();
+
+    let num_initial = 5;
+
+    // Slot of the block attested to.
+    let attestation_slot = Slot::new(num_initial) + 1;
+
+    // Make some initial blocks.
+    harness.advance_slot();
+    harness
+        .extend_chain(
+            num_initial as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    harness.advance_slot();
+    assert_eq!(harness.get_current_slot(), attestation_slot);
+
+    // Make the attested-to block without applying it.
+    let pre_state = harness.get_current_state();
+    let (block, post_state) = harness.make_block(pre_state, attestation_slot).await;
+    let block_root = block.0.canonical_root();
+
+    // Make attestations to the block and POST them to the beacon node on a background thread.
+    let attestations = harness
+        .make_unaggregated_attestations(
+            &all_validators,
+            &post_state,
+            block.0.state_root(),
+            block_root.into(),
+            attestation_slot,
+        )
+        .into_iter()
+        .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att))
+        .collect::<Vec<_>>();
+
+    let attestation_future = tokio::spawn(async move {
+        client
+            .post_beacon_pool_attestations(&attestations)
+            .await
+            .expect("attestations should be processed successfully")
+    });
+
+    // In parallel, apply the block. We need to manually notify the reprocess queue, because the
+    // `beacon_chain` does not know about the queue and will not update it for us.
+    let parent_root = block.0.parent_root();
+    harness
+        .process_block(attestation_slot, block_root, block)
+        .await
+        .unwrap();
+    tester
+        .ctx
+        .beacon_processor_reprocess_send
+        .as_ref()
+        .unwrap()
+        .send(ReprocessQueueMessage::BlockImported {
+            block_root,
+            parent_root,
+        })
+        .await
+        .unwrap();
+
+    attestation_future.await.unwrap();
+}
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs
index 933f98661..098f9f105 100644
--- a/beacon_node/http_api/tests/tests.rs
+++ b/beacon_node/http_api/tests/tests.rs
@@ -248,6 +248,7 @@ impl ApiTester {
         let log = null_logger().unwrap();
 
         let ApiServer {
+            ctx: _,
             server,
             listening_socket,
             network_rx,
@@ -341,6 +342,7 @@ impl ApiTester {
         let log = null_logger().unwrap();
 
         let ApiServer {
+            ctx: _,
             server,
             listening_socket,
             network_rx,
@@ -1731,7 +1733,10 @@ impl ApiTester {
             Err(e) => panic!("query failed incorrectly: {e:?}"),
         };
 
-        let expected = self.chain.latest_seen_optimistic_update.lock().clone();
+        let expected = self
+            .chain
+            .light_client_server_cache
+            .get_latest_optimistic_update();
         assert_eq!(result, expected);
 
         self
@@ -1747,7 +1752,10 @@ impl ApiTester {
             Err(e) => panic!("query failed incorrectly: {e:?}"),
         };
 
-        let expected = self.chain.latest_seen_finality_update.lock().clone();
+        let expected = self
+            .chain
+            .light_client_server_cache
+            .get_latest_finality_update();
         assert_eq!(result, expected);
 
         self
@@ -2713,6 +2721,31 @@ impl ApiTester {
         self
     }
 
+    /// Check that the metadata from the headers & JSON response body are consistent, and that the
+    /// consensus block value is non-zero.
+    fn check_block_v3_metadata(
+        metadata: &ProduceBlockV3Metadata,
+        response: &JsonProduceBlockV3Response<E>,
+    ) {
+        // Compare fork name to ForkVersionedResponse rather than metadata consensus_version, which
+        // is deserialized to a dummy value.
+ assert_eq!(Some(metadata.consensus_version), response.version); + assert_eq!(ForkName::Base, response.metadata.consensus_version); + assert_eq!( + metadata.execution_payload_blinded, + response.metadata.execution_payload_blinded + ); + assert_eq!( + metadata.execution_payload_value, + response.metadata.execution_payload_value + ); + assert_eq!( + metadata.consensus_block_value, + response.metadata.consensus_block_value + ); + assert!(!metadata.consensus_block_value.is_zero()); + } + pub async fn test_block_production_v3_ssz(self) -> Self { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -3574,11 +3607,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: BlindedPayload = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => { @@ -3600,11 +3634,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(0)) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -3626,11 +3661,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(u64::MAX)) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: BlindedPayload = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => { @@ -3730,11 +3766,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: BlindedPayload = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => { @@ -3806,11 +3843,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: BlindedPayload = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => { @@ -3896,11 +3934,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -3982,11 +4021,12 @@ impl ApiTester { .unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + 
Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -4068,11 +4108,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -4152,11 +4193,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -4208,11 +4250,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4274,11 +4317,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4382,11 +4426,12 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Blinded(_) => (), @@ -4402,11 +4447,12 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4530,11 +4576,12 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4560,11 +4607,12 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Blinded(_) => (), @@ -4640,11 +4688,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) 
= self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -4709,11 +4758,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Blinded(_) => (), @@ -4773,11 +4823,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4837,11 +4888,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4899,11 +4951,12 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let _block_contents = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => payload, @@ -4971,11 +5024,12 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -5057,26 +5111,6 @@ impl ApiTester { self } - pub async fn test_get_lighthouse_beacon_states_ssz(self) -> Self { - for state_id in self.interesting_state_ids() { - let result = self - .client - .get_lighthouse_beacon_states_ssz(&state_id.0, &self.chain.spec) - .await - .unwrap(); - - let mut expected = state_id - .state(&self.chain) - .ok() - .map(|(state, _execution_optimistic, _finalized)| state); - expected.as_mut().map(|state| state.drop_all_caches()); - - assert_eq!(result, expected, "{:?}", state_id); - } - - self - } - pub async fn test_get_lighthouse_staking(self) -> Self { let result = self.client.get_lighthouse_staking().await.unwrap(); @@ -6373,8 +6407,6 @@ async fn lighthouse_endpoints() { .await .test_get_lighthouse_eth1_deposit_cache() .await - .test_get_lighthouse_beacon_states_ssz() - .await .test_get_lighthouse_staking() .await .test_get_lighthouse_database_info() diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 46acdeade..171141807 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] +async-channel = { workspace = 
true } discv5 = { workspace = true } unsigned-varint = { version = "0.6", features = ["codec"] } ssz_types = { workspace = true } @@ -42,22 +43,36 @@ superstruct = { workspace = true } prometheus-client = "0.22.0" unused_port = { workspace = true } delay_map = { workspace = true } -void = "1" -libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p/", rev = "cfa3275ca17e502799ed56e555b6c0611752e369" } +tracing = { workspace = true } +byteorder = { workspace = true } +bytes = { workspace = true } +either = { workspace = true } + +# Local dependencies +futures-ticker = "0.0.3" +futures-timer = "3.0.2" +getrandom = "0.2.11" +hex_fmt = "0.3.0" +instant = "0.1.12" +quick-protobuf = "0.8" +void = "1.0.2" +asynchronous-codec = "0.7.0" +base64 = "0.21.5" +libp2p-mplex = "0.41" +quick-protobuf-codec = "0.3" [dependencies.libp2p] -git = "https://github.com/sigp/rust-libp2p/" -rev = "cfa3275ca17e502799ed56e555b6c0611752e369" +version = "0.53" default-features = false -features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic"] +features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] [dev-dependencies] slog-term = { workspace = true } slog-async = { workspace = true } tempfile = { workspace = true } -exit-future = { workspace = true } quickcheck = { workspace = true } quickcheck_macros = { workspace = true } +async-std = { version = "1.6.3", features = ["unstable"] } [features] libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 169a061d2..02134580e 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -1,3 +1,4 @@ +use crate::gossipsub; use crate::listen_addr::{ListenAddr, ListenAddress}; use crate::rpc::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; use crate::types::GossipKind; @@ -5,7 +6,6 @@ use crate::{Enr, PeerIdSerialized}; use directory::{ DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, }; -use libp2p::gossipsub; use libp2p::Multiaddr; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -101,7 +101,7 @@ pub struct Config { /// List of libp2p nodes to initially connect to. pub libp2p_nodes: Vec, - /// List of trusted libp2p nodes which are not scored. + /// List of trusted libp2p nodes which are not scored and marked as explicit. pub trusted_peers: Vec, /// Disables peer scoring altogether. @@ -157,10 +157,6 @@ pub struct Config { /// Configuration for the inbound rate limiter (requests received by this node). pub inbound_rate_limiter_config: Option, - - /// Whether to disable logging duplicate gossip messages as WARN. If set to true, duplicate - /// errors will be logged at DEBUG level. 
-    pub disable_duplicate_warn_logs: bool,
 }
 
 impl Config {
@@ -354,7 +350,7 @@ impl Default for Config {
             enr_udp6_port: None,
             enr_quic6_port: None,
             enr_tcp6_port: None,
-            target_peers: 50,
+            target_peers: 100,
             gs_config,
             discv5_config,
             boot_nodes_enr: vec![],
@@ -378,7 +374,6 @@ impl Default for Config {
             outbound_rate_limiter_config: None,
             invalid_block_storage: None,
             inbound_rate_limiter_config: None,
-            disable_duplicate_warn_logs: false,
         }
     }
 }
diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs
index 829124e12..347c2352f 100644
--- a/beacon_node/lighthouse_network/src/discovery/mod.rs
+++ b/beacon_node/lighthouse_network/src/discovery/mod.rs
@@ -59,7 +59,7 @@ const MAX_DISCOVERY_RETRY: usize = 3;
 /// Note: we always allow a single FindPeers query, so we would be
 /// running a maximum of `MAX_CONCURRENT_SUBNET_QUERIES + 1`
 /// discovery queries at a time.
-const MAX_CONCURRENT_SUBNET_QUERIES: usize = 2;
+const MAX_CONCURRENT_SUBNET_QUERIES: usize = 4;
 /// The max number of subnets to search for in a single subnet discovery query.
 const MAX_SUBNETS_IN_QUERY: usize = 3;
 /// The number of closest peers to search for when doing a regular peer search.
@@ -1004,7 +1004,10 @@ impl NetworkBehaviour for Discovery {
                 discv5::Event::SocketUpdated(socket_addr) => {
                     info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port());
                     metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT);
-                    metrics::check_nat();
+                    // A `SocketUpdated` event means discv5 has received a majority of peer
+                    // reports agreeing on our external address and has updated our ENR,
+                    // which means we are able to do NAT traversal.
+                    metrics::set_gauge_vec(&metrics::NAT_OPEN, &["discv5"], 1);
 
                     // Discv5 will have updated our local ENR. We save the updated version
                     // to disk.
diff --git a/beacon_node/lighthouse_network/src/gossipsub/backoff.rs b/beacon_node/lighthouse_network/src/gossipsub/backoff.rs
new file mode 100644
index 000000000..0752f800b
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/backoff.rs
@@ -0,0 +1,175 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! Data structure for efficiently storing known back-offs when pruning peers.
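+//!
+//! Rough usage sketch (illustrative only; the durations and the `topic`/`peer` bindings are
+//! assumptions, not values taken from this patch):
+//!
+//! ```ignore
+//! let mut backoffs = BackoffStorage::new(&Duration::from_secs(60), Duration::from_secs(1), 1);
+//! // Record a PRUNE backoff of 60s for `peer` on `topic`.
+//! backoffs.update_backoff(&topic, &peer, Duration::from_secs(60));
+//! // Refuse to GRAFT while the peer is still backed off (with slack).
+//! if backoffs.is_backoff_with_slack(&topic, &peer) { /* skip the GRAFT */ }
+//! // Called once per heartbeat interval; expired entries are swept lazily.
+//! backoffs.heartbeat();
+//! ```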
+use crate::gossipsub::topic::TopicHash;
+use instant::Instant;
+use libp2p::identity::PeerId;
+use std::collections::{
+    hash_map::{Entry, HashMap},
+    HashSet,
+};
+use std::time::Duration;
+
+#[derive(Copy, Clone)]
+struct HeartbeatIndex(usize);
+
+/// Stores backoffs in an efficient manner.
+pub(crate) struct BackoffStorage {
+    /// Stores backoffs and the index in backoffs_by_heartbeat per peer per topic.
+    backoffs: HashMap<TopicHash, HashMap<PeerId, (Instant, HeartbeatIndex)>>,
+    /// Stores peer-topic pairs per heartbeat (this is cyclic; the current index is
+    /// `heartbeat_index`).
+    backoffs_by_heartbeat: Vec<HashSet<(TopicHash, PeerId)>>,
+    /// The index in the backoffs_by_heartbeat vector corresponding to the current heartbeat.
+    heartbeat_index: HeartbeatIndex,
+    /// The heartbeat interval duration from the config.
+    heartbeat_interval: Duration,
+    /// Backoff slack from the config.
+    backoff_slack: u32,
+}
+
+impl BackoffStorage {
+    fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize {
+        ((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos())
+            as usize
+    }
+
+    pub(crate) fn new(
+        prune_backoff: &Duration,
+        heartbeat_interval: Duration,
+        backoff_slack: u32,
+    ) -> BackoffStorage {
+        // We add one additional slot for a partial heartbeat
+        let max_heartbeats =
+            Self::heartbeats(prune_backoff, &heartbeat_interval) + backoff_slack as usize + 1;
+        BackoffStorage {
+            backoffs: HashMap::new(),
+            backoffs_by_heartbeat: vec![HashSet::new(); max_heartbeats],
+            heartbeat_index: HeartbeatIndex(0),
+            heartbeat_interval,
+            backoff_slack,
+        }
+    }
+
+    /// Updates the backoff for a peer (if there is already a more restrictive backoff, then this
+    /// call doesn't change anything).
+    pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) {
+        let instant = Instant::now() + time;
+        let insert_into_backoffs_by_heartbeat =
+            |heartbeat_index: HeartbeatIndex,
+             backoffs_by_heartbeat: &mut Vec<HashSet<(TopicHash, PeerId)>>,
+             heartbeat_interval,
+             backoff_slack| {
+                let pair = (topic.clone(), *peer);
+                let index = (heartbeat_index.0
+                    + Self::heartbeats(&time, heartbeat_interval)
+                    + backoff_slack as usize)
+                    % backoffs_by_heartbeat.len();
+                backoffs_by_heartbeat[index].insert(pair);
+                HeartbeatIndex(index)
+            };
+        match self.backoffs.entry(topic.clone()).or_default().entry(*peer) {
+            Entry::Occupied(mut o) => {
+                let (backoff, index) = o.get();
+                if backoff < &instant {
+                    let pair = (topic.clone(), *peer);
+                    if let Some(s) = self.backoffs_by_heartbeat.get_mut(index.0) {
+                        s.remove(&pair);
+                    }
+                    let index = insert_into_backoffs_by_heartbeat(
+                        self.heartbeat_index,
+                        &mut self.backoffs_by_heartbeat,
+                        &self.heartbeat_interval,
+                        self.backoff_slack,
+                    );
+                    o.insert((instant, index));
+                }
+            }
+            Entry::Vacant(v) => {
+                let index = insert_into_backoffs_by_heartbeat(
+                    self.heartbeat_index,
+                    &mut self.backoffs_by_heartbeat,
+                    &self.heartbeat_interval,
+                    self.backoff_slack,
+                );
+                v.insert((instant, index));
+            }
+        };
+    }
+
+    /// Checks if a given peer is backoffed for the given topic. This method respects the
+    /// configured BACKOFF_SLACK and may return true even if the backoff is already over.
+    /// It is guaranteed to return true while the backoff is not over, and, once enough time
+    /// has passed, to return false after the backoff is over.
+    ///
+    /// This method should be used for deciding if we can already send a GRAFT to a previously
+    /// backoffed peer.
+ pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool { + self.backoffs + .get(topic) + .map_or(false, |m| m.contains_key(peer)) + } + + pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option { + Self::get_backoff_time_from_backoffs(&self.backoffs, topic, peer) + } + + fn get_backoff_time_from_backoffs( + backoffs: &HashMap>, + topic: &TopicHash, + peer: &PeerId, + ) -> Option { + backoffs + .get(topic) + .and_then(|m| m.get(peer).map(|(i, _)| *i)) + } + + /// Applies a heartbeat. That should be called regularly in intervals of length + /// `heartbeat_interval`. + pub(crate) fn heartbeat(&mut self) { + // Clean up backoffs_by_heartbeat + if let Some(s) = self.backoffs_by_heartbeat.get_mut(self.heartbeat_index.0) { + let backoffs = &mut self.backoffs; + let slack = self.heartbeat_interval * self.backoff_slack; + let now = Instant::now(); + s.retain(|(topic, peer)| { + let keep = match Self::get_backoff_time_from_backoffs(backoffs, topic, peer) { + Some(backoff_time) => backoff_time + slack > now, + None => false, + }; + if !keep { + //remove from backoffs + if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) { + if m.get_mut().remove(peer).is_some() && m.get().is_empty() { + m.remove(); + } + } + } + + keep + }); + } + + // Increase heartbeat index + self.heartbeat_index = + HeartbeatIndex((self.heartbeat_index.0 + 1) % self.backoffs_by_heartbeat.len()); + } +} diff --git a/beacon_node/lighthouse_network/src/gossipsub/behaviour.rs b/beacon_node/lighthouse_network/src/gossipsub/behaviour.rs new file mode 100644 index 000000000..10025626d --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/behaviour.rs @@ -0,0 +1,3488 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use std::{ + cmp::{max, Ordering}, + collections::HashSet, + collections::VecDeque, + collections::{BTreeSet, HashMap}, + fmt, + net::IpAddr, + task::{Context, Poll}, + time::Duration, +}; + +use futures::StreamExt; +use futures_ticker::Ticker; +use prometheus_client::registry::Registry; +use rand::{seq::SliceRandom, thread_rng}; + +use instant::Instant; +use libp2p::core::{multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, Endpoint, Multiaddr}; +use libp2p::identity::Keypair; +use libp2p::identity::PeerId; +use libp2p::swarm::{ + behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm}, + dial_opts::DialOpts, + ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, +}; + +use super::gossip_promises::GossipPromises; +use super::handler::{Handler, HandlerEvent, HandlerIn}; +use super::mcache::MessageCache; +use super::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}; +use super::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; +use super::protocol::SIGNING_PREFIX; +use super::rpc_proto::proto; +use super::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; +use super::time_cache::DuplicateCache; +use super::topic::{Hasher, Topic, TopicHash}; +use super::transform::{DataTransform, IdentityTransform}; +use super::types::{ + ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, + SubscriptionAction, +}; +use super::types::{Graft, IHave, IWant, PeerConnections, PeerKind, Prune}; +use super::{backoff::BackoffStorage, types::RpcSender}; +use super::{ + config::{Config, ValidationMode}, + types::RpcOut, +}; +use super::{FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError}; +use instant::SystemTime; +use quick_protobuf::{MessageWrite, Writer}; +use std::{cmp::Ordering::Equal, fmt::Debug}; + +#[cfg(test)] +mod tests; + +/// Determines if published messages should be signed or not. +/// +/// Without signing, a number of privacy preserving modes can be selected. +/// +/// NOTE: The default validation settings are to require signatures. The [`ValidationMode`] +/// should be updated in the [`Config`] to allow for unsigned messages. +#[derive(Clone)] +pub enum MessageAuthenticity { + /// Message signing is enabled. The author will be the owner of the key and the sequence number + /// will be linearly increasing. + Signed(Keypair), + /// Message signing is disabled. + /// + /// The specified [`PeerId`] will be used as the author of all published messages. The sequence + /// number will be randomized. + Author(PeerId), + /// Message signing is disabled. + /// + /// A random [`PeerId`] will be used when publishing each message. The sequence number will be + /// randomized. + RandomAuthor, + /// Message signing is disabled. + /// + /// The author of the message and the sequence numbers are excluded from the message. + /// + /// NOTE: Excluding these fields may make these messages invalid by other nodes who + /// enforce validation of these fields. See [`ValidationMode`] in the [`Config`] + /// for how to customise this for rust-libp2p gossipsub. A custom `message_id` + /// function will need to be set to prevent all messages from a peer being filtered + /// as duplicates. + Anonymous, +} + +impl MessageAuthenticity { + /// Returns true if signing is enabled. 
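+    // (Illustrative example of picking an authenticity mode; `keypair` is an assumed
+    // pre-existing `Keypair`, not something defined in this patch.)
+    //
+    //     let signed = MessageAuthenticity::Signed(keypair);
+    //     let anonymous = MessageAuthenticity::Anonymous; // needs a permissive ValidationMode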
+    pub fn is_signing(&self) -> bool {
+        matches!(self, MessageAuthenticity::Signed(_))
+    }
+
+    pub fn is_anonymous(&self) -> bool {
+        matches!(self, MessageAuthenticity::Anonymous)
+    }
+}
+
+/// Event that can be emitted by the gossipsub behaviour.
+#[derive(Debug)]
+pub enum Event {
+    /// A message has been received.
+    Message {
+        /// The peer that forwarded us this message.
+        propagation_source: PeerId,
+        /// The [`MessageId`] of the message. This should be referenced by the application when
+        /// validating a message (if required).
+        message_id: MessageId,
+        /// The decompressed message itself.
+        message: Message,
+    },
+    /// A remote subscribed to a topic.
+    Subscribed {
+        /// Remote that has subscribed.
+        peer_id: PeerId,
+        /// The topic it has subscribed to.
+        topic: TopicHash,
+    },
+    /// A remote unsubscribed from a topic.
+    Unsubscribed {
+        /// Remote that has unsubscribed.
+        peer_id: PeerId,
+        /// The topic it has unsubscribed from.
+        topic: TopicHash,
+    },
+    /// A peer that does not support gossipsub has connected.
+    GossipsubNotSupported { peer_id: PeerId },
+    /// A peer is not able to download messages in time.
+    SlowPeer {
+        /// The peer_id
+        peer_id: PeerId,
+        /// The types and amounts of failed messages that are occurring for this peer.
+        failed_messages: FailedMessages,
+    },
+}
+
+/// A data structure for storing configuration for publishing messages. See [`MessageAuthenticity`]
+/// for further details.
+#[allow(clippy::large_enum_variant)]
+enum PublishConfig {
+    Signing {
+        keypair: Keypair,
+        author: PeerId,
+        inline_key: Option<Vec<u8>>,
+        last_seq_no: SequenceNumber,
+    },
+    Author(PeerId),
+    RandomAuthor,
+    Anonymous,
+}
+
+/// A strictly linearly increasing sequence number.
+///
+/// We start from the current time as unix timestamp in nanoseconds.
+#[derive(Debug)]
+struct SequenceNumber(u64);
+
+impl SequenceNumber {
+    fn new() -> Self {
+        let unix_timestamp = SystemTime::now()
+            .duration_since(SystemTime::UNIX_EPOCH)
+            .expect("time to be linear")
+            .as_nanos();
+
+        Self(unix_timestamp as u64)
+    }
+
+    fn next(&mut self) -> u64 {
+        self.0 = self
+            .0
+            .checked_add(1)
+            .expect("to not exhaust u64 space for sequence numbers");
+
+        self.0
+    }
+}
+
+impl PublishConfig {
+    pub(crate) fn get_own_id(&self) -> Option<&PeerId> {
+        match self {
+            Self::Signing { author, .. } => Some(author),
+            Self::Author(author) => Some(author),
+            _ => None,
+        }
+    }
+}
+
+impl From<MessageAuthenticity> for PublishConfig {
+    fn from(authenticity: MessageAuthenticity) -> Self {
+        match authenticity {
+            MessageAuthenticity::Signed(keypair) => {
+                let public_key = keypair.public();
+                let key_enc = public_key.encode_protobuf();
+                let key = if key_enc.len() <= 42 {
+                    // The public key can be inlined in [`rpc_proto::proto::Message::from`], so we don't include it
+                    // specifically in the [`rpc_proto::proto::Message::key`] field.
+                    None
+                } else {
+                    // Include the protobuf encoding of the public key in the message.
+                    Some(key_enc)
+                };
+
+                PublishConfig::Signing {
+                    keypair,
+                    author: public_key.to_peer_id(),
+                    inline_key: key,
+                    last_seq_no: SequenceNumber::new(),
+                }
+            }
+            MessageAuthenticity::Author(peer_id) => PublishConfig::Author(peer_id),
+            MessageAuthenticity::RandomAuthor => PublishConfig::RandomAuthor,
+            MessageAuthenticity::Anonymous => PublishConfig::Anonymous,
+        }
+    }
+}
+
+/// Network behaviour that handles the gossipsub protocol.
+///
+/// NOTE: Initialisation requires a [`MessageAuthenticity`] and [`Config`] instance.
If +/// message signing is disabled, the [`ValidationMode`] in the config should be adjusted to an +/// appropriate level to accept unsigned messages. +/// +/// The DataTransform trait allows applications to optionally add extra encoding/decoding +/// functionality to the underlying messages. This is intended for custom compression algorithms. +/// +/// The TopicSubscriptionFilter allows applications to implement specific filters on topics to +/// prevent unwanted messages being propagated and evaluated. +pub struct Behaviour { + /// Configuration providing gossipsub performance parameters. + config: Config, + + /// Events that need to be yielded to the outside when polling. + events: VecDeque>, + + /// Information used for publishing messages. + publish_config: PublishConfig, + + /// An LRU Time cache for storing seen messages (based on their ID). This cache prevents + /// duplicates from being propagated to the application and on the network. + duplicate_cache: DuplicateCache, + + /// A set of connected peers, indexed by their [`PeerId`] tracking both the [`PeerKind`] and + /// the set of [`ConnectionId`]s. + connected_peers: HashMap, + + /// A set of all explicit peers. These are peers that remain connected and we unconditionally + /// forward messages to, outside of the scoring system. + explicit_peers: HashSet, + + /// A list of peers that have been blacklisted by the user. + /// Messages are not sent to and are rejected from these peers. + blacklisted_peers: HashSet, + + /// Overlay network of connected peers - Maps topics to connected gossipsub peers. + mesh: HashMap>, + + /// Map of topics to list of peers that we publish to, but don't subscribe to. + fanout: HashMap>, + + /// The last publish time for fanout topics. + fanout_last_pub: HashMap, + + ///Storage for backoffs + backoffs: BackoffStorage, + + /// Message cache for the last few heartbeats. + mcache: MessageCache, + + /// Heartbeat interval stream. + heartbeat: Ticker, + + /// Number of heartbeats since the beginning of time; this allows us to amortize some resource + /// clean up -- eg backoff clean up. + heartbeat_ticks: u64, + + /// We remember all peers we found through peer exchange, since those peers are not considered + /// as safe as randomly discovered outbound peers. This behaviour diverges from the go + /// implementation to avoid possible love bombing attacks in PX. When disconnecting peers will + /// be removed from this list which may result in a true outbound rediscovery. + px_peers: HashSet, + + /// Set of connected outbound peers (we only consider true outbound peers found through + /// discovery and not by PX). + outbound_peers: HashSet, + + /// Stores optional peer score data together with thresholds, decay interval and gossip + /// promises. + peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, + + /// Counts the number of `IHAVE` received from each peer since the last heartbeat. + count_received_ihave: HashMap, + + /// Counts the number of `IWANT` that we sent the each peer since the last heartbeat. + count_sent_iwant: HashMap, + + /// Short term cache for published message ids. This is used for penalizing peers sending + /// our own messages back if the messages are anonymous or use a random author. + published_message_ids: DuplicateCache, + + /// The filter used to handle message subscriptions. 
+ subscription_filter: F, + + /// A general transformation function that can be applied to data received from the wire before + /// calculating the message-id and sending to the application. This is designed to allow the + /// user to implement arbitrary topic-based compression algorithms. + data_transform: D, + + /// Keep track of a set of internal metrics relating to gossipsub. + metrics: Option, + + /// Tracks the numbers of failed messages per peer-id. + failed_messages: HashMap, +} + +impl Behaviour +where + D: DataTransform + Default, + F: TopicSubscriptionFilter + Default, +{ + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a + /// [`Config`]. This has no subscription filter and uses no compression. + pub fn new(privacy: MessageAuthenticity, config: Config) -> Result { + Self::new_with_subscription_filter_and_transform( + privacy, + config, + None, + F::default(), + D::default(), + ) + } + + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a + /// [`Config`]. This has no subscription filter and uses no compression. + /// Metrics can be evaluated by passing a reference to a [`Registry`]. + pub fn new_with_metrics( + privacy: MessageAuthenticity, + config: Config, + metrics_registry: &mut Registry, + metrics_config: MetricsConfig, + ) -> Result { + Self::new_with_subscription_filter_and_transform( + privacy, + config, + Some((metrics_registry, metrics_config)), + F::default(), + D::default(), + ) + } +} + +impl Behaviour +where + D: DataTransform + Default, + F: TopicSubscriptionFilter, +{ + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a + /// [`Config`] and a custom subscription filter. + pub fn new_with_subscription_filter( + privacy: MessageAuthenticity, + config: Config, + metrics: Option<(&mut Registry, MetricsConfig)>, + subscription_filter: F, + ) -> Result { + Self::new_with_subscription_filter_and_transform( + privacy, + config, + metrics, + subscription_filter, + D::default(), + ) + } +} + +impl Behaviour +where + D: DataTransform, + F: TopicSubscriptionFilter + Default, +{ + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a + /// [`Config`] and a custom data transform. + pub fn new_with_transform( + privacy: MessageAuthenticity, + config: Config, + metrics: Option<(&mut Registry, MetricsConfig)>, + data_transform: D, + ) -> Result { + Self::new_with_subscription_filter_and_transform( + privacy, + config, + metrics, + F::default(), + data_transform, + ) + } +} + +impl Behaviour +where + D: DataTransform, + F: TopicSubscriptionFilter, +{ + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a + /// [`Config`] and a custom subscription filter and data transform. + pub fn new_with_subscription_filter_and_transform( + privacy: MessageAuthenticity, + config: Config, + metrics: Option<(&mut Registry, MetricsConfig)>, + subscription_filter: F, + data_transform: D, + ) -> Result { + // Set up the router given the configuration settings. + + // We do not allow configurations where a published message would also be rejected if it + // were received locally. 
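+        // (Illustrative note: for example, `MessageAuthenticity::Anonymous` together with
+        // `ValidationMode::Strict` would publish unsigned messages while requiring signatures
+        // on receipt, so our own messages would fail local validation.)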
+ validate_config(&privacy, config.validation_mode())?; + + Ok(Behaviour { + metrics: metrics.map(|(registry, cfg)| Metrics::new(registry, cfg)), + events: VecDeque::new(), + publish_config: privacy.into(), + duplicate_cache: DuplicateCache::new(config.duplicate_cache_time()), + explicit_peers: HashSet::new(), + blacklisted_peers: HashSet::new(), + mesh: HashMap::new(), + fanout: HashMap::new(), + fanout_last_pub: HashMap::new(), + backoffs: BackoffStorage::new( + &config.prune_backoff(), + config.heartbeat_interval(), + config.backoff_slack(), + ), + mcache: MessageCache::new(config.history_gossip(), config.history_length()), + heartbeat: Ticker::new_with_next( + config.heartbeat_interval(), + config.heartbeat_initial_delay(), + ), + heartbeat_ticks: 0, + px_peers: HashSet::new(), + outbound_peers: HashSet::new(), + peer_score: None, + count_received_ihave: HashMap::new(), + count_sent_iwant: HashMap::new(), + connected_peers: HashMap::new(), + published_message_ids: DuplicateCache::new(config.published_message_ids_cache_time()), + config, + subscription_filter, + data_transform, + failed_messages: Default::default(), + }) + } +} + +impl Behaviour +where + D: DataTransform + Send + 'static, + F: TopicSubscriptionFilter + Send + 'static, +{ + /// Lists the hashes of the topics we are currently subscribed to. + pub fn topics(&self) -> impl Iterator { + self.mesh.keys() + } + + /// Lists all mesh peers for a certain topic hash. + pub fn mesh_peers(&self, topic_hash: &TopicHash) -> impl Iterator { + self.mesh.get(topic_hash).into_iter().flat_map(|x| x.iter()) + } + + pub fn all_mesh_peers(&self) -> impl Iterator { + let mut res = BTreeSet::new(); + for peers in self.mesh.values() { + res.extend(peers); + } + res.into_iter() + } + + /// Lists all known peers and their associated subscribed topics. + pub fn all_peers(&self) -> impl Iterator)> { + self.connected_peers + .iter() + .map(|(peer_id, peer)| (peer_id, peer.topics.iter().collect())) + } + + /// Lists all known peers and their associated protocol. + pub fn peer_protocol(&self) -> impl Iterator { + self.connected_peers.iter().map(|(k, v)| (k, &v.kind)) + } + + /// Returns the gossipsub score for a given peer, if one exists. + pub fn peer_score(&self, peer_id: &PeerId) -> Option { + self.peer_score + .as_ref() + .map(|(score, ..)| score.score(peer_id)) + } + + /// Subscribe to a topic. + /// + /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already + /// subscribed. + pub fn subscribe(&mut self, topic: &Topic) -> Result { + tracing::debug!(%topic, "Subscribing to topic"); + let topic_hash = topic.hash(); + if !self.subscription_filter.can_subscribe(&topic_hash) { + return Err(SubscriptionError::NotAllowed); + } + + if self.mesh.get(&topic_hash).is_some() { + tracing::debug!(%topic, "Topic is already in the mesh"); + return Ok(false); + } + + // send subscription request to all peers + for (peer_id, peer) in self.connected_peers.iter_mut() { + tracing::debug!(%peer_id, "Sending SUBSCRIBE to peer"); + + peer.sender.subscribe(topic_hash.clone()); + } + + // call JOIN(topic) + // this will add new peers to the mesh for the topic + self.join(&topic_hash); + tracing::debug!(%topic, "Subscribed to topic"); + Ok(true) + } + + /// Unsubscribes from a topic. + /// + /// Returns [`Ok(true)`] if we were subscribed to this topic. 
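+    // (Illustrative round-trip; `IdentTopic`, the usual `Topic<IdentityHash>` alias, and the
+    // `gs` behaviour handle are assumptions, not part of this patch.)
+    //
+    //     let topic = IdentTopic::new("example");
+    //     assert!(gs.subscribe(&topic)?);    // Ok(true): newly subscribed
+    //     assert!(gs.unsubscribe(&topic)?);  // Ok(true): we were subscribed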
+    pub fn unsubscribe<H: Hasher>(&mut self, topic: &Topic<H>) -> Result<bool, PublishError> {
+        tracing::debug!(%topic, "Unsubscribing from topic");
+        let topic_hash = topic.hash();
+
+        if self.mesh.get(&topic_hash).is_none() {
+            tracing::debug!(topic=%topic_hash, "Already unsubscribed from topic");
+            // we are not subscribed
+            return Ok(false);
+        }
+
+        // announce to all peers
+        for (peer_id, peer) in self.connected_peers.iter_mut() {
+            tracing::debug!(%peer_id, "Sending UNSUBSCRIBE to peer");
+            peer.sender.unsubscribe(topic_hash.clone());
+        }
+
+        // call LEAVE(topic)
+        // this will remove the topic from the mesh
+        self.leave(&topic_hash);
+
+        tracing::debug!(topic=%topic_hash, "Unsubscribed from topic");
+        Ok(true)
+    }
+
+    /// Publishes a message to the network.
+    pub fn publish(
+        &mut self,
+        topic: impl Into<TopicHash>,
+        data: impl Into<Vec<u8>>,
+    ) -> Result<MessageId, PublishError> {
+        let data = data.into();
+        let topic = topic.into();
+
+        // Transform the data before building a raw_message.
+        let transformed_data = self
+            .data_transform
+            .outbound_transform(&topic, data.clone())?;
+
+        let raw_message = self.build_raw_message(topic, transformed_data)?;
+
+        // calculate the message id from the un-transformed data
+        let msg_id = self.config.message_id(&Message {
+            source: raw_message.source,
+            data, // the uncompressed form
+            sequence_number: raw_message.sequence_number,
+            topic: raw_message.topic.clone(),
+        });
+
+        // check that the size doesn't exceed the max transmission size
+        if raw_message.raw_protobuf_len() > self.config.max_transmit_size() {
+            return Err(PublishError::MessageTooLarge);
+        }
+
+        // Check if the message has been published before
+        if self.duplicate_cache.contains(&msg_id) {
+            // This message has already been seen. We don't re-publish messages that have already
+            // been published on the network.
+            tracing::warn!(
+                message=%msg_id,
+                "Not publishing a message that has already been published"
+            );
+            return Err(PublishError::Duplicate);
+        }
+
+        tracing::trace!(message=%msg_id, "Publishing message");
+
+        let topic_hash = raw_message.topic.clone();
+
+        let mut peers_on_topic = self
+            .connected_peers
+            .iter()
+            .filter(|(_, p)| p.topics.contains(&topic_hash))
+            .map(|(peer_id, _)| peer_id)
+            .peekable();
+
+        if peers_on_topic.peek().is_none() {
+            return Err(PublishError::InsufficientPeers);
+        }
+
+        let mut recipient_peers = HashSet::new();
+
+        if self.config.flood_publish() {
+            // Forward to all peers above score and all explicit peers
+            recipient_peers.extend(peers_on_topic.filter(|p| {
+                self.explicit_peers.contains(*p)
+                    || !self.score_below_threshold(p, |ts| ts.publish_threshold).0
+            }));
+        } else {
+            match self.mesh.get(&topic_hash) {
+                // Mesh peers
+                Some(mesh_peers) => {
+                    // We have a mesh set. We want to make sure to publish to at least `mesh_n`
+                    // peers (if possible).
+                    let needed_extra_peers = self.config.mesh_n().saturating_sub(mesh_peers.len());
+
+                    if needed_extra_peers > 0 {
+                        // We don't have `mesh_n` peers in our mesh, we will randomly select extras
+                        // and publish to them.
+
+                        // Get a random set of peers that are appropriate to send messages to.
+ let peer_list = get_random_peers( + &self.connected_peers, + &topic_hash, + needed_extra_peers, + |peer| { + !mesh_peers.contains(peer) + && !self.explicit_peers.contains(peer) + && !self + .score_below_threshold(peer, |pst| pst.publish_threshold) + .0 + }, + ); + recipient_peers.extend(peer_list); + } + + recipient_peers.extend(mesh_peers); + } + // Gossipsub peers + None => { + tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + // If we have fanout peers add them to the map. + if self.fanout.contains_key(&topic_hash) { + for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { + recipient_peers.insert(*peer); + } + } else { + // We have no fanout peers, select mesh_n of them and add them to the fanout + let mesh_n = self.config.mesh_n(); + let new_peers = + get_random_peers(&self.connected_peers, &topic_hash, mesh_n, { + |p| { + !self.explicit_peers.contains(p) + && !self + .score_below_threshold(p, |pst| pst.publish_threshold) + .0 + } + }); + // Add the new peers to the fanout and recipient peers + self.fanout.insert(topic_hash.clone(), new_peers.clone()); + for peer in new_peers { + tracing::debug!(%peer, "Peer added to fanout"); + recipient_peers.insert(peer); + } + } + // We are publishing to fanout peers - update the time we published + self.fanout_last_pub + .insert(topic_hash.clone(), Instant::now()); + } + } + + // Explicit peers that are part of the topic + recipient_peers + .extend(peers_on_topic.filter(|peer_id| self.explicit_peers.contains(peer_id))); + + // Floodsub peers + for (peer, connections) in &self.connected_peers { + if connections.kind == PeerKind::Floodsub + && !self + .score_below_threshold(peer, |ts| ts.publish_threshold) + .0 + { + recipient_peers.insert(*peer); + } + } + } + + // If the message isn't a duplicate and we have sent it to some peers add it to the + // duplicate cache and memcache. + self.duplicate_cache.insert(msg_id.clone()); + self.mcache.put(&msg_id, raw_message.clone()); + + // If the message is anonymous or has a random author add it to the published message ids + // cache. + if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config { + if !self.config.allow_self_origin() { + self.published_message_ids.insert(msg_id.clone()); + } + } + + // Send to peers we know are subscribed to the topic. + let mut publish_failed = true; + for peer_id in recipient_peers.iter() { + if let Some(peer) = self.connected_peers.get_mut(peer_id) { + tracing::trace!(peer=%peer_id, "Sending message to peer"); + match peer.sender.publish( + raw_message.clone(), + self.config.publish_queue_duration(), + self.metrics.as_mut(), + ) { + Ok(_) => publish_failed = false, + Err(_) => { + self.failed_messages.entry(*peer_id).or_default().priority += 1; + + tracing::warn!(peer_id=%peer_id, "Publish queue full. Could not publish to peer"); + // Downscore the peer due to failed message. 
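+                        // (Illustrative clarification: publish failures count against the
+                        // `priority` counter above, whereas dropped IWANT replies and forwards
+                        // further down in this file increment `non_priority` instead.)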
+                        if let Some((peer_score, ..)) = &mut self.peer_score {
+                            peer_score.failed_message_slow_peer(peer_id);
+                        }
+                    }
+                }
+            } else {
+                tracing::error!(peer_id = %peer_id,
+                    "Could not PUBLISH, peer doesn't exist in connected peer list");
+            }
+        }
+
+        if recipient_peers.is_empty() {
+            return Err(PublishError::InsufficientPeers);
+        }
+
+        if publish_failed {
+            return Err(PublishError::AllQueuesFull(recipient_peers.len()));
+        }
+
+        tracing::debug!(message=%msg_id, "Published message");
+
+        if let Some(metrics) = self.metrics.as_mut() {
+            metrics.register_published_message(&topic_hash);
+        }
+
+        Ok(msg_id)
+    }
+
+    /// This function should be called when [`Config::validate_messages()`] is `true`, after
+    /// the message has been validated by the caller. Messages are stored in the [`MessageCache`]
+    /// and validation is expected to be fast enough that the messages should still exist in the
+    /// cache. There are three possible validation outcomes and the outcome is given in acceptance.
+    ///
+    /// If acceptance = [`MessageAcceptance::Accept`] the message will get propagated to the
+    /// network. The `propagation_source` parameter indicates who the message was received by and
+    /// will not be forwarded back to that peer.
+    ///
+    /// If acceptance = [`MessageAcceptance::Reject`] the message will be deleted from the memcache
+    /// and the P₄ penalty will be applied to the `propagation_source`.
+    ///
+    /// If acceptance = [`MessageAcceptance::Ignore`] the message will be deleted from the memcache
+    /// but no P₄ penalty will be applied.
+    ///
+    /// This function will return true if the message was found in the cache and false if it was
+    /// no longer in the cache.
+    ///
+    /// This should only be called once per message.
+    pub fn report_message_validation_result(
+        &mut self,
+        msg_id: &MessageId,
+        propagation_source: &PeerId,
+        acceptance: MessageAcceptance,
+    ) -> Result<bool, PublishError> {
+        let reject_reason = match acceptance {
+            MessageAcceptance::Accept => {
+                let (raw_message, originating_peers) = match self.mcache.validate(msg_id) {
+                    Some((raw_message, originating_peers)) => {
+                        (raw_message.clone(), originating_peers)
+                    }
+                    None => {
+                        tracing::warn!(
+                            message=%msg_id,
+                            "Message not in cache. Ignoring forwarding"
+                        );
+                        if let Some(metrics) = self.metrics.as_mut() {
+                            metrics.memcache_miss();
+                        }
+                        return Ok(false);
+                    }
+                };
+
+                if let Some(metrics) = self.metrics.as_mut() {
+                    metrics.register_msg_validation(&raw_message.topic, &acceptance);
+                }
+
+                self.forward_msg(
+                    msg_id,
+                    raw_message,
+                    Some(propagation_source),
+                    originating_peers,
+                )?;
+                return Ok(true);
+            }
+            MessageAcceptance::Reject => RejectReason::ValidationFailed,
+            MessageAcceptance::Ignore => RejectReason::ValidationIgnored,
+        };
+
+        if let Some((raw_message, originating_peers)) = self.mcache.remove(msg_id) {
+            if let Some(metrics) = self.metrics.as_mut() {
+                metrics.register_msg_validation(&raw_message.topic, &acceptance);
+            }
+
+            // Tell peer_score about reject
+            // Reject the original source, and any duplicates we've seen from other peers.
+            if let Some((peer_score, ..)) = &mut self.peer_score {
+                peer_score.reject_message(
+                    propagation_source,
+                    msg_id,
+                    &raw_message.topic,
+                    reject_reason,
+                );
+                for peer in originating_peers.iter() {
+                    peer_score.reject_message(peer, msg_id, &raw_message.topic, reject_reason);
+                }
+            }
+            Ok(true)
+        } else {
+            tracing::warn!(message=%msg_id, "Rejected message not in cache");
+            Ok(false)
+        }
+    }
+
+    /// Register topics to ensure metrics are recorded correctly for these topics.
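+    // (Illustrative validation flow when `validate_messages` is enabled in the `Config`;
+    // the `gs` handle and the destructured names are assumptions.)
+    //
+    //     // On `Event::Message { propagation_source, message_id, .. }`:
+    //     gs.report_message_validation_result(&message_id, &propagation_source,
+    //         MessageAcceptance::Accept)?; // forwards the cached message to other peers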
+ pub fn register_topics_for_metrics(&mut self, topics: Vec) { + if let Some(metrics) = &mut self.metrics { + metrics.register_allowed_topics(topics); + } + } + + /// Adds a new peer to the list of explicitly connected peers. + pub fn add_explicit_peer(&mut self, peer_id: &PeerId) { + tracing::debug!(peer=%peer_id, "Adding explicit peer"); + + self.explicit_peers.insert(*peer_id); + + self.check_explicit_peer_connection(peer_id); + } + + /// This removes the peer from explicitly connected peers, note that this does not disconnect + /// the peer. + pub fn remove_explicit_peer(&mut self, peer_id: &PeerId) { + tracing::debug!(peer=%peer_id, "Removing explicit peer"); + self.explicit_peers.remove(peer_id); + } + + /// Blacklists a peer. All messages from this peer will be rejected and any message that was + /// created by this peer will be rejected. + pub fn blacklist_peer(&mut self, peer_id: &PeerId) { + if self.blacklisted_peers.insert(*peer_id) { + tracing::debug!(peer=%peer_id, "Peer has been blacklisted"); + } + } + + /// Removes a peer from the blacklist if it has previously been blacklisted. + pub fn remove_blacklisted_peer(&mut self, peer_id: &PeerId) { + if self.blacklisted_peers.remove(peer_id) { + tracing::debug!(peer=%peer_id, "Peer has been removed from the blacklist"); + } + } + + /// Activates the peer scoring system with the given parameters. This will reset all scores + /// if there was already another peer scoring system activated. Returns an error if the + /// params are not valid or if they got already set. + pub fn with_peer_score( + &mut self, + params: PeerScoreParams, + threshold: PeerScoreThresholds, + ) -> Result<(), String> { + self.with_peer_score_and_message_delivery_time_callback(params, threshold, None) + } + + /// Activates the peer scoring system with the given parameters and a message delivery time + /// callback. Returns an error if the parameters got already set. + pub fn with_peer_score_and_message_delivery_time_callback( + &mut self, + params: PeerScoreParams, + threshold: PeerScoreThresholds, + callback: Option, + ) -> Result<(), String> { + params.validate()?; + threshold.validate()?; + + if self.peer_score.is_some() { + return Err("Peer score set twice".into()); + } + + let interval = Ticker::new(params.decay_interval); + let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback); + self.peer_score = Some((peer_score, threshold, interval, GossipPromises::default())); + Ok(()) + } + + /// Sets scoring parameters for a topic. + /// + /// The [`Self::with_peer_score()`] must first be called to initialise peer scoring. + pub fn set_topic_params( + &mut self, + topic: Topic, + params: TopicScoreParams, + ) -> Result<(), &'static str> { + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.set_topic_params(topic.hash(), params); + Ok(()) + } else { + Err("Peer score must be initialised with `with_peer_score()`") + } + } + + /// Returns a scoring parameters for a topic if existent. + pub fn get_topic_params(&self, topic: &Topic) -> Option<&TopicScoreParams> { + self.peer_score.as_ref()?.0.get_topic_params(&topic.hash()) + } + + /// Sets the application specific score for a peer. Returns true if scoring is active and + /// the peer is connected or if the score of the peer is not yet expired, false otherwise. 
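+    // (Illustrative wiring of the scoring system; using the `Default` parameter sets here is
+    // an assumption for the sketch.)
+    //
+    //     gs.with_peer_score(PeerScoreParams::default(), PeerScoreThresholds::default())?;
+    //     gs.set_topic_params(topic.clone(), TopicScoreParams::default())?;
+    //     gs.set_application_score(&peer_id, 10.0);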
+ pub fn set_application_score(&mut self, peer_id: &PeerId, new_score: f64) -> bool { + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.set_application_score(peer_id, new_score) + } else { + false + } + } + + /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages. + fn join(&mut self, topic_hash: &TopicHash) { + tracing::debug!(topic=%topic_hash, "Running JOIN for topic"); + + // if we are already in the mesh, return + if self.mesh.contains_key(topic_hash) { + tracing::debug!(topic=%topic_hash, "JOIN: The topic is already in the mesh, ignoring JOIN"); + return; + } + + let mut added_peers = HashSet::new(); + + if let Some(m) = self.metrics.as_mut() { + m.joined(topic_hash) + } + + // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, + // removing the fanout entry. + if let Some((_, mut peers)) = self.fanout.remove_entry(topic_hash) { + tracing::debug!( + topic=%topic_hash, + "JOIN: Removing peers from the fanout for topic" + ); + + // remove explicit peers, peers with negative scores, and backoffed peers + peers.retain(|p| { + !self.explicit_peers.contains(p) + && !self.score_below_threshold(p, |_| 0.0).0 + && !self.backoffs.is_backoff_with_slack(topic_hash, p) + }); + + // Add up to mesh_n of them them to the mesh + // NOTE: These aren't randomly added, currently FIFO + let add_peers = std::cmp::min(peers.len(), self.config.mesh_n()); + tracing::debug!( + topic=%topic_hash, + "JOIN: Adding {:?} peers from the fanout for topic", + add_peers + ); + added_peers.extend(peers.iter().take(add_peers)); + + self.mesh.insert( + topic_hash.clone(), + peers.into_iter().take(add_peers).collect(), + ); + + // remove the last published time + self.fanout_last_pub.remove(topic_hash); + } + + let fanaout_added = added_peers.len(); + if let Some(m) = self.metrics.as_mut() { + m.peers_included(topic_hash, Inclusion::Fanout, fanaout_added) + } + + // check if we need to get more peers, which we randomly select + if added_peers.len() < self.config.mesh_n() { + // get the peers + let new_peers = get_random_peers( + &self.connected_peers, + topic_hash, + self.config.mesh_n() - added_peers.len(), + |peer| { + !added_peers.contains(peer) + && !self.explicit_peers.contains(peer) + && !self.score_below_threshold(peer, |_| 0.0).0 + && !self.backoffs.is_backoff_with_slack(topic_hash, peer) + }, + ); + added_peers.extend(new_peers.clone()); + // add them to the mesh + tracing::debug!( + "JOIN: Inserting {:?} random peers into the mesh", + new_peers.len() + ); + let mesh_peers = self.mesh.entry(topic_hash.clone()).or_default(); + mesh_peers.extend(new_peers); + } + + let random_added = added_peers.len() - fanaout_added; + if let Some(m) = self.metrics.as_mut() { + m.peers_included(topic_hash, Inclusion::Random, random_added) + } + + for peer_id in added_peers { + // Send a GRAFT control message + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.graft(&peer_id, topic_hash.clone()); + } + if let Some(peer) = &mut self.connected_peers.get_mut(&peer_id) { + tracing::debug!(peer=%peer_id, "JOIN: Sending Graft message to peer"); + peer.sender.graft(Graft { + topic_hash: topic_hash.clone(), + }); + } else { + tracing::error!(peer = %peer_id, + "Could not GRAFT, peer doesn't exist in connected peer list"); + } + + // If the peer did not previously exist in any mesh, inform the handler + peer_added_to_mesh( + peer_id, + vec![topic_hash], + &self.mesh, + &mut self.events, + &self.connected_peers, + ); + } + + let mesh_peers = 
self.mesh_peers(topic_hash).count();
+        if let Some(m) = self.metrics.as_mut() {
+            m.set_mesh_peers(topic_hash, mesh_peers)
+        }
+
+        tracing::debug!(topic=%topic_hash, "Completed JOIN for topic");
+    }
+
+    /// Creates a PRUNE gossipsub action.
+    fn make_prune(
+        &mut self,
+        topic_hash: &TopicHash,
+        peer: &PeerId,
+        do_px: bool,
+        on_unsubscribe: bool,
+    ) -> Prune {
+        if let Some((peer_score, ..)) = &mut self.peer_score {
+            peer_score.prune(peer, topic_hash.clone());
+        }
+
+        match self.connected_peers.get(peer).map(|v| &v.kind) {
+            Some(PeerKind::Floodsub) => {
+                tracing::error!("Attempted to prune a Floodsub peer");
+            }
+            Some(PeerKind::Gossipsub) => {
+                // GossipSub v1.0 -- no peer exchange, the peer won't be able to parse it anyway
+                return Prune {
+                    topic_hash: topic_hash.clone(),
+                    peers: Vec::new(),
+                    backoff: None,
+                };
+            }
+            None => {
+                tracing::error!("Attempted to Prune an unknown peer");
+            }
+            _ => {} // Gossipsub 1.1 peers perform the `Prune`
+        }
+
+        // Select peers for peer exchange
+        let peers = if do_px {
+            get_random_peers(
+                &self.connected_peers,
+                topic_hash,
+                self.config.prune_peers(),
+                |p| p != peer && !self.score_below_threshold(p, |_| 0.0).0,
+            )
+            .into_iter()
+            .map(|p| PeerInfo { peer_id: Some(p) })
+            .collect()
+        } else {
+            Vec::new()
+        };
+
+        let backoff = if on_unsubscribe {
+            self.config.unsubscribe_backoff()
+        } else {
+            self.config.prune_backoff()
+        };
+
+        // update backoff
+        self.backoffs.update_backoff(topic_hash, peer, backoff);
+
+        Prune {
+            topic_hash: topic_hash.clone(),
+            peers,
+            backoff: Some(backoff.as_secs()),
+        }
+    }
+
+    /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages.
+    fn leave(&mut self, topic_hash: &TopicHash) {
+        tracing::debug!(topic=%topic_hash, "Running LEAVE for topic");
+
+        // If our mesh contains the topic, send prune to peers and delete it from the mesh
+        if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) {
+            if let Some(m) = self.metrics.as_mut() {
+                m.left(topic_hash)
+            }
+            for peer_id in peers {
+                // Send a PRUNE control message
+                let prune = self.make_prune(topic_hash, &peer_id, self.config.do_px(), true);
+                if let Some(peer) = &mut self.connected_peers.get_mut(&peer_id) {
+                    tracing::debug!(%peer_id, "LEAVE: Sending PRUNE to peer");
+                    peer.sender.prune(prune);
+                } else {
+                    tracing::error!(peer = %peer_id,
+                        "Could not PRUNE, peer doesn't exist in connected peer list");
+                }
+
+                // If the peer did not previously exist in any mesh, inform the handler
+                peer_removed_from_mesh(
+                    peer_id,
+                    topic_hash,
+                    &self.mesh,
+                    &mut self.events,
+                    &self.connected_peers,
+                );
+            }
+        }
+        tracing::debug!(topic=%topic_hash, "Completed LEAVE for topic");
+    }
+
+    /// Checks if the given peer is still connected and, if not, dials the peer again.
+    fn check_explicit_peer_connection(&mut self, peer_id: &PeerId) {
+        if !self.connected_peers.contains_key(peer_id) {
+            // Connect to peer
+            tracing::debug!(peer=%peer_id, "Connecting to explicit peer");
+            self.events.push_back(ToSwarm::Dial {
+                opts: DialOpts::peer_id(*peer_id).build(),
+            });
+        }
+    }
+
+    /// Determines if a peer's score is below a given `PeerScoreThreshold` chosen via the
+    /// `threshold` parameter.
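+    // (Illustrative call mirroring the call sites below; the closure selects which field of
+    // `PeerScoreThresholds` to compare against.)
+    //
+    //     let (below, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold);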
+    fn score_below_threshold(
+        &self,
+        peer_id: &PeerId,
+        threshold: impl Fn(&PeerScoreThresholds) -> f64,
+    ) -> (bool, f64) {
+        Self::score_below_threshold_from_scores(&self.peer_score, peer_id, threshold)
+    }
+
+    fn score_below_threshold_from_scores(
+        peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>,
+        peer_id: &PeerId,
+        threshold: impl Fn(&PeerScoreThresholds) -> f64,
+    ) -> (bool, f64) {
+        if let Some((peer_score, thresholds, ..)) = peer_score {
+            let score = peer_score.score(peer_id);
+            if score < threshold(thresholds) {
+                return (true, score);
+            }
+            (false, score)
+        } else {
+            (false, 0.0)
+        }
+    }
+
+    /// Handles an IHAVE control message. Checks our cache of messages. If the message is unknown,
+    /// requests it with an IWANT control message.
+    fn handle_ihave(&mut self, peer_id: &PeerId, ihave_msgs: Vec<(TopicHash, Vec<MessageId>)>) {
+        // We ignore IHAVE gossip from any peer whose score is below the gossip threshold
+        if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) {
+            tracing::debug!(
+                peer=%peer_id,
+                %score,
+                "IHAVE: ignoring peer with score below threshold"
+            );
+            return;
+        }
+
+        // IHAVE flood protection
+        let peer_have = self.count_received_ihave.entry(*peer_id).or_insert(0);
+        *peer_have += 1;
+        if *peer_have > self.config.max_ihave_messages() {
+            tracing::debug!(
+                peer=%peer_id,
+                "IHAVE: peer has advertised too many times ({}) within this heartbeat \
+                interval; ignoring",
+                *peer_have
+            );
+            return;
+        }
+
+        if let Some(iasked) = self.count_sent_iwant.get(peer_id) {
+            if *iasked >= self.config.max_ihave_length() {
+                tracing::debug!(
+                    peer=%peer_id,
+                    "IHAVE: peer has already advertised too many messages ({}); ignoring",
+                    *iasked
+                );
+                return;
+            }
+        }
+
+        tracing::trace!(peer=%peer_id, "Handling IHAVE for peer");
+
+        let mut iwant_ids = HashSet::new();
+
+        let want_message = |id: &MessageId| {
+            if self.duplicate_cache.contains(id) {
+                return false;
+            }
+
+            self.peer_score
+                .as_ref()
+                .map(|(_, _, _, promises)| !promises.contains(id))
+                .unwrap_or(true)
+        };
+
+        for (topic, ids) in ihave_msgs {
+            // only process the message if we are subscribed
+            if !self.mesh.contains_key(&topic) {
+                tracing::debug!(
+                    %topic,
+                    "IHAVE: Ignoring IHAVE - Not subscribed to topic"
+                );
+                continue;
+            }
+
+            for id in ids.into_iter().filter(want_message) {
+                // have not seen this message and are not currently requesting it
+                if iwant_ids.insert(id) {
+                    // Register the IWANT metric
+                    if let Some(metrics) = self.metrics.as_mut() {
+                        metrics.register_iwant(&topic);
+                    }
+                }
+            }
+        }
+
+        if !iwant_ids.is_empty() {
+            let iasked = self.count_sent_iwant.entry(*peer_id).or_insert(0);
+            let mut iask = iwant_ids.len();
+            if *iasked + iask > self.config.max_ihave_length() {
+                iask = self.config.max_ihave_length().saturating_sub(*iasked);
+            }
+
+            // Send the list of IWANT control messages
+            tracing::debug!(
+                peer=%peer_id,
+                "IHAVE: Asking for {} out of {} messages from peer",
+                iask,
+                iwant_ids.len()
+            );
+
+            // Ask in random order
+            let mut iwant_ids_vec: Vec<_> = iwant_ids.into_iter().collect();
+            let mut rng = thread_rng();
+            iwant_ids_vec.partial_shuffle(&mut rng, iask);
+
+            iwant_ids_vec.truncate(iask);
+            *iasked += iask;
+
+            if let Some((_, _, _, gossip_promises)) = &mut self.peer_score {
+                gossip_promises.add_promise(
+                    *peer_id,
+                    &iwant_ids_vec,
+                    Instant::now() + self.config.iwant_followup_time(),
+                );
+            }
+
+            if let Some(peer) = &mut self.connected_peers.get_mut(peer_id) {
+                tracing::trace!(
+                    peer=%peer_id,
+                    "IHAVE: Asking for the following messages from peer: {:?}",
+                    iwant_ids_vec
+                );
+
+                if peer
+                    .sender
+                    .iwant(IWant {
+                        message_ids: iwant_ids_vec,
+                    })
+                    .is_err()
+                {
+                    tracing::warn!(peer=%peer_id, "Send Queue full. Could not send IWANT");
+
+                    if let Some((peer_score, ..)) = &mut self.peer_score {
+                        peer_score.failed_message_slow_peer(peer_id);
+                    }
+                    // Increment failed message count
+                    self.failed_messages
+                        .entry(*peer_id)
+                        .or_default()
+                        .non_priority += 1;
+                }
+            } else {
+                tracing::error!(peer = %peer_id,
+                    "Could not IWANT, peer doesn't exist in connected peer list");
+            }
+        }
+        tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer");
+    }
+
+    /// Handles an IWANT control message. Checks our cache of messages. If the message exists,
+    /// it is forwarded to the requesting peer.
+    fn handle_iwant(&mut self, peer_id: &PeerId, iwant_msgs: Vec<MessageId>) {
+        // We ignore IWANT gossip from any peer whose score is below the gossip threshold
+        if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) {
+            tracing::debug!(
+                peer=%peer_id,
+                "IWANT: ignoring peer with score below threshold [score = {}]",
+                score
+            );
+            return;
+        }
+
+        tracing::debug!(peer=%peer_id, "Handling IWANT for peer");
+
+        for id in iwant_msgs {
+            // If we have it and the IHAVE count is not above the threshold,
+            // forward the message.
+            if let Some((msg, count)) = self
+                .mcache
+                .get_with_iwant_counts(&id, peer_id)
+                .map(|(msg, count)| (msg.clone(), count))
+            {
+                if count > self.config.gossip_retransimission() {
+                    tracing::debug!(
+                        peer=%peer_id,
+                        message=%id,
+                        "IWANT: Peer has asked for message too many times; ignoring request"
+                    );
+                } else if let Some(peer) = &mut self.connected_peers.get_mut(peer_id) {
+                    tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer");
+                    if peer
+                        .sender
+                        .forward(
+                            msg,
+                            self.config.forward_queue_duration(),
+                            self.metrics.as_mut(),
+                        )
+                        .is_err()
+                    {
+                        // Downscore the peer
+                        if let Some((peer_score, ..)) = &mut self.peer_score {
+                            peer_score.failed_message_slow_peer(peer_id);
+                        }
+                        // Increment the failed message count
+                        self.failed_messages
+                            .entry(*peer_id)
+                            .or_default()
+                            .non_priority += 1;
+                    }
+                } else {
+                    tracing::error!(peer = %peer_id,
+                        "Could not IWANT, peer doesn't exist in connected peer list");
+                }
+            }
+        }
+        tracing::debug!(peer=%peer_id, "Completed IWANT handling for peer");
+    }
+
+    /// Handles GRAFT control messages. If subscribed to the topic, adds the peer to the mesh;
+    /// if not, responds with PRUNE messages.
+    fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec<TopicHash>) {
+        tracing::debug!(peer=%peer_id, "Handling GRAFT message for peer");
+
+        let mut to_prune_topics = HashSet::new();
+
+        let mut do_px = self.config.do_px();
+
+        // For each topic, if a peer has grafted us, then we necessarily must be in their mesh
+        // and they must be subscribed to the topic. Ensure we have recorded the mapping.
+ for topic in &topics { + let Some(connected_peer) = self.connected_peers.get_mut(peer_id) else { + tracing::error!(peer_id = %peer_id, "Peer non-existent when handling graft"); + return; + }; + if connected_peer.topics.insert(topic.clone()) { + if let Some(m) = self.metrics.as_mut() { + m.inc_topic_peers(topic); + } + } + } + + // we don't GRAFT to/from explicit peers; complain loudly if this happens + if self.explicit_peers.contains(peer_id) { + tracing::warn!(peer=%peer_id, "GRAFT: ignoring request from direct peer"); + // this is possibly a bug from non-reciprocal configuration; send a PRUNE for all topics + to_prune_topics = topics.into_iter().collect(); + // but don't PX + do_px = false + } else { + let (below_zero, score) = self.score_below_threshold(peer_id, |_| 0.0); + let now = Instant::now(); + for topic_hash in topics { + if let Some(peers) = self.mesh.get_mut(&topic_hash) { + // if the peer is already in the mesh ignore the graft + if peers.contains(peer_id) { + tracing::debug!( + peer=%peer_id, + topic=%&topic_hash, + "GRAFT: Received graft for peer that is already in topic" + ); + continue; + } + + // make sure we are not backing off that peer + if let Some(backoff_time) = self.backoffs.get_backoff_time(&topic_hash, peer_id) + { + if backoff_time > now { + tracing::warn!( + peer=%peer_id, + "[Penalty] Peer attempted graft within backoff time, penalizing" + ); + // add behavioural penalty + if let Some((peer_score, ..)) = &mut self.peer_score { + if let Some(metrics) = self.metrics.as_mut() { + metrics.register_score_penalty(Penalty::GraftBackoff); + } + peer_score.add_penalty(peer_id, 1); + + // check the flood cutoff + // See: https://github.com/rust-lang/rust-clippy/issues/10061 + #[allow(unknown_lints, clippy::unchecked_duration_subtraction)] + let flood_cutoff = (backoff_time + + self.config.graft_flood_threshold()) + - self.config.prune_backoff(); + if flood_cutoff > now { + //extra penalty + peer_score.add_penalty(peer_id, 1); + } + } + // no PX + do_px = false; + + to_prune_topics.insert(topic_hash.clone()); + continue; + } + } + + // check the score + if below_zero { + // we don't GRAFT peers with negative score + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "GRAFT: ignoring peer with negative score" + ); + // we do send them PRUNE however, because it's a matter of protocol correctness + to_prune_topics.insert(topic_hash.clone()); + // but we won't PX to them + do_px = false; + continue; + } + + // check mesh upper bound and only allow graft if the upper bound is not reached or + // if it is an outbound peer + if peers.len() >= self.config.mesh_n_high() + && !self.outbound_peers.contains(peer_id) + { + to_prune_topics.insert(topic_hash.clone()); + continue; + } + + // add peer to the mesh + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Mesh link added for peer in topic" + ); + + if peers.insert(*peer_id) { + if let Some(m) = self.metrics.as_mut() { + m.peers_included(&topic_hash, Inclusion::Subscribed, 1) + } + } + + // If the peer did not previously exist in any mesh, inform the handler + peer_added_to_mesh( + *peer_id, + vec![&topic_hash], + &self.mesh, + &mut self.events, + &self.connected_peers, + ); + + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.graft(peer_id, topic_hash); + } + } else { + // don't do PX when there is an unknown topic to avoid leaking our peers + do_px = false; + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Received graft for unknown topic from peer" + ); + // 
spam hardening: ignore GRAFTs for unknown topics
+                    continue;
+                }
+            }
+        }
+
+        if !to_prune_topics.is_empty() {
+            // build the prune messages to send
+            let on_unsubscribe = false;
+
+            let mut sender = match self.connected_peers.get_mut(peer_id) {
+                Some(connected_peer) => connected_peer.sender.clone(),
+                None => {
+                    tracing::error!(peer_id = %peer_id, "Peer non-existent when handling graft and obtaining a sender");
+                    return;
+                }
+            };
+
+            for prune in to_prune_topics
+                .iter()
+                .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe))
+            {
+                sender.prune(prune);
+            }
+            // Send the prune messages to the peer
+            tracing::debug!(
+                peer=%peer_id,
+                "GRAFT: Not subscribed to topics - Sending PRUNE to peer"
+            );
+        }
+        tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer");
+    }
+
+    fn remove_peer_from_mesh(
+        &mut self,
+        peer_id: &PeerId,
+        topic_hash: &TopicHash,
+        backoff: Option<u64>,
+        always_update_backoff: bool,
+        reason: Churn,
+    ) {
+        let mut update_backoff = always_update_backoff;
+        if let Some(peers) = self.mesh.get_mut(topic_hash) {
+            // remove the peer if it exists in the mesh
+            if peers.remove(peer_id) {
+                tracing::debug!(
+                    peer=%peer_id,
+                    topic=%topic_hash,
+                    "PRUNE: Removing peer from the mesh for topic"
+                );
+                if let Some(m) = self.metrics.as_mut() {
+                    m.peers_removed(topic_hash, reason, 1)
+                }
+
+                if let Some((peer_score, ..)) = &mut self.peer_score {
+                    peer_score.prune(peer_id, topic_hash.clone());
+                }
+
+                update_backoff = true;
+
+                // inform the handler
+                peer_removed_from_mesh(
+                    *peer_id,
+                    topic_hash,
+                    &self.mesh,
+                    &mut self.events,
+                    &self.connected_peers,
+                );
+            }
+        }
+        if update_backoff {
+            // is there a backoff specified by the peer? if so obey it.
+            let time = if let Some(backoff) = backoff {
+                Duration::from_secs(backoff)
+            } else {
+                self.config.prune_backoff()
+            };
+            self.backoffs.update_backoff(topic_hash, peer_id, time);
+        }
+    }
+
+    /// Handles PRUNE control messages. Removes peer from the mesh.
+    fn handle_prune(
+        &mut self,
+        peer_id: &PeerId,
+        prune_data: Vec<(TopicHash, Vec<PeerInfo>, Option<u64>)>,
+    ) {
+        tracing::debug!(peer=%peer_id, "Handling PRUNE message for peer");
+        let (below_threshold, score) =
+            self.score_below_threshold(peer_id, |pst| pst.accept_px_threshold);
+        for (topic_hash, px, backoff) in prune_data {
+            self.remove_peer_from_mesh(peer_id, &topic_hash, backoff, true, Churn::Prune);
+
+            if self.mesh.contains_key(&topic_hash) {
+                // connect to px peers
+                if !px.is_empty() {
+                    // we ignore PX from peers with insufficient score
+                    if below_threshold {
+                        tracing::debug!(
+                            peer=%peer_id,
+                            %score,
+                            topic=%topic_hash,
+                            "PRUNE: ignoring PX from peer with insufficient score"
+                        );
+                        continue;
+                    }
+
+                    // NOTE: We cannot dial any peers from PX currently as we typically will not
+                    // know their multiaddr. Until SignedRecords are spec'd this
+                    // remains a stub. By default `config.prune_peers()` is set to zero and
+                    // this is skipped. If the user modifies this, this will only be able to
+                    // dial already known peers (from an external discovery mechanism for
+                    // example).
+                    if self.config.prune_peers() > 0 {
+                        self.px_connect(px);
+                    }
+                }
+            }
+        }
+        tracing::debug!(peer=%peer_id, "Completed PRUNE handling for peer");
+    }
+
+    fn px_connect(&mut self, mut px: Vec<PeerInfo>) {
+        let n = self.config.prune_peers();
+        // Ignore peerInfo with no ID
+        //
+        // TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a
+        // signed peer record?
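+        // Worked example of the selection below (illustrative numbers only): `retain` first
+        // drops entries without a peer ID; then, with prune_peers() == 16 and 40 remaining PX
+        // entries, `partial_shuffle` randomises the first 16 slots and `take(16)` keeps
+        // exactly those, i.e. a uniform random sample of the peer-exchange list.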
+ px.retain(|p| p.peer_id.is_some()); + if px.len() > n { + // only use at most prune_peers many random peers + let mut rng = thread_rng(); + px.partial_shuffle(&mut rng, n); + px = px.into_iter().take(n).collect(); + } + + for p in px { + // TODO: Once signed records are spec'd: extract signed peer record if given and handle + // it, see https://github.com/libp2p/specs/pull/217 + if let Some(peer_id) = p.peer_id { + // mark as px peer + self.px_peers.insert(peer_id); + + // dial peer + self.events.push_back(ToSwarm::Dial { + opts: DialOpts::peer_id(peer_id).build(), + }); + } + } + } + + /// Applies some basic checks to whether this message is valid. Does not apply user validation + /// checks. + fn message_is_valid( + &mut self, + msg_id: &MessageId, + raw_message: &mut RawMessage, + propagation_source: &PeerId, + ) -> bool { + tracing::debug!( + peer=%propagation_source, + message=%msg_id, + "Handling message from peer" + ); + + // Reject any message from a blacklisted peer + if self.blacklisted_peers.contains(propagation_source) { + tracing::debug!( + peer=%propagation_source, + "Rejecting message from blacklisted peer" + ); + if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + peer_score.reject_message( + propagation_source, + msg_id, + &raw_message.topic, + RejectReason::BlackListedPeer, + ); + gossip_promises.reject_message(msg_id, &RejectReason::BlackListedPeer); + } + return false; + } + + // Also reject any message that originated from a blacklisted peer + if let Some(source) = raw_message.source.as_ref() { + if self.blacklisted_peers.contains(source) { + tracing::debug!( + peer=%propagation_source, + %source, + "Rejecting message from peer because of blacklisted source" + ); + self.handle_invalid_message( + propagation_source, + raw_message, + RejectReason::BlackListedSource, + ); + return false; + } + } + + // If we are not validating messages, assume this message is validated + // This will allow the message to be gossiped without explicitly calling + // `validate_message`. + if !self.config.validate_messages() { + raw_message.validated = true; + } + + // reject messages claiming to be from ourselves but not locally published + let self_published = !self.config.allow_self_origin() + && if let Some(own_id) = self.publish_config.get_own_id() { + own_id != propagation_source + && raw_message.source.as_ref().map_or(false, |s| s == own_id) + } else { + self.published_message_ids.contains(msg_id) + }; + + if self_published { + tracing::debug!( + message=%msg_id, + source=%propagation_source, + "Dropping message claiming to be from self but forwarded from source" + ); + self.handle_invalid_message(propagation_source, raw_message, RejectReason::SelfOrigin); + return false; + } + + true + } + + /// Handles a newly received [`RawMessage`]. + /// + /// Forwards the message to all peers in the mesh. + fn handle_received_message( + &mut self, + mut raw_message: RawMessage, + propagation_source: &PeerId, + ) { + // Record the received metric + if let Some(metrics) = self.metrics.as_mut() { + metrics.msg_recvd_unfiltered(&raw_message.topic, raw_message.raw_protobuf_len()); + } + + // Try and perform the data transform to the message. If it fails, consider it invalid. + let message = match self.data_transform.inbound_transform(raw_message.clone()) { + Ok(message) => message, + Err(e) => { + tracing::debug!("Invalid message. 
Transform error: {:?}", e); + // Reject the message and return + self.handle_invalid_message( + propagation_source, + &raw_message, + RejectReason::ValidationError(ValidationError::TransformFailed), + ); + return; + } + }; + + // Calculate the message id on the transformed data. + let msg_id = self.config.message_id(&message); + + // Check the validity of the message + // Peers get penalized if this message is invalid. We don't add it to the duplicate cache + // and instead continually penalize peers that repeatedly send this message. + if !self.message_is_valid(&msg_id, &mut raw_message, propagation_source) { + return; + } + + if !self.duplicate_cache.insert(msg_id.clone()) { + tracing::debug!(message=%msg_id, "Message already received, ignoring"); + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); + } + self.mcache.observe_duplicate(&msg_id, propagation_source); + return; + } + tracing::debug!( + message=%msg_id, + "Put message in duplicate_cache and resolve promises" + ); + + // Record the received message with the metrics + if let Some(metrics) = self.metrics.as_mut() { + metrics.msg_recvd(&message.topic); + } + + // Tells score that message arrived (but is maybe not fully validated yet). + // Consider the message as delivered for gossip promises. + if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + peer_score.validate_message(propagation_source, &msg_id, &message.topic); + gossip_promises.message_delivered(&msg_id); + } + + // Add the message to our memcache + self.mcache.put(&msg_id, raw_message.clone()); + + // Dispatch the message to the user if we are subscribed to any of the topics + if self.mesh.contains_key(&message.topic) { + tracing::debug!("Sending received message to user"); + self.events + .push_back(ToSwarm::GenerateEvent(Event::Message { + propagation_source: *propagation_source, + message_id: msg_id.clone(), + message, + })); + } else { + tracing::debug!( + topic=%message.topic, + "Received message on a topic we are not subscribed to" + ); + return; + } + + // forward the message to mesh peers, if no validation is required + if !self.config.validate_messages() { + if self + .forward_msg( + &msg_id, + raw_message, + Some(propagation_source), + HashSet::new(), + ) + .is_err() + { + tracing::error!("Failed to forward message. Too large"); + } + tracing::debug!(message=%msg_id, "Completed message handling for message"); + } + } + + // Handles invalid messages received. + fn handle_invalid_message( + &mut self, + propagation_source: &PeerId, + raw_message: &RawMessage, + reject_reason: RejectReason, + ) { + if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + if let Some(metrics) = self.metrics.as_mut() { + metrics.register_invalid_message(&raw_message.topic); + } + + if let Ok(message) = self.data_transform.inbound_transform(raw_message.clone()) { + let message_id = self.config.message_id(&message); + + peer_score.reject_message( + propagation_source, + &message_id, + &message.topic, + reject_reason, + ); + + gossip_promises.reject_message(&message_id, &reject_reason); + } else { + // The message is invalid, we reject it ignoring any gossip promises. If a peer is + // advertising this message via an IHAVE and it's invalid it will be double + // penalized, one for sending us an invalid and again for breaking a promise. + peer_score.reject_invalid_message(propagation_source, &raw_message.topic); + } + } + } + + /// Handles received subscriptions. 
+ fn handle_received_subscriptions( + &mut self, + subscriptions: &[Subscription], + propagation_source: &PeerId, + ) { + tracing::debug!( + source=%propagation_source, + "Handling subscriptions: {:?}", + subscriptions, + ); + + let mut unsubscribed_peers = Vec::new(); + + let Some(peer) = self.connected_peers.get_mut(propagation_source) else { + tracing::error!( + peer=%propagation_source, + "Subscription by unknown peer" + ); + return; + }; + + // Collect potential graft topics for the peer. + let mut topics_to_graft = Vec::new(); + + // Notify the application about the subscription, after the grafts are sent. + let mut application_event = Vec::new(); + + let filtered_topics = match self + .subscription_filter + .filter_incoming_subscriptions(subscriptions, &peer.topics) + { + Ok(topics) => topics, + Err(s) => { + tracing::error!( + peer=%propagation_source, + "Subscription filter error: {}; ignoring RPC from peer", + s + ); + return; + } + }; + + for subscription in filtered_topics { + // get the peers from the mapping, or insert empty lists if the topic doesn't exist + let topic_hash = &subscription.topic_hash; + + match subscription.action { + SubscriptionAction::Subscribe => { + // add to the peer_topics mapping + if peer.topics.insert(topic_hash.clone()) { + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding gossip peer to topic" + ); + + if let Some(m) = self.metrics.as_mut() { + m.inc_topic_peers(topic_hash); + } + } + // if the mesh needs peers add the peer to the mesh + if !self.explicit_peers.contains(propagation_source) + && matches!(peer.kind, PeerKind::Gossipsubv1_1 | PeerKind::Gossipsub) + && !Self::score_below_threshold_from_scores( + &self.peer_score, + propagation_source, + |_| 0.0, + ) + .0 + && !self + .backoffs + .is_backoff_with_slack(topic_hash, propagation_source) + { + if let Some(peers) = self.mesh.get_mut(topic_hash) { + if peers.len() < self.config.mesh_n_low() + && peers.insert(*propagation_source) + { + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding peer to the mesh for topic" + ); + if let Some(m) = self.metrics.as_mut() { + m.peers_included(topic_hash, Inclusion::Subscribed, 1) + } + // send graft to the peer + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "Sending GRAFT to peer for topic" + ); + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.graft(propagation_source, topic_hash.clone()); + } + topics_to_graft.push(topic_hash.clone()); + } + } + } + // generates a subscription event to be polled + application_event.push(ToSwarm::GenerateEvent(Event::Subscribed { + peer_id: *propagation_source, + topic: topic_hash.clone(), + })); + } + SubscriptionAction::Unsubscribe => { + // remove topic from the peer_topics mapping + if peer.topics.remove(topic_hash) { + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Removing gossip peer from topic" + ); + + if let Some(m) = self.metrics.as_mut() { + m.dec_topic_peers(topic_hash); + } + } + + unsubscribed_peers.push((*propagation_source, topic_hash.clone())); + // generate an unsubscribe event to be polled + application_event.push(ToSwarm::GenerateEvent(Event::Unsubscribed { + peer_id: *propagation_source, + topic: topic_hash.clone(), + })); + } + } + } + + // remove unsubscribed peers from the mesh if it exists + for (peer_id, topic_hash) in unsubscribed_peers { + self.remove_peer_from_mesh(&peer_id, &topic_hash, None, false, Churn::Unsub); + } + + // Potentially 
inform the handler if we have added this peer to a mesh for the first time.
+        let topics_joined = topics_to_graft.iter().collect::<Vec<_>>();
+        if !topics_joined.is_empty() {
+            peer_added_to_mesh(
+                *propagation_source,
+                topics_joined,
+                &self.mesh,
+                &mut self.events,
+                &self.connected_peers,
+            );
+        }
+
+        // If we need to send grafts to peer, do so immediately, rather than waiting for the
+        // heartbeat.
+        if let Some(peer) = &mut self.connected_peers.get_mut(propagation_source) {
+            for topic_hash in topics_to_graft.into_iter() {
+                peer.sender.graft(Graft { topic_hash });
+            }
+        } else {
+            tracing::error!(peer = %propagation_source,
+                "Could not GRAFT, peer doesn't exist in connected peer list");
+        }
+
+        // Notify the application of the subscriptions
+        for event in application_event {
+            self.events.push_back(event);
+        }
+
+        tracing::trace!(
+            source=%propagation_source,
+            "Completed handling subscriptions from source"
+        );
+    }
+
+    /// Applies penalties to peers that did not respond to our IWANT requests.
+    fn apply_iwant_penalties(&mut self) {
+        if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score {
+            for (peer, count) in gossip_promises.get_broken_promises() {
+                peer_score.add_penalty(&peer, count);
+                if let Some(metrics) = self.metrics.as_mut() {
+                    metrics.register_score_penalty(Penalty::BrokenPromise);
+                }
+            }
+        }
+    }
+
+    /// Heartbeat function which shifts the memcache and updates the mesh.
+    fn heartbeat(&mut self) {
+        tracing::debug!("Starting heartbeat");
+        let start = Instant::now();
+
+        // Every heartbeat we sample the send queues to add to our metrics. We do this intentionally
+        // before we add all the gossip from this heartbeat in order to gain a true measure of the
+        // steady-state size of the queues.
+        if let Some(m) = &mut self.metrics {
+            for sender_queue in self.connected_peers.values_mut().map(|v| &v.sender) {
+                m.observe_priority_queue_size(sender_queue.priority_len());
+                m.observe_non_priority_queue_size(sender_queue.non_priority_len());
+            }
+        }
+
+        self.heartbeat_ticks += 1;
+
+        let mut to_graft = HashMap::new();
+        let mut to_prune = HashMap::new();
+        let mut no_px = HashSet::new();
+
+        // clean up expired backoffs
+        self.backoffs.heartbeat();
+
+        // clean up ihave counters
+        self.count_sent_iwant.clear();
+        self.count_received_ihave.clear();
+
+        // apply iwant penalties
+        self.apply_iwant_penalties();
+
+        // check connections to explicit peers
+        if self.heartbeat_ticks % self.config.check_explicit_peers_ticks() == 0 {
+            for p in self.explicit_peers.clone() {
+                self.check_explicit_peer_connection(&p);
+            }
+        }
+
+        // Cache the scores of all connected peers, and record metrics for current penalties.
+        let mut scores = HashMap::with_capacity(self.connected_peers.len());
+        if let Some((peer_score, ..)) = &self.peer_score {
+            for peer_id in self.connected_peers.keys() {
+                scores
+                    .entry(peer_id)
+                    .or_insert_with(|| peer_score.metric_score(peer_id, self.metrics.as_mut()));
+            }
+        }
+
+        // maintain the mesh for each topic
+        for (topic_hash, peers) in self.mesh.iter_mut() {
+            let explicit_peers = &self.explicit_peers;
+            let backoffs = &self.backoffs;
+            let outbound_peers = &self.outbound_peers;
+
+            // drop all peers with negative score, without PX
+            // if there is at some point a stable retain method for BTreeSet the following can
+            // be written more efficiently with retain.
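+            // Illustrative sketch of the removal below: a peer whose cached score is, say,
+            // -0.5 is dropped from this mesh, queued for PRUNE without peer exchange
+            // (`no_px`), and recorded under `Churn::BadScore` in the metrics.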
+            let mut to_remove_peers = Vec::new();
+            for peer_id in peers.iter() {
+                let peer_score = *scores.get(peer_id).unwrap_or(&0.0);
+
+                // Record the score per mesh
+                if let Some(metrics) = self.metrics.as_mut() {
+                    metrics.observe_mesh_peers_score(topic_hash, peer_score);
+                }
+
+                if peer_score < 0.0 {
+                    tracing::debug!(
+                        peer=%peer_id,
+                        score=%peer_score,
+                        topic=%topic_hash,
+                        "HEARTBEAT: Prune peer with negative score"
+                    );
+
+                    let current_topic = to_prune.entry(*peer_id).or_insert_with(Vec::new);
+                    current_topic.push(topic_hash.clone());
+                    no_px.insert(*peer_id);
+                    to_remove_peers.push(*peer_id);
+                }
+            }
+
+            if let Some(m) = self.metrics.as_mut() {
+                m.peers_removed(topic_hash, Churn::BadScore, to_remove_peers.len())
+            }
+
+            for peer_id in to_remove_peers {
+                peers.remove(&peer_id);
+            }
+
+            // too few peers - add some
+            if peers.len() < self.config.mesh_n_low() {
+                tracing::debug!(
+                    topic=%topic_hash,
+                    "HEARTBEAT: Mesh low. Topic contains: {} needs: {}",
+                    peers.len(),
+                    self.config.mesh_n_low()
+                );
+                // not enough peers - get mesh_n - current_length more
+                let desired_peers = self.config.mesh_n() - peers.len();
+                let peer_list =
+                    get_random_peers(&self.connected_peers, topic_hash, desired_peers, |peer| {
+                        !peers.contains(peer)
+                            && !explicit_peers.contains(peer)
+                            && !backoffs.is_backoff_with_slack(topic_hash, peer)
+                            && *scores.get(peer).unwrap_or(&0.0) >= 0.0
+                    });
+                for peer in &peer_list {
+                    let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new);
+                    current_topic.push(topic_hash.clone());
+                }
+                // update the mesh
+                tracing::debug!("Updating mesh, new mesh: {:?}", peer_list);
+                if let Some(m) = self.metrics.as_mut() {
+                    m.peers_included(topic_hash, Inclusion::Random, peer_list.len())
+                }
+                peers.extend(peer_list);
+            }
+
+            // too many peers - remove some
+            if peers.len() > self.config.mesh_n_high() {
+                tracing::debug!(
+                    topic=%topic_hash,
+                    "HEARTBEAT: Mesh high. Topic contains: {} needs: {}",
+                    peers.len(),
+                    self.config.mesh_n_high()
+                );
+                let excess_peer_no = peers.len() - self.config.mesh_n();
+
+                // shuffle the peers and then sort by score ascending beginning with the worst
+                let mut rng = thread_rng();
+                let mut shuffled = peers.iter().copied().collect::<Vec<_>>();
+                shuffled.shuffle(&mut rng);
+                shuffled.sort_by(|p1, p2| {
+                    let score_p1 = *scores.get(p1).unwrap_or(&0.0);
+                    let score_p2 = *scores.get(p2).unwrap_or(&0.0);
+
+                    score_p1.partial_cmp(&score_p2).unwrap_or(Ordering::Equal)
+                });
+                // shuffle everything except the last retain_scores many peers (the best ones)
+                shuffled[..peers.len() - self.config.retain_scores()].shuffle(&mut rng);
+
+                // count total number of outbound peers
+                let mut outbound = {
+                    let outbound_peers = &self.outbound_peers;
+                    shuffled
+                        .iter()
+                        .filter(|p| outbound_peers.contains(*p))
+                        .count()
+                };
+
+                // remove the first excess_peer_no allowed (by outbound restrictions) peers, adding
+                // them to to_prune
+                let mut removed = 0;
+                for peer in shuffled {
+                    if removed == excess_peer_no {
+                        break;
+                    }
+                    if self.outbound_peers.contains(&peer) {
+                        if outbound <= self.config.mesh_outbound_min() {
+                            // do not remove any more outbound peers
+                            continue;
+                        }
+                        // an outbound peer gets removed
+                        outbound -= 1;
+                    }
+
+                    // remove the peer
+                    peers.remove(&peer);
+                    let current_topic = to_prune.entry(peer).or_insert_with(Vec::new);
+                    current_topic.push(topic_hash.clone());
+                    removed += 1;
+                }
+
+                if let Some(m) = self.metrics.as_mut() {
+                    m.peers_removed(topic_hash, Churn::Excess, removed)
+                }
+            }
+
+            // do we have enough outbound peers?
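+            // Worked example (illustrative numbers): with mesh_outbound_min = 2 and a mesh of
+            // 6 peers of which only 1 is outbound, the block below grafts 1 extra outbound
+            // peer; conversely, the excess-pruning above never drops outbound peers once only
+            // mesh_outbound_min of them would remain.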
+ if peers.len() >= self.config.mesh_n_low() { + // count number of outbound peers we have + let outbound = { peers.iter().filter(|p| outbound_peers.contains(*p)).count() }; + + // if we have not enough outbound peers, graft to some new outbound peers + if outbound < self.config.mesh_outbound_min() { + let needed = self.config.mesh_outbound_min() - outbound; + let peer_list = + get_random_peers(&self.connected_peers, topic_hash, needed, |peer| { + !peers.contains(peer) + && !explicit_peers.contains(peer) + && !backoffs.is_backoff_with_slack(topic_hash, peer) + && *scores.get(peer).unwrap_or(&0.0) >= 0.0 + && outbound_peers.contains(peer) + }); + for peer in &peer_list { + let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); + current_topic.push(topic_hash.clone()); + } + // update the mesh + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); + if let Some(m) = self.metrics.as_mut() { + m.peers_included(topic_hash, Inclusion::Outbound, peer_list.len()) + } + peers.extend(peer_list); + } + } + + // should we try to improve the mesh with opportunistic grafting? + if self.heartbeat_ticks % self.config.opportunistic_graft_ticks() == 0 + && peers.len() > 1 + && self.peer_score.is_some() + { + if let Some((_, thresholds, _, _)) = &self.peer_score { + // Opportunistic grafting works as follows: we check the median score of peers + // in the mesh; if this score is below the opportunisticGraftThreshold, we + // select a few peers at random with score over the median. + // The intention is to (slowly) improve an underperforming mesh by introducing + // good scoring peers that may have been gossiping at us. This allows us to + // get out of sticky situations where we are stuck with poor peers and also + // recover from churn of good peers. 
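+
+                    // Worked example (illustrative): mesh scores [-1.0, 0.0, 0.5, 2.0] give a
+                    // median of (0.0 + 0.5) / 2 = 0.25; with an opportunistic_graft_threshold
+                    // of 1.0 we would graft up to opportunistic_graft_peers() peers whose
+                    // score exceeds 0.25.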
+ + // now compute the median peer score in the mesh + let mut peers_by_score: Vec<_> = peers.iter().collect(); + peers_by_score.sort_by(|p1, p2| { + let p1_score = *scores.get(p1).unwrap_or(&0.0); + let p2_score = *scores.get(p2).unwrap_or(&0.0); + p1_score.partial_cmp(&p2_score).unwrap_or(Equal) + }); + + let middle = peers_by_score.len() / 2; + let median = if peers_by_score.len() % 2 == 0 { + let sub_middle_peer = *peers_by_score + .get(middle - 1) + .expect("middle < vector length and middle > 0 since peers.len() > 0"); + let sub_middle_score = *scores.get(sub_middle_peer).unwrap_or(&0.0); + let middle_peer = + *peers_by_score.get(middle).expect("middle < vector length"); + let middle_score = *scores.get(middle_peer).unwrap_or(&0.0); + + (sub_middle_score + middle_score) * 0.5 + } else { + *scores + .get(*peers_by_score.get(middle).expect("middle < vector length")) + .unwrap_or(&0.0) + }; + + // if the median score is below the threshold, select a better peer (if any) and + // GRAFT + if median < thresholds.opportunistic_graft_threshold { + let peer_list = get_random_peers( + &self.connected_peers, + topic_hash, + self.config.opportunistic_graft_peers(), + |peer_id| { + !peers.contains(peer_id) + && !explicit_peers.contains(peer_id) + && !backoffs.is_backoff_with_slack(topic_hash, peer_id) + && *scores.get(peer_id).unwrap_or(&0.0) > median + }, + ); + for peer in &peer_list { + let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); + current_topic.push(topic_hash.clone()); + } + // update the mesh + tracing::debug!( + topic=%topic_hash, + "Opportunistically graft in topic with peers {:?}", + peer_list + ); + if let Some(m) = self.metrics.as_mut() { + m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) + } + peers.extend(peer_list); + } + } + } + // Register the final count of peers in the mesh + if let Some(m) = self.metrics.as_mut() { + m.set_mesh_peers(topic_hash, peers.len()) + } + } + + // remove expired fanout topics + { + let fanout = &mut self.fanout; // help the borrow checker + let fanout_ttl = self.config.fanout_ttl(); + self.fanout_last_pub.retain(|topic_hash, last_pub_time| { + if *last_pub_time + fanout_ttl < Instant::now() { + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Fanout topic removed due to timeout" + ); + fanout.remove(topic_hash); + return false; + } + true + }); + } + + // maintain fanout + // check if our peers are still a part of the topic + for (topic_hash, peers) in self.fanout.iter_mut() { + let mut to_remove_peers = Vec::new(); + let publish_threshold = match &self.peer_score { + Some((_, thresholds, _, _)) => thresholds.publish_threshold, + _ => 0.0, + }; + for peer_id in peers.iter() { + // is the peer still subscribed to the topic? + let peer_score = *scores.get(peer_id).unwrap_or(&0.0); + match self.connected_peers.get(peer_id) { + Some(peer) => { + if !peer.topics.contains(topic_hash) || peer_score < publish_threshold { + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Peer removed from fanout for topic" + ); + to_remove_peers.push(*peer_id); + } + } + None => { + // remove if the peer has disconnected + to_remove_peers.push(*peer_id); + } + } + } + for to_remove in to_remove_peers { + peers.remove(&to_remove); + } + + // not enough peers + if peers.len() < self.config.mesh_n() { + tracing::debug!( + "HEARTBEAT: Fanout low. 
Contains: {:?} needs: {:?}",
+                    peers.len(),
+                    self.config.mesh_n()
+                );
+                let needed_peers = self.config.mesh_n() - peers.len();
+                let explicit_peers = &self.explicit_peers;
+                let new_peers =
+                    get_random_peers(&self.connected_peers, topic_hash, needed_peers, |peer_id| {
+                        !peers.contains(peer_id)
+                            && !explicit_peers.contains(peer_id)
+                            && *scores.get(peer_id).unwrap_or(&0.0) >= publish_threshold
+                    });
+                peers.extend(new_peers);
+            }
+        }
+
+        if self.peer_score.is_some() {
+            tracing::trace!("Mesh message deliveries: {:?}", {
+                self.mesh
+                    .iter()
+                    .map(|(t, peers)| {
+                        (
+                            t.clone(),
+                            peers
+                                .iter()
+                                .map(|p| {
+                                    (
+                                        *p,
+                                        self.peer_score
+                                            .as_ref()
+                                            .expect("peer_score.is_some()")
+                                            .0
+                                            .mesh_message_deliveries(p, t)
+                                            .unwrap_or(0.0),
+                                    )
+                                })
+                                .collect::<HashMap<PeerId, f64>>(),
+                        )
+                    })
+                    .collect::<HashMap<TopicHash, HashMap<PeerId, f64>>>()
+            })
+        }
+
+        self.emit_gossip();
+
+        // send graft/prunes
+        if !to_graft.is_empty() || !to_prune.is_empty() {
+            self.send_graft_prune(to_graft, to_prune, no_px);
+        }
+
+        // shift the memcache
+        self.mcache.shift();
+
+        // Report expired messages
+        for (peer_id, failed_messages) in self.failed_messages.drain() {
+            tracing::debug!("Peer couldn't consume messages: {:?}", failed_messages);
+            self.events
+                .push_back(ToSwarm::GenerateEvent(Event::SlowPeer {
+                    peer_id,
+                    failed_messages,
+                }));
+        }
+        self.failed_messages.shrink_to_fit();
+
+        tracing::debug!("Completed Heartbeat");
+        if let Some(metrics) = self.metrics.as_mut() {
+            let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX);
+            metrics.observe_heartbeat_duration(duration);
+        }
+    }
+
+    /// Emits gossip - Sends IHAVE messages to a random set of gossip peers. This is applied to
+    /// mesh and fanout peers.
+    fn emit_gossip(&mut self) {
+        let mut rng = thread_rng();
+        for (topic_hash, peers) in self.mesh.iter().chain(self.fanout.iter()) {
+            let mut message_ids = self.mcache.get_gossip_message_ids(topic_hash);
+            if message_ids.is_empty() {
+                continue;
+            }
+
+            // if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate the list
+            if message_ids.len() > self.config.max_ihave_length() {
+                // we do the truncation (with shuffling) per peer below
+                tracing::debug!(
+                    "too many messages for gossip; will truncate IHAVE list ({} messages)",
+                    message_ids.len()
+                );
+            } else {
+                // shuffle to emit in random order
+                message_ids.shuffle(&mut rng);
+            }
+
+            // dynamic number of peers to gossip based on `gossip_factor` with minimum `gossip_lazy`
+            let n_map = |m| {
+                max(
+                    self.config.gossip_lazy(),
+                    (self.config.gossip_factor() * m as f64) as usize,
+                )
+            };
+            // get gossip_lazy random peers
+            let to_msg_peers =
+                get_random_peers_dynamic(&self.connected_peers, topic_hash, n_map, |peer| {
+                    !peers.contains(peer)
+                        && !self.explicit_peers.contains(peer)
+                        && !self.score_below_threshold(peer, |ts| ts.gossip_threshold).0
+                });
+
+            tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len());
+
+            for peer_id in to_msg_peers {
+                let mut peer_message_ids = message_ids.clone();
+
+                if peer_message_ids.len() > self.config.max_ihave_length() {
+                    // We do this per peer so that we emit a different set for each peer.
+                    // We have enough redundancy in the system that this will significantly
+                    // increase the message coverage when we do truncate.
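+                    // Illustrative numbers: with max_ihave_length() == 5000 and 8000 cached
+                    // ids, each gossip target gets a different random 5000-id subset, so the
+                    // union over several targets covers most of the cache.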
+                    peer_message_ids.partial_shuffle(&mut rng, self.config.max_ihave_length());
+                    peer_message_ids.truncate(self.config.max_ihave_length());
+                }
+
+                // send an IHAVE message
+                if let Some(peer) = &mut self.connected_peers.get_mut(&peer_id) {
+                    if peer
+                        .sender
+                        .ihave(IHave {
+                            topic_hash: topic_hash.clone(),
+                            message_ids: peer_message_ids,
+                        })
+                        .is_err()
+                    {
+                        tracing::warn!(peer=%peer_id, "Send Queue full. Could not send IHAVE");
+
+                        if let Some((peer_score, ..)) = &mut self.peer_score {
+                            peer_score.failed_message_slow_peer(&peer_id);
+                        }
+                        // Increment failed message count
+                        self.failed_messages
+                            .entry(peer_id)
+                            .or_default()
+                            .non_priority += 1;
+                    }
+                } else {
+                    tracing::error!(peer = %peer_id,
+                        "Could not IHAVE, peer doesn't exist in connected peer list");
+                }
+            }
+        }
+    }
+
+    /// Handles multiple GRAFT/PRUNE messages and coalesces them into chunked gossip control
+    /// messages.
+    fn send_graft_prune(
+        &mut self,
+        to_graft: HashMap<PeerId, Vec<TopicHash>>,
+        mut to_prune: HashMap<PeerId, Vec<TopicHash>>,
+        no_px: HashSet<PeerId>,
+    ) {
+        // handle the grafts and overlapping prunes per peer
+        for (peer_id, topics) in to_graft.into_iter() {
+            for topic in &topics {
+                // inform scoring of graft
+                if let Some((peer_score, ..)) = &mut self.peer_score {
+                    peer_score.graft(&peer_id, topic.clone());
+                }
+
+                // Inform the handler of the peer being added to the mesh, if it did not
+                // previously exist in any mesh.
+                peer_added_to_mesh(
+                    peer_id,
+                    vec![topic],
+                    &self.mesh,
+                    &mut self.events,
+                    &self.connected_peers,
+                );
+            }
+
+            // If there are prunes associated with the same peer add them.
+            // NOTE: In this case a peer has been added to a topic mesh, and removed from another.
+            // It therefore must be in at least one mesh and we do not need to inform the handler
+            // of its removal from another.
+
+            // send the control messages
+            let mut sender = match self.connected_peers.get_mut(&peer_id) {
+                Some(connected_peer) => connected_peer.sender.clone(),
+                None => {
+                    tracing::error!(peer_id = %peer_id, "Peer non-existent when sending graft/prune");
+                    return;
+                }
+            };
+
+            // The following prunes are not due to unsubscribing.
+            let prunes = to_prune
+                .remove(&peer_id)
+                .into_iter()
+                .flatten()
+                .map(|topic_hash| {
+                    self.make_prune(
+                        &topic_hash,
+                        &peer_id,
+                        self.config.do_px() && !no_px.contains(&peer_id),
+                        false,
+                    )
+                });
+
+            for topic_hash in topics {
+                sender.graft(Graft {
+                    topic_hash: topic_hash.clone(),
+                });
+            }
+
+            for prune in prunes {
+                sender.prune(prune);
+            }
+        }
+
+        // handle the remaining prunes
+        // The following prunes are not due to unsubscribing.
+        for (peer_id, topics) in to_prune.iter() {
+            for topic_hash in topics {
+                let prune = self.make_prune(
+                    topic_hash,
+                    peer_id,
+                    self.config.do_px() && !no_px.contains(peer_id),
+                    false,
+                );
+                if let Some(peer) = self.connected_peers.get_mut(peer_id) {
+                    peer.sender.prune(prune);
+                } else {
+                    tracing::error!(peer = %peer_id,
+                        "Could not PRUNE, peer doesn't exist in connected peer list");
+                }
+
+                // inform the handler
+                peer_removed_from_mesh(
+                    *peer_id,
+                    topic_hash,
+                    &self.mesh,
+                    &mut self.events,
+                    &self.connected_peers,
+                );
+            }
+        }
+    }
+
+    /// Helper function which forwards a message to mesh[topic] peers.
+    ///
+    /// Returns true if at least one peer was messaged.
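+    ///
+    /// Illustrative call (hypothetical variables; a sketch, not part of the public API):
+    ///
+    /// ```ignore
+    /// // Forward a validated message to mesh and explicit peers, excluding its origin.
+    /// let sent = self.forward_msg(&msg_id, raw_message, Some(&source), HashSet::new())?;
+    /// if !sent {
+    ///     // no eligible recipients (e.g. empty mesh for the topic)
+    /// }
+    /// ```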
+    fn forward_msg(
+        &mut self,
+        msg_id: &MessageId,
+        message: RawMessage,
+        propagation_source: Option<&PeerId>,
+        originating_peers: HashSet<PeerId>,
+    ) -> Result<bool, PublishError> {
+        // message is fully validated, inform peer_score
+        if let Some((peer_score, ..)) = &mut self.peer_score {
+            if let Some(peer) = propagation_source {
+                peer_score.deliver_message(peer, msg_id, &message.topic);
+            }
+        }
+
+        tracing::debug!(message=%msg_id, "Forwarding message");
+        let mut recipient_peers = HashSet::new();
+
+        // Populate the recipient peers mapping
+
+        // Add explicit peers
+        for peer_id in &self.explicit_peers {
+            if let Some(peer) = self.connected_peers.get(peer_id) {
+                if Some(peer_id) != propagation_source
+                    && !originating_peers.contains(peer_id)
+                    && Some(peer_id) != message.source.as_ref()
+                    && peer.topics.contains(&message.topic)
+                {
+                    recipient_peers.insert(*peer_id);
+                }
+            }
+        }
+
+        // add mesh peers
+        let topic = &message.topic;
+        // mesh
+        if let Some(mesh_peers) = self.mesh.get(topic) {
+            for peer_id in mesh_peers {
+                if Some(peer_id) != propagation_source
+                    && !originating_peers.contains(peer_id)
+                    && Some(peer_id) != message.source.as_ref()
+                {
+                    recipient_peers.insert(*peer_id);
+                }
+            }
+        }
+
+        // forward the message to peers
+        if !recipient_peers.is_empty() {
+            for peer_id in recipient_peers.iter() {
+                if let Some(peer) = self.connected_peers.get_mut(peer_id) {
+                    tracing::debug!(%peer_id, message=%msg_id, "Sending message to peer");
+                    if peer
+                        .sender
+                        .forward(
+                            message.clone(),
+                            self.config.forward_queue_duration(),
+                            self.metrics.as_mut(),
+                        )
+                        .is_err()
+                    {
+                        // Downscore the peer
+                        if let Some((peer_score, ..)) = &mut self.peer_score {
+                            peer_score.failed_message_slow_peer(peer_id);
+                        }
+                        // Increment the failed message count
+                        self.failed_messages
+                            .entry(*peer_id)
+                            .or_default()
+                            .non_priority += 1;
+                    }
+                } else {
+                    tracing::error!(peer = %peer_id,
+                        "Could not FORWARD, peer doesn't exist in connected peer list");
+                }
+            }
+            tracing::debug!("Completed forwarding message");
+            Ok(true)
+        } else {
+            Ok(false)
+        }
+    }
+
+    /// Constructs a [`RawMessage`] performing message signing if required.
+    pub(crate) fn build_raw_message(
+        &mut self,
+        topic: TopicHash,
+        data: Vec<u8>,
+    ) -> Result<RawMessage, PublishError> {
+        match &mut self.publish_config {
+            PublishConfig::Signing {
+                ref keypair,
+                author,
+                inline_key,
+                last_seq_no,
+            } => {
+                let sequence_number = last_seq_no.next();
+
+                let signature = {
+                    let message = proto::Message {
+                        from: Some(author.to_bytes()),
+                        data: Some(data.clone()),
+                        seqno: Some(sequence_number.to_be_bytes().to_vec()),
+                        topic: topic.clone().into_string(),
+                        signature: None,
+                        key: None,
+                    };
+
+                    let mut buf = Vec::with_capacity(message.get_size());
+                    let mut writer = Writer::new(&mut buf);
+
+                    message
+                        .write_message(&mut writer)
+                        .expect("Encoding to succeed");
+
+                    // the signature is over the bytes "libp2p-pubsub:" followed by the
+                    // protobuf-encoded message
+                    let mut signature_bytes = SIGNING_PREFIX.to_vec();
+                    signature_bytes.extend_from_slice(&buf);
+                    Some(keypair.sign(&signature_bytes)?)
+                };
+
+                Ok(RawMessage {
+                    source: Some(*author),
+                    data,
+                    // To be interoperable with the go-implementation this is treated as a 64-bit
+                    // big-endian uint.
+                    sequence_number: Some(sequence_number),
+                    topic,
+                    signature,
+                    key: inline_key.clone(),
+                    validated: true, // all published messages are valid
+                })
+            }
+            PublishConfig::Author(peer_id) => {
+                Ok(RawMessage {
+                    source: Some(*peer_id),
+                    data,
+                    // To be interoperable with the go-implementation this is treated as a 64-bit
+                    // big-endian uint.
+ sequence_number: Some(rand::random()), + topic, + signature: None, + key: None, + validated: true, // all published messages are valid + }) + } + PublishConfig::RandomAuthor => { + Ok(RawMessage { + source: Some(PeerId::random()), + data, + // To be interoperable with the go-implementation this is treated as a 64-bit + // big-endian uint. + sequence_number: Some(rand::random()), + topic, + signature: None, + key: None, + validated: true, // all published messages are valid + }) + } + PublishConfig::Anonymous => { + Ok(RawMessage { + source: None, + data, + // To be interoperable with the go-implementation this is treated as a 64-bit + // big-endian uint. + sequence_number: None, + topic, + signature: None, + key: None, + validated: true, // all published messages are valid + }) + } + } + } + + fn on_connection_established( + &mut self, + ConnectionEstablished { + peer_id, + endpoint, + other_established, + .. + }: ConnectionEstablished, + ) { + // Diverging from the go implementation we only want to consider a peer as outbound peer + // if its first connection is outbound. + + if endpoint.is_dialer() && other_established == 0 && !self.px_peers.contains(&peer_id) { + // The first connection is outbound and it is not a peer from peer exchange => mark + // it as outbound peer + self.outbound_peers.insert(peer_id); + } + + // Add the IP to the peer scoring system + if let Some((peer_score, ..)) = &mut self.peer_score { + if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { + peer_score.add_ip(&peer_id, ip); + } else { + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", + endpoint + ) + } + } + + if other_established > 0 { + return; // Not our first connection to this peer, hence nothing to do. + } + + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.add_peer(peer_id); + } + + // Ignore connections from blacklisted peers. + if self.blacklisted_peers.contains(&peer_id) { + tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer"); + return; + } + + tracing::debug!(peer=%peer_id, "New peer connected"); + // We need to send our subscriptions to the newly-connected node. + if let Some(peer) = self.connected_peers.get_mut(&peer_id) { + for topic_hash in self.mesh.clone().into_keys() { + peer.sender.subscribe(topic_hash); + } + } else { + tracing::error!(peer = %peer_id, + "Could not SUBSCRIBE, peer doesn't exist in connected peer list"); + } + } + + fn on_connection_closed( + &mut self, + ConnectionClosed { + peer_id, + connection_id, + endpoint, + remaining_established, + .. + }: ConnectionClosed, + ) { + // Remove IP from peer scoring system + if let Some((peer_score, ..)) = &mut self.peer_score { + if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { + peer_score.remove_ip(&peer_id, &ip); + } else { + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", + endpoint + ) + } + } + + if remaining_established != 0 { + // Remove the connection from the list + if let Some(peer) = self.connected_peers.get_mut(&peer_id) { + let index = peer + .connections + .iter() + .position(|v| v == &connection_id) + .expect("Previously established connection to peer must be present"); + peer.connections.remove(index); + + // If there are more connections and this peer is in a mesh, inform the first connection + // handler. 
+                if !peer.connections.is_empty() {
+                    for topic in &peer.topics {
+                        if let Some(mesh_peers) = self.mesh.get(topic) {
+                            if mesh_peers.contains(&peer_id) {
+                                self.events.push_back(ToSwarm::NotifyHandler {
+                                    peer_id,
+                                    event: HandlerIn::JoinedMesh,
+                                    handler: NotifyHandler::One(peer.connections[0]),
+                                });
+                                break;
+                            }
+                        }
+                    }
+                }
+            }
+        } else {
+            // remove from mesh, topic_peers, peer_topic and the fanout
+            tracing::debug!(peer=%peer_id, "Peer disconnected");
+
+            let Some(connected_peer) = self.connected_peers.get(&peer_id) else {
+                tracing::error!(peer_id = %peer_id, "Peer non-existent when handling disconnection");
+                return;
+            };
+
+            // remove peer from all mappings
+            for topic in &connected_peer.topics {
+                // check the mesh for the topic
+                if let Some(mesh_peers) = self.mesh.get_mut(topic) {
+                    // check if the peer is in the mesh and remove it
+                    if mesh_peers.remove(&peer_id) {
+                        if let Some(m) = self.metrics.as_mut() {
+                            m.peers_removed(topic, Churn::Dc, 1);
+                            m.set_mesh_peers(topic, mesh_peers.len());
+                        }
+                    };
+                }
+
+                if let Some(m) = self.metrics.as_mut() {
+                    m.dec_topic_peers(topic);
+                }
+
+                // remove from fanout
+                self.fanout
+                    .get_mut(topic)
+                    .map(|peers| peers.remove(&peer_id));
+            }
+
+            // Forget px and outbound status for this peer
+            self.px_peers.remove(&peer_id);
+            self.outbound_peers.remove(&peer_id);
+
+            // If metrics are enabled, register the disconnection of a peer based on its protocol.
+            if let Some(metrics) = self.metrics.as_mut() {
+                metrics.peer_protocol_disconnected(connected_peer.kind.clone());
+            }
+
+            self.connected_peers.remove(&peer_id);
+
+            if let Some((peer_score, ..)) = &mut self.peer_score {
+                peer_score.remove_peer(&peer_id);
+            }
+        }
+    }
+
+    fn on_address_change(
+        &mut self,
+        AddressChange {
+            peer_id,
+            old: endpoint_old,
+            new: endpoint_new,
+            ..
+        }: AddressChange,
+    ) {
+        // Exchange IP in peer scoring system
+        if let Some((peer_score, ..)) = &mut self.peer_score {
+            if let Some(ip) = get_ip_addr(endpoint_old.get_remote_address()) {
+                peer_score.remove_ip(&peer_id, &ip);
+            } else {
+                tracing::trace!(
+                    peer=%peer_id,
+                    "Couldn't extract ip from endpoint of peer with endpoint {:?}",
+                    endpoint_old
+                )
+            }
+            if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) {
+                peer_score.add_ip(&peer_id, ip);
+            } else {
+                tracing::trace!(
+                    peer=%peer_id,
+                    "Couldn't extract ip from endpoint of peer with endpoint {:?}",
+                    endpoint_new
+                )
+            }
+        }
+    }
+}
+
+fn get_ip_addr(addr: &Multiaddr) -> Option<IpAddr> {
+    addr.iter().find_map(|p| match p {
+        Ip4(addr) => Some(IpAddr::V4(addr)),
+        Ip6(addr) => Some(IpAddr::V6(addr)),
+        _ => None,
+    })
+}
+
+impl<C, F> NetworkBehaviour for Behaviour<C, F>
+where
+    C: Send + 'static + DataTransform,
+    F: Send + 'static + TopicSubscriptionFilter,
+{
+    type ConnectionHandler = Handler;
+    type ToSwarm = Event;
+
+    fn handle_established_inbound_connection(
+        &mut self,
+        connection_id: ConnectionId,
+        peer_id: PeerId,
+        _: &Multiaddr,
+        _: &Multiaddr,
+    ) -> Result<THandler<Self>, ConnectionDenied> {
+        // By default we assume a peer is only a floodsub peer.
+        //
+        // The protocol negotiation occurs once a message is sent/received. Once this happens we
+        // update the type of peer that this is in order to determine which kind of routing should
+        // occur.
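+        // Sketch of the peer-kind lifecycle (for illustration): a new connection starts as
+        // PeerKind::Floodsub; once the handler negotiates e.g. "/meshsub/1.1.0" it reports
+        // HandlerEvent::PeerKind(PeerKind::Gossipsubv1_1) and `on_connection_handler_event`
+        // upgrades the stored kind, which in turn enables mesh routing for this peer.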
+        let connected_peer = self
+            .connected_peers
+            .entry(peer_id)
+            .or_insert(PeerConnections {
+                kind: PeerKind::Floodsub,
+                connections: vec![],
+                sender: RpcSender::new(self.config.connection_handler_queue_len()),
+                topics: Default::default(),
+            });
+        // Add the new connection
+        connected_peer.connections.push(connection_id);
+
+        Ok(Handler::new(
+            self.config.protocol_config(),
+            connected_peer.sender.new_receiver(),
+        ))
+    }
+
+    fn handle_established_outbound_connection(
+        &mut self,
+        connection_id: ConnectionId,
+        peer_id: PeerId,
+        _: &Multiaddr,
+        _: Endpoint,
+    ) -> Result<THandler<Self>, ConnectionDenied> {
+        // By default we assume a peer is only a floodsub peer.
+        //
+        // The protocol negotiation occurs once a message is sent/received. Once this happens we
+        // update the type of peer that this is in order to determine which kind of routing should
+        // occur.
+        let connected_peer = self
+            .connected_peers
+            .entry(peer_id)
+            .or_insert(PeerConnections {
+                kind: PeerKind::Floodsub,
+                connections: vec![],
+                sender: RpcSender::new(self.config.connection_handler_queue_len()),
+                topics: Default::default(),
+            });
+        // Add the new connection
+        connected_peer.connections.push(connection_id);
+
+        Ok(Handler::new(
+            self.config.protocol_config(),
+            connected_peer.sender.new_receiver(),
+        ))
+    }
+
+    fn on_connection_handler_event(
+        &mut self,
+        propagation_source: PeerId,
+        _connection_id: ConnectionId,
+        handler_event: THandlerOutEvent<Self>,
+    ) {
+        match handler_event {
+            HandlerEvent::PeerKind(kind) => {
+                // We have identified the protocol this peer is using
+
+                if let Some(metrics) = self.metrics.as_mut() {
+                    metrics.peer_protocol_connected(kind.clone());
+                }
+
+                if let PeerKind::NotSupported = kind {
+                    tracing::debug!(
+                        peer=%propagation_source,
+                        "Peer does not support gossipsub protocols"
+                    );
+                    self.events
+                        .push_back(ToSwarm::GenerateEvent(Event::GossipsubNotSupported {
+                            peer_id: propagation_source,
+                        }));
+                } else if let Some(conn) = self.connected_peers.get_mut(&propagation_source) {
+                    // Only change the value if the old value is Floodsub (the default set in
+                    // `NetworkBehaviour::on_event` with FromSwarm::ConnectionEstablished).
+                    // All other PeerKind changes are ignored.
+                    tracing::debug!(
+                        peer=%propagation_source,
+                        peer_type=%kind,
+                        "New peer type found for peer"
+                    );
+                    if let PeerKind::Floodsub = conn.kind {
+                        conn.kind = kind;
+                    }
+                }
+            }
+            HandlerEvent::MessageDropped(rpc) => {
+                // Account for this in the scoring logic
+                if let Some((peer_score, _, _, _)) = &mut self.peer_score {
+                    peer_score.failed_message_slow_peer(&propagation_source);
+                }
+
+                // Keep track of expired messages for the application layer.
+                match rpc {
+                    RpcOut::Publish { .. } => {
+                        self.failed_messages
+                            .entry(propagation_source)
+                            .or_default()
+                            .publish += 1;
+                    }
+                    RpcOut::Forward { .. } => {
+                        self.failed_messages
+                            .entry(propagation_source)
+                            .or_default()
+                            .forward += 1;
+                    }
+                    _ => {}
+                }
+
+                // Record metrics on the failure.
+                if let Some(metrics) = self.metrics.as_mut() {
+                    match rpc {
+                        RpcOut::Publish { message, .. } => {
+                            metrics.publish_msg_dropped(&message.topic);
+                        }
+                        RpcOut::Forward { message, .. } => {
+                            metrics.forward_msg_dropped(&message.topic);
+                        }
+                        _ => {}
+                    }
+                }
+            }
+            HandlerEvent::Message {
+                rpc,
+                invalid_messages,
+            } => {
+                // Handle the gossipsub RPC
+
+                // Handle subscriptions
+                // Update connected peers topics
+                if !rpc.subscriptions.is_empty() {
+                    self.handle_received_subscriptions(&rpc.subscriptions, &propagation_source);
+                }
+
+                // Check if peer is graylisted in which case we ignore the event
+                if let (true, _) =
+                    self.score_below_threshold(&propagation_source, |pst| pst.graylist_threshold)
+                {
+                    tracing::debug!(peer=%propagation_source, "RPC Dropped from graylisted peer");
+                    return;
+                }
+
+                // Handle any invalid messages from this peer
+                if self.peer_score.is_some() {
+                    for (raw_message, validation_error) in invalid_messages {
+                        self.handle_invalid_message(
+                            &propagation_source,
+                            &raw_message,
+                            RejectReason::ValidationError(validation_error),
+                        )
+                    }
+                } else {
+                    // log the invalid messages
+                    for (message, validation_error) in invalid_messages {
+                        tracing::warn!(
+                            peer=%propagation_source,
+                            source=?message.source,
+                            "Invalid message from peer. Reason: {:?}",
+                            validation_error,
+                        );
+                    }
+                }
+
+                // Handle messages
+                for (count, raw_message) in rpc.messages.into_iter().enumerate() {
+                    // Only process the number of messages the configuration allows.
+                    if self.config.max_messages_per_rpc().is_some()
+                        && Some(count) >= self.config.max_messages_per_rpc()
+                    {
+                        tracing::warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count);
+                        break;
+                    }
+                    self.handle_received_message(raw_message, &propagation_source);
+                }
+
+                // Handle control messages
+                // Group the control messages; this minimises the number of send events
+                // (the code is simplified by still handling each event at a time, however).
+                let mut ihave_msgs = vec![];
+                let mut graft_msgs = vec![];
+                let mut prune_msgs = vec![];
+                for control_msg in rpc.control_msgs {
+                    match control_msg {
+                        ControlAction::IHave(IHave {
+                            topic_hash,
+                            message_ids,
+                        }) => {
+                            ihave_msgs.push((topic_hash, message_ids));
+                        }
+                        ControlAction::IWant(IWant { message_ids }) => {
+                            self.handle_iwant(&propagation_source, message_ids)
+                        }
+                        ControlAction::Graft(Graft { topic_hash }) => graft_msgs.push(topic_hash),
+                        ControlAction::Prune(Prune {
+                            topic_hash,
+                            peers,
+                            backoff,
+                        }) => prune_msgs.push((topic_hash, peers, backoff)),
+                    }
+                }
+                if !ihave_msgs.is_empty() {
+                    self.handle_ihave(&propagation_source, ihave_msgs);
+                }
+                if !graft_msgs.is_empty() {
+                    self.handle_graft(&propagation_source, graft_msgs);
+                }
+                if !prune_msgs.is_empty() {
+                    self.handle_prune(&propagation_source, prune_msgs);
+                }
+            }
+        }
+    }
+
+    #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))]
+    fn poll(
+        &mut self,
+        cx: &mut Context<'_>,
+    ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
+        if let Some(event) = self.events.pop_front() {
+            return Poll::Ready(event);
+        }
+
+        // update scores
+        if let Some((peer_score, _, interval, _)) = &mut self.peer_score {
+            while let Poll::Ready(Some(_)) = interval.poll_next_unpin(cx) {
+                peer_score.refresh_scores();
+            }
+        }
+
+        while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) {
+            self.heartbeat();
+        }
+
+        Poll::Pending
+    }
+
+    fn on_swarm_event(&mut self, event: FromSwarm) {
+        match event {
+            FromSwarm::ConnectionEstablished(connection_established) => {
+                self.on_connection_established(connection_established)
+            }
+            FromSwarm::ConnectionClosed(connection_closed) => {
+                self.on_connection_closed(connection_closed)
+            }
+            FromSwarm::AddressChange(address_change) => self.on_address_change(address_change),
+            _ => {}
+        }
+    }
+}
+
+/// This is called when peers are added to any mesh. It checks if the peer exists
+/// in any other mesh. If this is the first mesh they have joined, it queues a message to notify
+/// the appropriate connection handler to maintain a connection.
+fn peer_added_to_mesh(
+    peer_id: PeerId,
+    new_topics: Vec<&TopicHash>,
+    mesh: &HashMap<TopicHash, BTreeSet<PeerId>>,
+    events: &mut VecDeque<ToSwarm<Event, HandlerIn>>,
+    connections: &HashMap<PeerId, PeerConnections>,
+) {
+    // Ensure there is an active connection
+    let connection_id = match connections.get(&peer_id) {
+        Some(p) => p
+            .connections
+            .first()
+            .expect("There should be at least one connection to a peer."),
+        None => {
+            tracing::error!(peer_id=%peer_id, "Peer not existent when added to the mesh");
+            return;
+        }
+    };
+
+    if let Some(peer) = connections.get(&peer_id) {
+        for topic in &peer.topics {
+            if !new_topics.contains(&topic) {
+                if let Some(mesh_peers) = mesh.get(topic) {
+                    if mesh_peers.contains(&peer_id) {
+                        // the peer is already in a mesh for another topic
+                        return;
+                    }
+                }
+            }
+        }
+    }
+    // This is the first mesh the peer has joined, inform the handler
+    events.push_back(ToSwarm::NotifyHandler {
+        peer_id,
+        event: HandlerIn::JoinedMesh,
+        handler: NotifyHandler::One(*connection_id),
+    });
+}
+
+/// This is called when peers are removed from a mesh. It checks if the peer exists
+/// in any other mesh. If this was the last mesh they were part of, it queues a message to
+/// notify the handler to no longer maintain a connection.
+fn peer_removed_from_mesh(
+    peer_id: PeerId,
+    old_topic: &TopicHash,
+    mesh: &HashMap<TopicHash, BTreeSet<PeerId>>,
+    events: &mut VecDeque<ToSwarm<Event, HandlerIn>>,
+    connections: &HashMap<PeerId, PeerConnections>,
+) {
+    // Ensure there is an active connection
+    let connection_id = match connections.get(&peer_id) {
+        Some(p) => p
+            .connections
+            .first()
+            .expect("There should be at least one connection to a peer."),
+        None => {
+            tracing::error!(peer_id=%peer_id, "Peer not existent when removed from mesh");
+            return;
+        }
+    };
+
+    if let Some(peer) = connections.get(&peer_id) {
+        for topic in &peer.topics {
+            if topic != old_topic {
+                if let Some(mesh_peers) = mesh.get(topic) {
+                    if mesh_peers.contains(&peer_id) {
+                        // the peer exists in another mesh still
+                        return;
+                    }
+                }
+            }
+        }
+    }
+    // The peer is not in any other mesh, inform the handler
+    events.push_back(ToSwarm::NotifyHandler {
+        peer_id,
+        event: HandlerIn::LeftMesh,
+        handler: NotifyHandler::One(*connection_id),
+    });
+}
+
+/// Helper function to get a subset of random gossipsub peers for a `topic_hash`
+/// filtered by the function `f`. The number of peers to get equals the output of `n_map`
+/// that gets as input the number of filtered peers.
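+///
+/// Illustrative call (hypothetical closure values; a sketch only):
+///
+/// ```ignore
+/// // Pick max(gossip_lazy, gossip_factor * |candidates|) targets, e.g. |m| max(6, m / 4).
+/// let targets = get_random_peers_dynamic(&connected_peers, &topic_hash, |m| max(6, m / 4), |p| {
+///     !mesh_peers.contains(p)
+/// });
+/// ```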
+fn get_random_peers_dynamic( + connected_peers: &HashMap<PeerId, PeerConnections>, + topic_hash: &TopicHash, + // maps the number of total peers to the number of selected peers + n_map: impl Fn(usize) -> usize, + mut f: impl FnMut(&PeerId) -> bool, +) -> BTreeSet<PeerId> { + let mut gossip_peers = connected_peers + .iter() + .filter(|(_, p)| p.topics.contains(topic_hash)) + .filter(|(peer_id, _)| f(peer_id)) + .filter(|(_, p)| p.kind == PeerKind::Gossipsub || p.kind == PeerKind::Gossipsubv1_1) + .map(|(peer_id, _)| *peer_id) + .collect::<Vec<_>>(); + + // If we have fewer peers than needed, return them all + let n = n_map(gossip_peers.len()); + if gossip_peers.len() <= n { + tracing::debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); + return gossip_peers.into_iter().collect(); + } + + // We have more peers than needed; shuffle them and return n of them + let mut rng = thread_rng(); + gossip_peers.partial_shuffle(&mut rng, n); + + tracing::debug!("RANDOM PEERS: Got {:?} peers", n); + + gossip_peers.into_iter().take(n).collect() +} + +/// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` +/// filtered by the function `f`. +fn get_random_peers( + connected_peers: &HashMap<PeerId, PeerConnections>, + topic_hash: &TopicHash, + n: usize, + f: impl FnMut(&PeerId) -> bool, +) -> BTreeSet<PeerId> { + get_random_peers_dynamic(connected_peers, topic_hash, |_| n, f) +} + +/// Validates the combination of signing, privacy and message validation to ensure the +/// configuration will not reject published messages. +fn validate_config( + authenticity: &MessageAuthenticity, + validation_mode: &ValidationMode, +) -> Result<(), &'static str> { + match validation_mode { + ValidationMode::Anonymous => { + if authenticity.is_signing() { + return Err("Cannot enable message signing with an Anonymous validation mode. Consider changing either the ValidationMode or MessageAuthenticity"); + } + + if !authenticity.is_anonymous() { + return Err("Published messages contain an author but incoming messages with an author will be rejected. Consider adjusting the validation or privacy settings in the config"); + } + } + ValidationMode::Strict => { + if !authenticity.is_signing() { + return Err( + "Messages will be published unsigned and incoming unsigned messages will be rejected. Consider adjusting the validation or privacy settings in the config" + ); + } + } + _ => {} + } + Ok(()) +} + +impl<D: DataTransform, F: TopicSubscriptionFilter> fmt::Debug for Behaviour<D, F> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Behaviour") + .field("config", &self.config) + .field("events", &self.events.len()) + .field("publish_config", &self.publish_config) + .field("mesh", &self.mesh) + .field("fanout", &self.fanout) + .field("fanout_last_pub", &self.fanout_last_pub) + .field("mcache", &self.mcache) + .field("heartbeat", &self.heartbeat) + .finish() + } +} + +impl fmt::Debug for PublishConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PublishConfig::Signing { author, ..
} => { + f.write_fmt(format_args!("PublishConfig::Signing({author})")) + } + PublishConfig::Author(author) => { + f.write_fmt(format_args!("PublishConfig::Author({author})")) + } + PublishConfig::RandomAuthor => f.write_fmt(format_args!("PublishConfig::RandomAuthor")), + PublishConfig::Anonymous => f.write_fmt(format_args!("PublishConfig::Anonymous")), + } + } +} diff --git a/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs b/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs new file mode 100644 index 000000000..f191d38f5 --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs @@ -0,0 +1,5243 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
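Editor's aside: the `validate_config` pairings above can be exercised directly. A minimal sketch, using only types visible in this patch; the bare `Behaviour` type relies on its default generic parameters, exactly as the tests below do, and the import paths are assumed from this module's layout:

```rust
use crate::gossipsub::{Behaviour, ConfigBuilder, MessageAuthenticity, ValidationMode};
use libp2p::identity::Keypair;

fn check_config_pairings() {
    // Anonymous publishing must be paired with ValidationMode::Anonymous;
    // validate_config() rejects mismatches at construction time.
    let anon_config = ConfigBuilder::default()
        .validation_mode(ValidationMode::Anonymous)
        .build()
        .unwrap();
    let anon: Result<Behaviour, _> = Behaviour::new(MessageAuthenticity::Anonymous, anon_config);
    assert!(anon.is_ok());

    // Strict validation (the default) requires signed publishing.
    let keypair = Keypair::generate_ed25519();
    let strict_config = ConfigBuilder::default().build().unwrap();
    let signed: Result<Behaviour, _> =
        Behaviour::new(MessageAuthenticity::Signed(keypair), strict_config);
    assert!(signed.is_ok());

    // Mismatch: signing while validation is Anonymous returns an Err.
    let bad_config = ConfigBuilder::default()
        .validation_mode(ValidationMode::Anonymous)
        .build()
        .unwrap();
    let signing_key = Keypair::generate_ed25519();
    assert!(Behaviour::new(MessageAuthenticity::Signed(signing_key), bad_config).is_err());
}
```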
+ +// Collection of tests for the gossipsub network behaviour + +use super::*; +use crate::gossipsub::subscription_filter::WhitelistSubscriptionFilter; +use crate::gossipsub::transform::{DataTransform, IdentityTransform}; +use crate::gossipsub::types::{RpcOut, RpcReceiver}; +use crate::gossipsub::ValidationError; +use crate::gossipsub::{ + config::Config, config::ConfigBuilder, types::Rpc, IdentTopic as Topic, TopicScoreParams, +}; +use async_std::net::Ipv4Addr; +use byteorder::{BigEndian, ByteOrder}; +use libp2p::core::{ConnectedPoint, Endpoint}; +use rand::Rng; +use std::thread::sleep; +use std::time::Duration; + +#[derive(Default, Debug)] +struct InjectNodes +// TODO: remove trait bound Default when this issue is fixed: +// https://github.com/colin-kiegel/rust-derive-builder/issues/93 +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + peer_no: usize, + topics: Vec, + to_subscribe: bool, + gs_config: Config, + explicit: usize, + outbound: usize, + scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, + data_transform: D, + subscription_filter: F, +} + +impl InjectNodes +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + #[allow(clippy::type_complexity)] + pub(crate) fn create_network( + self, + ) -> ( + Behaviour, + Vec, + HashMap, + Vec, + ) { + let keypair = libp2p::identity::Keypair::generate_ed25519(); + // create a gossipsub struct + let mut gs: Behaviour = Behaviour::new_with_subscription_filter_and_transform( + MessageAuthenticity::Signed(keypair), + self.gs_config, + None, + self.subscription_filter, + self.data_transform, + ) + .unwrap(); + + if let Some((scoring_params, scoring_thresholds)) = self.scoring { + gs.with_peer_score(scoring_params, scoring_thresholds) + .unwrap(); + } + + let mut topic_hashes = vec![]; + + // subscribe to the topics + for t in self.topics { + let topic = Topic::new(t); + gs.subscribe(&topic).unwrap(); + topic_hashes.push(topic.hash().clone()); + } + + // build and connect peer_no random peers + let mut peers = vec![]; + let mut receivers = HashMap::new(); + + let empty = vec![]; + for i in 0..self.peer_no { + let (peer, receiver) = add_peer( + &mut gs, + if self.to_subscribe { + &topic_hashes + } else { + &empty + }, + i < self.outbound, + i < self.explicit, + ); + peers.push(peer); + receivers.insert(peer, receiver); + } + + (gs, peers, receivers, topic_hashes) + } + + fn peer_no(mut self, peer_no: usize) -> Self { + self.peer_no = peer_no; + self + } + + fn topics(mut self, topics: Vec) -> Self { + self.topics = topics; + self + } + + #[allow(clippy::wrong_self_convention)] + fn to_subscribe(mut self, to_subscribe: bool) -> Self { + self.to_subscribe = to_subscribe; + self + } + + fn gs_config(mut self, gs_config: Config) -> Self { + self.gs_config = gs_config; + self + } + + fn explicit(mut self, explicit: usize) -> Self { + self.explicit = explicit; + self + } + + fn outbound(mut self, outbound: usize) -> Self { + self.outbound = outbound; + self + } + + fn scoring(mut self, scoring: Option<(PeerScoreParams, PeerScoreThresholds)>) -> Self { + self.scoring = scoring; + self + } + + fn subscription_filter(mut self, subscription_filter: F) -> Self { + self.subscription_filter = subscription_filter; + self + } +} + +fn inject_nodes() -> InjectNodes +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + 
InjectNodes::default() +} + +fn inject_nodes1() -> InjectNodes { + InjectNodes::::default() +} + +// helper functions for testing + +fn add_peer( + gs: &mut Behaviour, + topic_hashes: &[TopicHash], + outbound: bool, + explicit: bool, +) -> (PeerId, RpcReceiver) +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + add_peer_with_addr(gs, topic_hashes, outbound, explicit, Multiaddr::empty()) +} + +fn add_peer_with_addr( + gs: &mut Behaviour, + topic_hashes: &[TopicHash], + outbound: bool, + explicit: bool, + address: Multiaddr, +) -> (PeerId, RpcReceiver) +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + add_peer_with_addr_and_kind( + gs, + topic_hashes, + outbound, + explicit, + address, + Some(PeerKind::Gossipsubv1_1), + ) +} + +fn add_peer_with_addr_and_kind( + gs: &mut Behaviour, + topic_hashes: &[TopicHash], + outbound: bool, + explicit: bool, + address: Multiaddr, + kind: Option, +) -> (PeerId, RpcReceiver) +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + let peer = PeerId::random(); + let endpoint = if outbound { + ConnectedPoint::Dialer { + address, + role_override: Endpoint::Dialer, + } + } else { + ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: address, + } + }; + + let sender = RpcSender::new(gs.config.connection_handler_queue_len()); + let receiver = sender.new_receiver(); + let connection_id = ConnectionId::new_unchecked(0); + gs.connected_peers.insert( + peer, + PeerConnections { + kind: kind.clone().unwrap_or(PeerKind::Floodsub), + connections: vec![connection_id], + topics: Default::default(), + sender, + }, + ); + + gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id: peer, + connection_id, + endpoint: &endpoint, + failed_addresses: &[], + other_established: 0, // first connection + })); + if let Some(kind) = kind { + gs.on_connection_handler_event( + peer, + ConnectionId::new_unchecked(0), + HandlerEvent::PeerKind(kind), + ); + } + if explicit { + gs.add_explicit_peer(&peer); + } + if !topic_hashes.is_empty() { + gs.handle_received_subscriptions( + &topic_hashes + .iter() + .cloned() + .map(|t| Subscription { + action: SubscriptionAction::Subscribe, + topic_hash: t, + }) + .collect::>(), + &peer, + ); + } + (peer, receiver) +} + +fn disconnect_peer(gs: &mut Behaviour, peer_id: &PeerId) +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + if let Some(peer_connections) = gs.connected_peers.get(peer_id) { + let fake_endpoint = ConnectedPoint::Dialer { + address: Multiaddr::empty(), + role_override: Endpoint::Dialer, + }; // this is not relevant + // peer_connections.connections should never be empty. + + let mut active_connections = peer_connections.connections.len(); + for connection_id in peer_connections.connections.clone() { + active_connections = active_connections.checked_sub(1).unwrap(); + + gs.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { + peer_id: *peer_id, + connection_id, + endpoint: &fake_endpoint, + remaining_established: active_connections, + })); + } + } +} + +// Converts a protobuf message into a gossipsub message for reading the Gossipsub event queue. +fn proto_to_message(rpc: &proto::RPC) -> Rpc { + // Store valid messages. 
+ let mut messages = Vec::with_capacity(rpc.publish.len()); + let rpc = rpc.clone(); + for message in rpc.publish.into_iter() { + messages.push(RawMessage { + source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), + data: message.data.unwrap_or_default(), + sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application + topic: TopicHash::from_raw(message.topic), + signature: message.signature, // don't inform the application + key: None, + validated: false, + }); + } + let mut control_msgs = Vec::new(); + if let Some(rpc_control) = rpc.control { + // Collect the gossipsub control messages + let ihave_msgs: Vec = rpc_control + .ihave + .into_iter() + .map(|ihave| { + ControlAction::IHave(IHave { + topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), + message_ids: ihave + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) + }) + .collect(); + + let iwant_msgs: Vec = rpc_control + .iwant + .into_iter() + .map(|iwant| { + ControlAction::IWant(IWant { + message_ids: iwant + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) + }) + .collect(); + + let graft_msgs: Vec = rpc_control + .graft + .into_iter() + .map(|graft| { + ControlAction::Graft(Graft { + topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + }) + }) + .collect(); + + let mut prune_msgs = Vec::new(); + + for prune in rpc_control.prune { + // filter out invalid peers + let peers = prune + .peers + .into_iter() + .filter_map(|info| { + info.peer_id + .and_then(|id| PeerId::from_bytes(&id).ok()) + .map(|peer_id| + //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 + PeerInfo { + peer_id: Some(peer_id), + }) + }) + .collect::>(); + + let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default()); + prune_msgs.push(ControlAction::Prune(Prune { + topic_hash, + peers, + backoff: prune.backoff, + })); + } + + control_msgs.extend(ihave_msgs); + control_msgs.extend(iwant_msgs); + control_msgs.extend(graft_msgs); + control_msgs.extend(prune_msgs); + } + + Rpc { + messages, + subscriptions: rpc + .subscriptions + .into_iter() + .map(|sub| Subscription { + action: if Some(true) == sub.subscribe { + SubscriptionAction::Subscribe + } else { + SubscriptionAction::Unsubscribe + }, + topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), + }) + .collect(), + control_msgs, + } +} + +#[test] +/// Test local node subscribing to a topic +fn test_subscribe() { + // The node should: + // - Create an empty vector in mesh[topic] + // - Send subscription request to all peers + // - run JOIN(topic) + + let subscribe_topic = vec![String::from("test_subscribe")]; + let (gs, _, receivers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(subscribe_topic) + .to_subscribe(true) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // collect all the subscriptions + let subscriptions = receivers + .into_values() + .fold(0, |mut collected_subscriptions, c| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Subscribe(_)) = priority.try_recv() { + collected_subscriptions += 1 + } + } + collected_subscriptions + }); + + // we sent a subscribe to all known peers + assert_eq!(subscriptions, 20); +} + +/// Test unsubscribe. 
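Editor's aside: the drain-and-count fold in `test_subscribe` above recurs throughout this test module. Purely as an illustration (not part of the patch), the pattern generalises to a helper like the following, using only the `RpcReceiver` API these tests already exercise (`into_inner()` on a queue, then non-blocking `try_recv()`):

```rust
// Hypothetical helper, shown for clarity only: count queued priority
// messages matching a predicate, consuming the receivers.
fn count_priority_msgs(
    receivers: HashMap<PeerId, RpcReceiver>,
    mut filter: impl FnMut(&RpcOut) -> bool,
) -> usize {
    receivers.into_values().fold(0, |mut count, c| {
        let priority = c.priority.into_inner();
        while !priority.is_empty() {
            if let Ok(rpc) = priority.try_recv() {
                if filter(&rpc) {
                    count += 1;
                }
            }
        }
        count
    })
}

// Usage mirroring test_subscribe:
// let n = count_priority_msgs(receivers, |m| matches!(m, RpcOut::Subscribe(_)));
// assert_eq!(n, 20);
```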
+#[test] +fn test_unsubscribe() { + // Unsubscribe should: + // - Remove the mesh entry for topic + // - Send UNSUBSCRIBE to all known peers + // - Call Leave + + let topic_strings = vec![String::from("topic1"), String::from("topic2")]; + let topics = topic_strings + .iter() + .map(|t| Topic::new(t.clone())) + .collect::<Vec<Topic>>(); + + // subscribe to topic_strings + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topic_strings) + .to_subscribe(true) + .create_network(); + + for topic_hash in &topic_hashes { + assert!( + gs.connected_peers + .values() + .any(|p| p.topics.contains(topic_hash)), + "The connected peers should contain a topic entry" + ); + assert!( + gs.mesh.get(topic_hash).is_some(), + "mesh should contain a topic entry" + ); + } + + // unsubscribe from both topics + assert!( + gs.unsubscribe(&topics[0]).unwrap(), + "should be able to unsubscribe successfully from each topic", + ); + assert!( + gs.unsubscribe(&topics[1]).unwrap(), + "should be able to unsubscribe successfully from each topic", + ); + + // collect all the unsubscribe messages + let unsubscriptions = receivers + .into_values() + .fold(0, |mut collected_unsubscriptions, c| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Unsubscribe(_)) = priority.try_recv() { + collected_unsubscriptions += 1 + } + } + collected_unsubscriptions + }); + + // we sent an unsubscribe to all known peers, for two topics + assert_eq!(unsubscriptions, 40); + + // check we clean up internal structures + for topic_hash in &topic_hashes { + assert!( + gs.mesh.get(topic_hash).is_none(), + "All topics should have been removed from the mesh" + ); + } +} + +/// Test JOIN(topic) functionality. +#[test] +fn test_join() { + // The Join function should: + // - Remove peers from fanout[topic] + // - Add any fanout[topic] peers to the mesh (up to mesh_n) + // - Fill up to mesh_n peers from known gossipsub peers in the topic + // - Send GRAFT messages to all nodes added to the mesh + + // This test is not an isolated unit test; rather it uses higher-level + // subscribe/unsubscribe to perform the test. + + let topic_strings = vec![String::from("topic1"), String::from("topic2")]; + let topics = topic_strings + .iter() + .map(|t| Topic::new(t.clone())) + .collect::<Vec<Topic>>(); + + let (mut gs, _, mut receivers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topic_strings) + .to_subscribe(true) + .create_network(); + + // Flush previous GRAFT messages.
+ receivers = flush_events(&mut gs, receivers); + + // unsubscribe, then call join to invoke functionality + assert!( + gs.unsubscribe(&topics[0]).unwrap(), + "should be able to unsubscribe successfully" + ); + assert!( + gs.unsubscribe(&topics[1]).unwrap(), + "should be able to unsubscribe successfully" + ); + + // re-subscribe - there should be peers associated with the topic + assert!( + gs.subscribe(&topics[0]).unwrap(), + "should be able to subscribe successfully" + ); + + // should have added mesh_n nodes to the mesh + assert!( + gs.mesh.get(&topic_hashes[0]).unwrap().len() == 6, + "Should have added 6 nodes to the mesh" + ); + + fn count_grafts( + receivers: HashMap<PeerId, RpcReceiver>, + ) -> (usize, HashMap<PeerId, RpcReceiver>) { + let mut new_receivers = HashMap::new(); + let mut acc = 0; + + for (peer_id, c) in receivers.into_iter() { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Graft(_)) = priority.try_recv() { + acc += 1; + } + } + new_receivers.insert( + peer_id, + RpcReceiver { + priority_len: c.priority_len, + priority: priority.peekable(), + non_priority: c.non_priority, + }, + ); + } + (acc, new_receivers) + } + + // there should be mesh_n GRAFT messages. + let (graft_messages, mut receivers) = count_grafts(receivers); + + assert_eq!( + graft_messages, 6, + "There should be 6 graft messages sent to peers" + ); + + // verify fanout nodes + // add 3 random peers to the fanout[topic1] + gs.fanout + .insert(topic_hashes[1].clone(), Default::default()); + let mut new_peers: Vec<PeerId> = vec![]; + + for _ in 0..3 { + let random_peer = PeerId::random(); + // inform the behaviour of a new peer + let address = "/ip4/127.0.0.1".parse::<Multiaddr>().unwrap(); + gs.handle_established_inbound_connection( + ConnectionId::new_unchecked(0), + random_peer, + &address, + &address, + ) + .unwrap(); + let sender = RpcSender::new(gs.config.connection_handler_queue_len()); + let receiver = sender.new_receiver(); + let connection_id = ConnectionId::new_unchecked(0); + gs.connected_peers.insert( + random_peer, + PeerConnections { + kind: PeerKind::Floodsub, + connections: vec![connection_id], + topics: Default::default(), + sender, + }, + ); + receivers.insert(random_peer, receiver); + + gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id: random_peer, + connection_id, + endpoint: &ConnectedPoint::Dialer { + address, + role_override: Endpoint::Dialer, + }, + failed_addresses: &[], + other_established: 0, + })); + + // add the new peer to the fanout + let fanout_peers = gs.fanout.get_mut(&topic_hashes[1]).unwrap(); + fanout_peers.insert(random_peer); + new_peers.push(random_peer); + } + + // subscribe to topic1 + gs.subscribe(&topics[1]).unwrap(); + + // the three new peers should have been added, along with 3 more from the pool.
+ assert!( + gs.mesh.get(&topic_hashes[1]).unwrap().len() == 6, + "Should have added 6 nodes to the mesh" + ); + let mesh_peers = gs.mesh.get(&topic_hashes[1]).unwrap(); + for new_peer in new_peers { + assert!( + mesh_peers.contains(&new_peer), + "Fanout peer should be included in the mesh" + ); + } + + // there should now be 6 graft messages to be sent + let (graft_messages, _) = count_grafts(receivers); + + assert_eq!( + graft_messages, 6, + "There should be 6 graft messages sent to peers" + ); +} + +/// Test local node publish to subscribed topic +#[test] +fn test_publish_without_flood_publishing() { + // node should: + // - Send publish message to all peers + // - Insert message into gs.mcache and gs.received + + // turn off flood publish to test old behaviour + let config = ConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + + let publish_topic = String::from("test_publish"); + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![publish_topic.clone()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // all peers should be subscribed to the topic + assert_eq!( + gs.connected_peers + .values() + .filter(|p| p.topics.contains(&topic_hashes[0])) + .count(), + 20, + "Peers should be subscribed to the topic" + ); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new(publish_topic), publish_data).unwrap(); + + // Collect all publish messages + let publishes = receivers + .into_values() + .fold(vec![], |mut collected_publish, c| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, ..
}) = priority.try_recv() { + collected_publish.push(message); + } + } + collected_publish + }); + + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); + + let msg_id = gs.config.message_id(message); + + let config: Config = Config::default(); + assert_eq!( + publishes.len(), + config.mesh_n(), + "Should send a publish message to at least mesh_n peers" + ); + + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} + +/// Test local node publish to unsubscribed topic +#[test] +fn test_fanout() { + // node should: + // - Populate fanout peers + // - Send publish message to fanout peers + // - Insert message into gs.mcache and gs.received + + //turn off flood publish to test fanout behaviour + let config = ConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + + let fanout_topic = String::from("test_fanout"); + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![fanout_topic.clone()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + // Unsubscribe from topic + assert!( + gs.unsubscribe(&Topic::new(fanout_topic.clone())).unwrap(), + "should be able to unsubscribe successfully from topic" + ); + + // Publish on unsubscribed topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new(fanout_topic.clone()), publish_data) + .unwrap(); + + assert_eq!( + gs.fanout + .get(&TopicHash::from_raw(fanout_topic)) + .unwrap() + .len(), + gs.config.mesh_n(), + "Fanout should contain `mesh_n` peers for fanout topic" + ); + + // Collect all publish messages + let publishes = receivers + .into_values() + .fold(vec![], |mut collected_publish, c| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { + collected_publish.push(message); + } + } + collected_publish + }); + + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); + + let msg_id = gs.config.message_id(message); + + assert_eq!( + publishes.len(), + gs.config.mesh_n(), + "Should send a publish message to `mesh_n` fanout peers" + ); + + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} + +/// Test the gossipsub NetworkBehaviour peer connection logic. 
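Editor's aside: the publish-path tests in this module pin down three recipient-selection modes between them. A hedged summary, as illustrative pseudologic rather than the crate's actual control flow (the three string descriptions paraphrase test_publish_without_flood_publishing, test_fanout, and test_flood_publish):

```rust
// What the publish tests establish about recipient selection.
fn publish_recipients(flood_publish: bool, subscribed: bool) -> &'static str {
    match (flood_publish, subscribed) {
        // Default config: send to every peer subscribed to the topic.
        (true, _) => "all peers subscribed to the topic",
        // flood_publish(false) and subscribed: send to the topic's mesh peers.
        (false, true) => "the mesh peers for the topic (mesh_n of them)",
        // flood_publish(false) and not subscribed: fanout peers, filled on first publish.
        (false, false) => "fanout peers for the topic (mesh_n of them)",
    }
}
```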
+#[test] +fn test_inject_connected() { + let (gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .create_network(); + + // check that our subscriptions are sent to each of the peers + // collect all the SendEvents + let subscriptions = receivers.into_iter().fold( + HashMap::<PeerId, Vec<String>>::new(), + |mut collected_subscriptions, (peer, c)| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Subscribe(topic)) = priority.try_recv() { + let mut peer_subs = collected_subscriptions.remove(&peer).unwrap_or_default(); + peer_subs.push(topic.into_string()); + collected_subscriptions.insert(peer, peer_subs); + } + } + collected_subscriptions + }, + ); + + // check that there are two subscriptions sent to each peer + for peer_subs in subscriptions.values() { + assert!(peer_subs.contains(&String::from("topic1"))); + assert!(peer_subs.contains(&String::from("topic2"))); + assert_eq!(peer_subs.len(), 2); + } + + // check that there are 20 send events created + assert_eq!(subscriptions.len(), 20); + + // should add the new peers to `peer_topics` with an empty vec as a gossipsub node + for peer in peers { + let peer = gs.connected_peers.get(&peer).unwrap(); + assert!( + peer.topics == topic_hashes.iter().cloned().collect(), + "The topics for each node should be all topics" + ); + } +} + +/// Test subscription handling +#[test] +fn test_handle_received_subscriptions() { + // For every subscription: + // SUBSCRIBE: - Add subscribed topic to peer_topics for peer. + // - Add peer to topics_peer. + // UNSUBSCRIBE - Remove topic from peer_topics for peer. + // - Remove peer from topic_peers. + + let topics = ["topic1", "topic2", "topic3", "topic4"] + .iter() + .map(|&t| String::from(t)) + .collect(); + let (mut gs, peers, _receivers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topics) + .to_subscribe(false) + .create_network(); + + // The first peer sends 3 subscriptions and 1 unsubscription + let mut subscriptions = topic_hashes[..3] + .iter() + .map(|topic_hash| Subscription { + action: SubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }) + .collect::<Vec<Subscription>>(); + + subscriptions.push(Subscription { + action: SubscriptionAction::Unsubscribe, + topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), + }); + + let unknown_peer = PeerId::random(); + // process the subscriptions + // first and second peers send subscriptions + gs.handle_received_subscriptions(&subscriptions, &peers[0]); + gs.handle_received_subscriptions(&subscriptions, &peers[1]); + // unknown peer sends the same subscriptions + gs.handle_received_subscriptions(&subscriptions, &unknown_peer); + + // verify the result + + let peer = gs.connected_peers.get(&peers[0]).unwrap(); + assert!( + peer.topics + == topic_hashes + .iter() + .take(3) + .cloned() + .collect::<BTreeSet<_>>(), + "First peer should be subscribed to three topics" + ); + let peer1 = gs.connected_peers.get(&peers[1]).unwrap(); + assert!( + peer1.topics + == topic_hashes + .iter() + .take(3) + .cloned() + .collect::<BTreeSet<_>>(), + "Second peer should be subscribed to three topics" + ); + + assert!( + gs.connected_peers.get(&unknown_peer).is_none(), + "Unknown peer should not have been added" + ); + + for topic_hash in topic_hashes[..3].iter() { + let topic_peers = gs + .connected_peers + .iter() + .filter(|(_, p)| p.topics.contains(topic_hash)) + .map(|(peer_id, _)| *peer_id) + .collect::<BTreeSet<PeerId>>(); + assert!( + topic_peers ==
peers[..2].iter().cloned().collect(), + "Two peers should be added to the first three topics" + ); + } + + // Peer 0 unsubscribes from the first topic + + gs.handle_received_subscriptions( + &[Subscription { + action: SubscriptionAction::Unsubscribe, + topic_hash: topic_hashes[0].clone(), + }], + &peers[0], + ); + + let peer = gs.connected_peers.get(&peers[0]).unwrap(); + assert!( + peer.topics == topic_hashes[1..3].iter().cloned().collect::<BTreeSet<_>>(), + "Peer should be subscribed to two topics" + ); + + // only gossipsub at the moment + let topic_peers = gs + .connected_peers + .iter() + .filter(|(_, p)| p.topics.contains(&topic_hashes[0])) + .map(|(peer_id, _)| *peer_id) + .collect::<BTreeSet<PeerId>>(); + + assert!( + topic_peers == peers[1..2].iter().cloned().collect(), + "Only the second peer should be in the first topic" + ); +} + +/// Test Gossipsub.get_random_peers() function +#[test] +fn test_get_random_peers() { + // generate a default Config + let gs_config = ConfigBuilder::default() + .validation_mode(ValidationMode::Anonymous) + .build() + .unwrap(); + // create a gossipsub struct + let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::Anonymous, gs_config).unwrap(); + + // create a topic and fill it with some peers + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + let mut topics = BTreeSet::new(); + topics.insert(topic_hash.clone()); + + for _ in 0..20 { + let peer_id = PeerId::random(); + peers.push(peer_id); + gs.connected_peers.insert( + peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: RpcSender::new(gs.config.connection_handler_queue_len()), + }, + ); + } + + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_| true); + assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 30, |_| true); + assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); + assert!( + random_peers == peers.iter().cloned().collect(), + "Expected no shuffling" + ); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 20, |_| true); + assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); + assert!( + random_peers == peers.iter().cloned().collect(), + "Expected no shuffling" + ); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 0, |_| true); + assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); + // test the filter + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_| false); + assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 10, { + |peer| peers.contains(peer) + }); + assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); +}
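Editor's aside: a natural companion to test_get_random_peers would exercise the dynamic `n_map` variant directly (get_random_peers is just get_random_peers_dynamic with a constant map). A sketch under the same setup; this is a hypothetical test, not part of the patch:

```rust
#[test]
fn test_get_random_peers_dynamic_n_map() {
    // Same setup as test_get_random_peers above: 20 connected gossipsub peers.
    let gs_config = ConfigBuilder::default()
        .validation_mode(ValidationMode::Anonymous)
        .build()
        .unwrap();
    let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::Anonymous, gs_config).unwrap();

    let topic_hash = Topic::new("Test").hash();
    let mut topics = BTreeSet::new();
    topics.insert(topic_hash.clone());

    for _ in 0..20 {
        gs.connected_peers.insert(
            PeerId::random(),
            PeerConnections {
                kind: PeerKind::Gossipsubv1_1,
                connections: vec![ConnectionId::new_unchecked(0)],
                topics: topics.clone(),
                sender: RpcSender::new(gs.config.connection_handler_queue_len()),
            },
        );
    }

    // `n_map` receives the number of eligible peers (20 here) and returns
    // how many to select; ask for half of whatever is available.
    let half = get_random_peers_dynamic(&gs.connected_peers, &topic_hash, |n| n / 2, |_| true);
    assert_eq!(half.len(), 10, "Expected n_map to select half of the peers");
}
```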
+/// Tests that the correct message is sent when a peer asks for a message in our cache. +#[test] +fn test_handle_iwant_msg_cached() { + let (mut gs, peers, receivers, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + let raw_message = RawMessage { + source: Some(peers[11]), + data: vec![1, 2, 3, 4], + sequence_number: Some(1u64), + topic: TopicHash::from_raw("topic"), + signature: None, + key: None, + validated: true, + }; + + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + + let msg_id = gs.config.message_id(message); + gs.mcache.put(&msg_id, raw_message); + + gs.handle_iwant(&peers[7], vec![msg_id.clone()]); + + // the messages we are sending + let sent_messages = receivers + .into_values() + .fold(vec![], |mut collected_messages, c| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::Forward { message, .. }) = non_priority.try_recv() { + collected_messages.push(message) + } + } + collected_messages + }); + + assert!( + sent_messages + .iter() + .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .any(|msg| gs.config.message_id(&msg) == msg_id), + "Expected the cached message to be sent to an IWANT peer" + ); +} + +/// Tests that messages are sent correctly depending on the shifting of the message cache. +#[test] +fn test_handle_iwant_msg_cached_shifted() { + let (mut gs, peers, mut receivers, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + // perform up to 9 memshifts and check that the message leaves the cache + for shift in 1..10 { + let raw_message = RawMessage { + source: Some(peers[11]), + data: vec![1, 2, 3, 4], + sequence_number: Some(shift), + topic: TopicHash::from_raw("topic"), + signature: None, + key: None, + validated: true, + }; + + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + + let msg_id = gs.config.message_id(message); + gs.mcache.put(&msg_id, raw_message); + for _ in 0..shift { + gs.mcache.shift(); + } + + gs.handle_iwant(&peers[7], vec![msg_id.clone()]); + + // is the message being sent?
+ let mut message_exists = false; + receivers = receivers.into_iter().map(|(peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if matches!(non_priority.try_recv(), Ok(RpcOut::Forward{message, timeout: _ }) if + gs.config.message_id( + &gs.data_transform + .inbound_transform(message.clone()) + .unwrap(), + ) == msg_id) + { + message_exists = true; + } + } + ( + peer_id, + RpcReceiver { + priority_len: c.priority_len, + priority: c.priority, + non_priority: non_priority.peekable(), + }, + ) + }).collect(); + // default history_length is 5, expect no messages after shift > 5 + if shift < 5 { + assert!( + message_exists, + "Expected the cached message to be sent to an IWANT peer before 5 shifts" + ); + } else { + assert!( + !message_exists, + "Expected the cached message to not be sent to an IWANT peer after 5 shifts" + ); + } + } +} + +/// tests that an event is not created when a peer asks for a message not in our cache +#[test] +fn test_handle_iwant_msg_not_cached() { + let (mut gs, peers, _, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + let events_before = gs.events.len(); + gs.handle_iwant(&peers[7], vec![MessageId::new(b"unknown id")]); + let events_after = gs.events.len(); + + assert_eq!( + events_before, events_after, + "Expected event count to stay the same" + ); +} + +/// tests that an event is created when a peer shares that it has a message we want +#[test] +fn test_handle_ihave_subscribed_and_msg_not_cached() { + let (mut gs, peers, mut receivers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_ihave( + &peers[7], + vec![(topic_hashes[0].clone(), vec![MessageId::new(b"unknown id")])], + ); + + // check that we sent an IWANT request for `unknown id` + let mut iwant_exists = false; + let receiver = receivers.remove(&peers[7]).unwrap(); + let non_priority = receiver.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IWant(IWant { message_ids })) = non_priority.try_recv() { + if message_ids + .iter() + .any(|m| *m == MessageId::new(b"unknown id")) + { + iwant_exists = true; + break; + } + } + } + + assert!( + iwant_exists, + "Expected to send an IWANT control message for unknown message id" + ); +} + +/// tests that an event is not created when a peer shares that it has a message that +/// we already have +#[test] +fn test_handle_ihave_subscribed_and_msg_cached() { + let (mut gs, peers, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + let msg_id = MessageId::new(b"known id"); + + let events_before = gs.events.len(); + gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![msg_id])]); + let events_after = gs.events.len(); + + assert_eq!( + events_before, events_after, + "Expected event count to stay the same" + ) +} + +/// test that an event is not created when a peer shares that it has a message in +/// a topic that we are not subscribed to +#[test] +fn test_handle_ihave_not_subscribed() { + let (mut gs, peers, _, _) = inject_nodes1() + .peer_no(20) + .topics(vec![]) + .to_subscribe(true) + .create_network(); + + let events_before = gs.events.len(); + gs.handle_ihave( + &peers[7], + vec![( + TopicHash::from_raw(String::from("unsubscribed topic")), + vec![MessageId::new(b"irrelevant id")], + )], + ); + let events_after = gs.events.len(); + + assert_eq!(
events_before, events_after, + "Expected event count to stay the same" + ) +} + +/// tests that a peer is added to our mesh when we are both subscribed +/// to the same topic +#[test] +fn test_handle_graft_is_subscribed() { + let (mut gs, peers, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_graft(&peers[7], topic_hashes.clone()); + + assert!( + gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to have been added to mesh" + ); +} + +/// tests that a peer is not added to our mesh when they are subscribed to +/// a topic that we are not +#[test] +fn test_handle_graft_is_not_subscribed() { + let (mut gs, peers, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_graft( + &peers[7], + vec![TopicHash::from_raw(String::from("unsubscribed topic"))], + ); + + assert!( + !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to not have been added to mesh" + ); +} + +/// tests multiple topics in a single graft message +#[test] +fn test_handle_graft_multiple_topics() { + let topics: Vec<String> = ["topic1", "topic2", "topic3", "topic4"] + .iter() + .map(|&t| String::from(t)) + .collect(); + + let (mut gs, peers, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topics) + .to_subscribe(true) + .create_network(); + + let mut their_topics = topic_hashes.clone(); + // their_topics = [topic1, topic2, topic3] + // our_topics = [topic1, topic2, topic4] + their_topics.pop(); + gs.leave(&their_topics[2]); + + gs.handle_graft(&peers[7], their_topics.clone()); + + for hash in topic_hashes.iter().take(2) { + assert!( + gs.mesh.get(hash).unwrap().contains(&peers[7]), + "Expected peer to be in the mesh for the first 2 topics" + ); + } + + assert!( + gs.mesh.get(&topic_hashes[2]).is_none(), + "Expected the third topic to not be in the mesh" + ); +} + +/// tests that a peer is removed from our mesh +#[test] +fn test_handle_prune_peer_in_mesh() { + let (mut gs, peers, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + // insert peer into our mesh for 'topic1' + gs.mesh + .insert(topic_hashes[0].clone(), peers.iter().cloned().collect()); + assert!( + gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to be in mesh" + ); + + gs.handle_prune( + &peers[7], + topic_hashes + .iter() + .map(|h| (h.clone(), vec![], None)) + .collect(), + ); + assert!( + !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to be removed from mesh" + ); +} + +fn count_control_msgs( + receivers: HashMap<PeerId, RpcReceiver>, + mut filter: impl FnMut(&PeerId, &RpcOut) -> bool, +) -> (usize, HashMap<PeerId, RpcReceiver>) { + let mut new_receivers = HashMap::new(); + let mut collected_messages = 0; + for (peer_id, c) in receivers.into_iter() { + let priority = c.priority.into_inner(); + let non_priority = c.non_priority.into_inner(); + while !priority.is_empty() || !non_priority.is_empty() { + if let Ok(rpc) = priority.try_recv() { + if filter(&peer_id, &rpc) { + collected_messages += 1; + } + } + if let Ok(rpc) = non_priority.try_recv() { + if filter(&peer_id, &rpc) { + collected_messages += 1; + } + } + } + new_receivers.insert( + peer_id, + RpcReceiver { + priority_len: c.priority_len, + priority: priority.peekable(), + non_priority: non_priority.peekable(), + }, + ); + } + (collected_messages, new_receivers)
+} + +fn flush_events<D: DataTransform, F: TopicSubscriptionFilter>( + gs: &mut Behaviour<D, F>, + receivers: HashMap<PeerId, RpcReceiver>, +) -> HashMap<PeerId, RpcReceiver> { + gs.events.clear(); + let mut new_receivers = HashMap::new(); + for (peer_id, c) in receivers.into_iter() { + let priority = c.priority.into_inner(); + let non_priority = c.non_priority.into_inner(); + while !priority.is_empty() || !non_priority.is_empty() { + let _ = priority.try_recv(); + let _ = non_priority.try_recv(); + } + new_receivers.insert( + peer_id, + RpcReceiver { + priority_len: c.priority_len, + priority: priority.peekable(), + non_priority: non_priority.peekable(), + }, + ); + } + new_receivers +} + +/// tests that a peer added as explicit peer gets connected to +#[test] +fn test_explicit_peer_gets_connected() { + let (mut gs, _, _, _) = inject_nodes1() + .peer_no(0) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + // create a new peer + let peer = PeerId::random(); + + // add peer as explicit peer + gs.add_explicit_peer(&peer); + + let num_events = gs + .events + .iter() + .filter(|e| match e { + ToSwarm::Dial { opts } => opts.get_peer_id() == Some(peer), + _ => false, + }) + .count(); + + assert_eq!( + num_events, 1, + "There was no dial peer event for the explicit peer" + ); +} + +#[test] +fn test_explicit_peer_reconnects() { + let config = ConfigBuilder::default() + .check_explicit_peers_ticks(2) + .build() + .unwrap(); + let (mut gs, others, receivers, _) = inject_nodes1() + .peer_no(1) + .topics(Vec::new()) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + let peer = others.first().unwrap(); + + // add peer as explicit peer + gs.add_explicit_peer(peer); + + flush_events(&mut gs, receivers); + + // disconnect peer + disconnect_peer(&mut gs, peer); + + gs.heartbeat(); + + // check that there is no reconnect after the first heartbeat, since `check_explicit_peers_ticks == 2` + assert_eq!( + gs.events + .iter() + .filter(|e| match e { + ToSwarm::Dial { opts } => opts.get_peer_id() == Some(*peer), + _ => false, + }) + .count(), + 0, + "There was a dial peer event before explicit_peer_ticks heartbeats" + ); + + gs.heartbeat(); + + // check that there is a reconnect after the second heartbeat + assert!( + gs.events + .iter() + .filter(|e| match e { + ToSwarm::Dial { opts } => opts.get_peer_id() == Some(*peer), + _ => false, + }) + .count() + >= 1, + "There was no dial peer event for the explicit peer" + ); +} + +#[test] +fn test_handle_graft_explicit_peer() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .create_network(); + + let peer = peers.first().unwrap(); + + gs.handle_graft(peer, topic_hashes.clone()); + + // the peer should not get added to the mesh + assert!(gs.mesh[&topic_hashes[0]].is_empty()); + assert!(gs.mesh[&topic_hashes[1]].is_empty()); + + // check prunes + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == peer + && match m { + RpcOut::Prune(Prune { topic_hash, ..
}) => { + topic_hash == &topic_hashes[0] || topic_hash == &topic_hashes[1] + } + _ => false, + } + }); + assert!( + control_msgs >= 2, + "Not enough prunes sent when grafting from explicit peer" + ); +} + +#[test] +fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { + let (gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .create_network(); + + //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + assert_eq!( + gs.mesh[&topic_hashes[0]], + vec![peers[1]].into_iter().collect() + ); + + //assert that graft gets created to non-explicit peer + let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) + }); + assert!( + control_msgs >= 1, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) + }); + assert_eq!( + control_msgs, 0, + "A graft message got created to an explicit peer" + ); +} + +#[test] +fn do_not_graft_explicit_peer() { + let (mut gs, others, receivers, topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(vec![String::from("topic")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .create_network(); + + gs.heartbeat(); + + //mesh stays empty + assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); + + //assert that no graft gets created to explicit peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &others[0] && matches!(m, RpcOut::Graft { .. }) + }); + assert_eq!( + control_msgs, 0, + "A graft message got created to an explicit peer" + ); +} + +#[test] +fn do_forward_messages_to_explicit_peers() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers.into_iter().fold(0, |mut fwds, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if matches!(non_priority.try_recv(), Ok(RpcOut::Forward{message: m, timeout: _}) if peer_id == peers[0] && m.data == message.data) { + fwds +=1; + } + } + fwds + }), + 1, + "The message did not get forwarded to the explicit peer" + ); +} + +#[test] +fn explicit_peers_not_added_to_mesh_on_subscribe() { + let (mut gs, peers, receivers, _) = inject_nodes1() + .peer_no(2) + .topics(Vec::new()) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .create_network(); + + //create new topic, both peers subscribing to it but we do not subscribe to it + let topic = Topic::new(String::from("t")); + let topic_hash = topic.hash(); + for peer in peers.iter().take(2) { + gs.handle_received_subscriptions( + &[Subscription { + action: SubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }], + peer, + ); + } + + //subscribe now to topic + gs.subscribe(&topic).unwrap(); + + //only peer 1 is in the mesh not peer 0 (which 
is an explicit peer) + assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); + + //assert that graft gets created to non-explicit peer + let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) + }); + assert!( + control_msgs > 0, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) + }); + assert_eq!( + control_msgs, 0, + "A graft message got created to an explicit peer" + ); +} + +#[test] +fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { + let (mut gs, peers, receivers, _) = inject_nodes1() + .peer_no(2) + .topics(Vec::new()) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .create_network(); + + //create new topic, both peers subscribing to it but we do not subscribe to it + let topic = Topic::new(String::from("t")); + let topic_hash = topic.hash(); + for peer in peers.iter().take(2) { + gs.handle_received_subscriptions( + &[Subscription { + action: SubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }], + peer, + ); + } + + //we send a message for this topic => this will initialize the fanout + gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); + + //subscribe now to topic + gs.subscribe(&topic).unwrap(); + + //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); + + //assert that graft gets created to non-explicit peer + let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) + }); + assert!( + control_msgs >= 1, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) + }); + assert_eq!( + control_msgs, 0, + "A graft message got created to an explicit peer" + ); +} + +#[test] +fn no_gossip_gets_sent_to_explicit_peers() { + let (mut gs, peers, mut receivers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + + //forward the message + gs.handle_received_message(message, &local_id); + + //simulate multiple gossip calls (for randomness) + for _ in 0..3 { + gs.emit_gossip(); + } + + //assert that no gossip gets sent to explicit peer + let receiver = receivers.remove(&peers[0]).unwrap(); + let mut gossips = 0; + let non_priority = receiver.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IHave(_)) = non_priority.try_recv() { + gossips += 1; + } + } + assert_eq!(gossips, 0, "Gossip got emitted to explicit peer"); +} + +/// Tests the mesh maintenance addition +#[test] +fn test_mesh_addition() { + let config: Config = Config::default(); + + // Adds mesh_low peers and PRUNE 2 giving us a deficit. 
+ let (mut gs, peers, _receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n() + 1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + let to_remove_peers = config.mesh_n() + 1 - config.mesh_n_low() - 1; + + for peer in peers.iter().take(to_remove_peers) { + gs.handle_prune( + peer, + topics.iter().map(|h| (h.clone(), vec![], None)).collect(), + ); + } + + // Verify the pruned peers are removed from the mesh. + assert_eq!( + gs.mesh.get(&topics[0]).unwrap().len(), + config.mesh_n_low() - 1 + ); + + // run a heartbeat + gs.heartbeat(); + + // Peers should be added to reach mesh_n + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); +} + +/// Tests the mesh maintenance subtraction +#[test] +fn test_mesh_subtraction() { + let config = Config::default(); + + // Add more peers than mesh_n_high so the heartbeat prunes the mesh back down to mesh_n. + let n = config.mesh_n_high() + 10; + // make all connections outbound so that we allow grafting to all + let (mut gs, peers, _receivers, topics) = inject_nodes1() + .peer_no(n) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .outbound(n) + .create_network(); + + // graft all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } + + // run a heartbeat + gs.heartbeat(); + + // Peers should be removed to reach mesh_n + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); +} + +#[test] +fn test_connect_to_px_peers_on_handle_prune() { + let config: Config = Config::default(); + + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + // handle prune from a single peer with px peers + + let mut px = Vec::new(); + // propose more px peers than config.prune_peers() + for _ in 0..config.prune_peers() + 5 { + px.push(PeerInfo { + peer_id: Some(PeerId::random()), + }); + } + + gs.handle_prune( + &peers[0], + vec![( + topics[0].clone(), + px.clone(), + Some(config.prune_backoff().as_secs()), + )], + ); + + // check dial events for the px peers + let dials: Vec<_> = gs + .events + .iter() + .filter_map(|e| match e { + ToSwarm::Dial { opts } => opts.get_peer_id(), + _ => None, + }) + .collect(); + + // Exactly config.prune_peers() many random peers should be dialled + assert_eq!(dials.len(), config.prune_peers()); + + let dials_set: HashSet<_> = dials.into_iter().collect(); + + // No duplicates + assert_eq!(dials_set.len(), config.prune_peers()); + + // all dialled peers must be in px + assert!(dials_set.is_subset( + &px.iter() + .map(|i| *i.peer_id.as_ref().unwrap()) + .collect::<HashSet<_>>() + )); +} + +#[test] +fn test_send_px_and_backoff_in_prune() { + let config: Config = Config::default(); + + // build mesh with enough peers for px + let (mut gs, peers, receivers, topics) = inject_nodes1() + .peer_no(config.prune_peers() + 1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + // send prune to peer + gs.send_graft_prune( + HashMap::new(), + vec![(peers[0], vec![topics[0].clone()])] + .into_iter() + .collect(), + HashSet::new(), + ); + + // check prune message + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] + && match m { + RpcOut::Prune(Prune { + topic_hash, + peers, + backoff, + }) => { + topic_hash == &topics[0] && + peers.len() == config.prune_peers() && + // all peers are different + peers.iter().collect::<HashSet<_>>().len() == + config.prune_peers() && + backoff.unwrap() == config.prune_backoff().as_secs() + } + _ => false, + } + }); +
assert_eq!(control_msgs, 1); +} + +#[test] +fn test_prune_backoffed_peer_on_graft() { + let config: Config = Config::default(); + + //build mesh with enough peers for px + let (mut gs, peers, receivers, topics) = inject_nodes1() + .peer_no(config.prune_peers() + 1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + //remove peer from mesh and send prune to peer => this adds a backoff for this peer + gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); + gs.send_graft_prune( + HashMap::new(), + vec![(peers[0], vec![topics[0].clone()])] + .into_iter() + .collect(), + HashSet::new(), + ); + + //ignore all messages until now + let receivers = flush_events(&mut gs, receivers); + + //handle graft + gs.handle_graft(&peers[0], vec![topics[0].clone()]); + + //check prune message + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] + && match m { + RpcOut::Prune(Prune { + topic_hash, + peers, + backoff, + }) => { + topic_hash == &topics[0] && + //no px in this case + peers.is_empty() && + backoff.unwrap() == config.prune_backoff().as_secs() + } + _ => false, + } + }); + assert_eq!(control_msgs, 1); +} + +#[test] +fn test_do_not_graft_within_backoff_period() { + let config = ConfigBuilder::default() + .backoff_slack(1) + .heartbeat_interval(Duration::from_millis(100)) + .build() + .unwrap(); + //only one peer => mesh too small and will try to regraft as early as possible + let (mut gs, peers, receivers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + //handle prune from peer with backoff of one second + gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); + + //forget all events until now + let receivers = flush_events(&mut gs, receivers); + + //call heartbeat + gs.heartbeat(); + + //Sleep for one second and apply 10 regular heartbeats (interval = 100ms). + for _ in 0..10 { + sleep(Duration::from_millis(100)); + gs.heartbeat(); + } + + //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // is needed). + let (control_msgs, receivers) = + count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); + assert_eq!( + control_msgs, 0, + "Graft message created too early within backoff period" + ); + + //Heartbeat one more time this should graft now + sleep(Duration::from_millis(100)); + gs.heartbeat(); + + //check that graft got created + let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. 
}));
+ assert!(
+ control_msgs > 0,
+ "No graft message was created after backoff period"
+ );
+}
+
+#[test]
+fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() {
+ //set the default backoff period (prune_backoff) to 90ms, less than one heartbeat interval
+ let config = ConfigBuilder::default()
+ .prune_backoff(Duration::from_millis(90))
+ .backoff_slack(1)
+ .heartbeat_interval(Duration::from_millis(100))
+ .build()
+ .unwrap();
+ //only one peer => mesh too small and will try to regraft as early as possible
+ let (mut gs, peers, receivers, topics) = inject_nodes1()
+ .peer_no(1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .create_network();
+
+ //handle prune from peer without a specified backoff
+ gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]);
+
+ //forget all events until now
+ let receivers = flush_events(&mut gs, receivers);
+
+ //call heartbeat
+ gs.heartbeat();
+
+ //Apply one more heartbeat
+ sleep(Duration::from_millis(100));
+ gs.heartbeat();
+
+ //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
+ // is needed).
+ let (control_msgs, receivers) =
+ count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
+ assert_eq!(
+ control_msgs, 0,
+ "Graft message created too early within backoff period"
+ );
+
+ //Heartbeat one more time; this should graft now
+ sleep(Duration::from_millis(100));
+ gs.heartbeat();
+
+ //check that graft got created
+ let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
+ assert!(
+ control_msgs > 0,
+ "No graft message was created after backoff period"
+ );
+}
+
+#[test]
+fn test_unsubscribe_backoff() {
+ const HEARTBEAT_INTERVAL: Duration = Duration::from_millis(100);
+ let config = ConfigBuilder::default()
+ .backoff_slack(1)
+ // ensure a prune_backoff > unsubscribe_backoff
+ .prune_backoff(Duration::from_secs(5))
+ .unsubscribe_backoff(1)
+ .heartbeat_interval(HEARTBEAT_INTERVAL)
+ .build()
+ .unwrap();
+
+ let topic = String::from("test");
+ // only one peer => mesh too small and will try to regraft as early as possible
+ let (mut gs, _, receivers, topics) = inject_nodes1()
+ .peer_no(1)
+ .topics(vec![topic.clone()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .create_network();
+
+ let _ = gs.unsubscribe(&Topic::new(topic));
+
+ let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| match m {
+ RpcOut::Prune(Prune { backoff, .. }) => backoff == &Some(1),
+ _ => false,
+ });
+ assert_eq!(
+ control_msgs, 1,
+ "Peer should be pruned with `unsubscribe_backoff`."
+ );
+
+ let _ = gs.subscribe(&Topic::new(topics[0].to_string()));
+
+ // forget all events until now
+ let receivers = flush_events(&mut gs, receivers);
+
+ // call heartbeat
+ gs.heartbeat();
+
+ // Sleep for one second and apply 10 regular heartbeats (interval = 100ms).
+ for _ in 0..10 {
+ sleep(HEARTBEAT_INTERVAL);
+ gs.heartbeat();
+ }
+
+ // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
+ // is needed).
+ let (control_msgs, receivers) =
+ count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
+ assert_eq!(
+ control_msgs, 0,
+ "Graft message created too early within backoff period"
+ );
+
+ // Heartbeat one more time; this should graft now
+ sleep(HEARTBEAT_INTERVAL);
+ gs.heartbeat();
+
+ // check that graft got created
+ let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { ..
})); + assert!( + control_msgs > 0, + "No graft message was created after backoff period" + ); +} + +#[test] +fn test_flood_publish() { + let config: Config = Config::default(); + + let topic = "test"; + // Adds more peers than mesh can hold to test flood publishing + let (mut gs, _, receivers, _) = inject_nodes1() + .peer_no(config.mesh_n_high() + 10) + .topics(vec![topic.into()]) + .to_subscribe(true) + .create_network(); + + //publish message + let publish_data = vec![0; 42]; + gs.publish(Topic::new(topic), publish_data).unwrap(); + + // Collect all publish messages + let publishes = receivers + .into_values() + .fold(vec![], |mut collected_publish, c| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { + collected_publish.push(message); + } + } + collected_publish + }); + + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); + + let msg_id = gs.config.message_id(message); + + let config: Config = Config::default(); + assert_eq!( + publishes.len(), + config.mesh_n_high() + 10, + "Should send a publish message to all known peers" + ); + + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} + +#[test] +fn test_gossip_to_at_least_gossip_lazy_peers() { + let config: Config = Config::default(); + + //add more peers than in mesh to test gossipping + //by default only mesh_n_low peers will get added to mesh + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) + .topics(vec!["topic".into()]) + .to_subscribe(true) + .create_network(); + + //receive message + let raw_message = RawMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + //emit gossip + gs.emit_gossip(); + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + + //check that exactly config.gossip_lazy() many gossip messages were sent. 
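+ // (IHAVE gossip is only emitted to subscribed peers that are not already in the mesh.)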
+ let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action {
+ RpcOut::IHave(IHave {
+ topic_hash,
+ message_ids,
+ }) => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id),
+ _ => false,
+ });
+ assert_eq!(control_msgs, config.gossip_lazy());
+}
+
+#[test]
+fn test_gossip_to_at_most_gossip_factor_peers() {
+ let config: Config = Config::default();
+
+ //add a lot of peers
+ let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize;
+ let (mut gs, _, receivers, topic_hashes) = inject_nodes1()
+ .peer_no(m)
+ .topics(vec!["topic".into()])
+ .to_subscribe(true)
+ .create_network();
+
+ //receive message
+ let raw_message = RawMessage {
+ source: Some(PeerId::random()),
+ data: vec![],
+ sequence_number: Some(0),
+ topic: topic_hashes[0].clone(),
+ signature: None,
+ key: None,
+ validated: true,
+ };
+ gs.handle_received_message(raw_message.clone(), &PeerId::random());
+
+ //emit gossip
+ gs.emit_gossip();
+
+ // Transform the inbound message
+ let message = &gs.data_transform.inbound_transform(raw_message).unwrap();
+
+ let msg_id = gs.config.message_id(message);
+ //check that exactly (m - mesh_n_low) * gossip_factor many gossip messages were sent.
+ let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action {
+ RpcOut::IHave(IHave {
+ topic_hash,
+ message_ids,
+ }) => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id),
+ _ => false,
+ });
+ assert_eq!(
+ control_msgs,
+ ((m - config.mesh_n_low()) as f64 * config.gossip_factor()) as usize
+ );
+}
+
+#[test]
+fn test_accept_only_outbound_peer_grafts_when_mesh_full() {
+ let config: Config = Config::default();
+
+ //enough peers to fill the mesh
+ let (mut gs, peers, _, topics) = inject_nodes1()
+ .peer_no(config.mesh_n_high())
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .create_network();
+
+ // graft all the peers => this will fill the mesh
+ for peer in peers {
+ gs.handle_graft(&peer, topics.clone());
+ }
+
+ //assert current mesh size
+ assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high());
+
+ //create an inbound and an outbound peer
+ let (inbound, _in_receiver) = add_peer(&mut gs, &topics, false, false);
+ let (outbound, _out_receiver) = add_peer(&mut gs, &topics, true, false);
+
+ //send grafts
+ gs.handle_graft(&inbound, vec![topics[0].clone()]);
+ gs.handle_graft(&outbound, vec![topics[0].clone()]);
+
+ //assert mesh size
+ assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1);
+
+ //inbound is not in mesh
+ assert!(!gs.mesh[&topics[0]].contains(&inbound));
+
+ //outbound is in mesh
+ assert!(gs.mesh[&topics[0]].contains(&outbound));
+}
+
+#[test]
+fn test_do_not_remove_too_many_outbound_peers() {
+ //use an extreme case to catch errors with high probability
+ let m = 50;
+ let n = 2 * m;
+ let config = ConfigBuilder::default()
+ .mesh_n_high(n)
+ .mesh_n(n)
+ .mesh_n_low(n)
+ .mesh_outbound_min(m)
+ .build()
+ .unwrap();
+
+ //fill the mesh with inbound connections
+ let (mut gs, peers, _receivers, topics) = inject_nodes1()
+ .peer_no(n)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .create_network();
+
+ // graft all the peers
+ for peer in peers {
+ gs.handle_graft(&peer, topics.clone());
+ }
+
+ //create m outbound connections and graft (we will accept the graft)
+ let mut outbound = HashSet::new();
+ for _ in 0..m {
+ let (peer, _) = add_peer(&mut gs, &topics, true, false);
+ outbound.insert(peer);
+ gs.handle_graft(&peer, topics.clone());
+ }
+
+ //mesh is overly full
+ assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m);
+
+ // run a heartbeat
+ gs.heartbeat();
+
+ // Peers should be removed to reach n
+ assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n);
+
+ //all outbound peers are still in the mesh
+ assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p)));
+}
+
+#[test]
+fn test_add_outbound_peers_if_min_is_not_satisfied() {
+ let config: Config = Config::default();
+
+ // Fill full mesh with inbound peers
+ let (mut gs, peers, _, topics) = inject_nodes1()
+ .peer_no(config.mesh_n_high())
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .create_network();
+
+ // graft all the peers
+ for peer in peers {
+ gs.handle_graft(&peer, topics.clone());
+ }
+
+ //create config.mesh_outbound_min() many outbound connections without grafting
+ let mut peers = vec![];
+ for _ in 0..config.mesh_outbound_min() {
+ peers.push(add_peer(&mut gs, &topics, true, false));
+ }
+
+ // Nothing changed in the mesh yet
+ assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high());
+
+ // run a heartbeat
+ gs.heartbeat();
+
+ // The outbound peers got additionally added
+ assert_eq!(
+ gs.mesh[&topics[0]].len(),
+ config.mesh_n_high() + config.mesh_outbound_min()
+ );
+}
+
+#[test]
+fn test_prune_negative_scored_peers() {
+ let config = Config::default();
+
+ //build mesh with one peer
+ let (mut gs, peers, receivers, topics) = inject_nodes1()
+ .peer_no(1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config.clone())
+ .explicit(0)
+ .outbound(0)
+ .scoring(Some((
+ PeerScoreParams::default(),
+ PeerScoreThresholds::default(),
+ )))
+ .create_network();
+
+ //add penalty to peer
+ gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1);
+
+ //execute heartbeat
+ gs.heartbeat();
+
+ //peer should not be in mesh anymore
+ assert!(gs.mesh[&topics[0]].is_empty());
+
+ //check prune message
+ let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
+ peer_id == &peers[0]
+ && match m {
+ RpcOut::Prune(Prune {
+ topic_hash,
+ peers,
+ backoff,
+ }) => {
+ topic_hash == &topics[0] &&
+ //no px in this case
+ peers.is_empty() &&
+ backoff.unwrap() == config.prune_backoff().as_secs()
+ }
+ _ => false,
+ }
+ });
+ assert_eq!(control_msgs, 1);
+}
+
+#[test]
+fn test_dont_graft_to_negative_scored_peers() {
+ let config = Config::default();
+ //init full mesh
+ let (mut gs, peers, _, topics) = inject_nodes1()
+ .peer_no(config.mesh_n_high())
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .scoring(Some((
+ PeerScoreParams::default(),
+ PeerScoreThresholds::default(),
+ )))
+ .create_network();
+
+ //add two additional peers that will not be part of the mesh
+ let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false);
+ let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false);
+
+ //reduce score of p1 to negative
+ gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1);
+
+ //handle prunes of all other peers
+ for p in peers {
+ gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]);
+ }
+
+ //heartbeat
+ gs.heartbeat();
+
+ //assert that mesh only contains p2
+ assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1);
+ assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2));
+}
+
+/// Note that in this test, even without a penalty, the px would be ignored because of the
+/// accept_px_threshold; but the spec still explicitly states that px from negatively scored
+/// peers should get ignored, therefore we test it here.
+#[test] +fn test_ignore_px_from_negative_scored_peer() { + let config = Config::default(); + + //build mesh with one peer + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + //penalize peer + gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + + //handle prune from single peer with px peers + let px = vec![PeerInfo { + peer_id: Some(PeerId::random()), + }]; + + gs.handle_prune( + &peers[0], + vec![( + topics[0].clone(), + px, + Some(config.prune_backoff().as_secs()), + )], + ); + + //assert no dials + assert_eq!( + gs.events + .iter() + .filter(|e| matches!(e, ToSwarm::Dial { .. })) + .count(), + 0 + ); +} + +#[test] +fn test_only_send_nonnegative_scoring_peers_in_px() { + let config = ConfigBuilder::default() + .prune_peers(16) + .do_px() + .build() + .unwrap(); + + // Build mesh with three peer + let (mut gs, peers, receivers, topics) = inject_nodes1() + .peer_no(3) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + // Penalize first peer + gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + + // Prune second peer + gs.send_graft_prune( + HashMap::new(), + vec![(peers[1], vec![topics[0].clone()])] + .into_iter() + .collect(), + HashSet::new(), + ); + + // Check that px in prune message only contains third peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[1] + && match m { + RpcOut::Prune(Prune { + topic_hash, + peers: px, + .. + }) => { + topic_hash == &topics[0] + && px.len() == 1 + && px[0].peer_id.as_ref().unwrap() == &peers[2] + } + _ => false, + } + }); + assert_eq!(control_msgs, 1); +} + +#[test] +fn test_do_not_gossip_to_peers_below_gossip_threshold() { + let config = Config::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + + // Build full mesh + let (mut gs, peers, mut receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // Graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } + + // Add two additional peers that will not be part of the mesh + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); + + // Reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. 
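+ // (behaviour_penalty_weight is negative, so a score of 4 * weight lies below the
+ // gossip_threshold of 3 * weight.)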
+ gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + // Receive message + let raw_message = RawMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + + // Emit gossip + gs.emit_gossip(); + + // Check that exactly one gossip messages got sent and it got sent to p2 + let (control_msgs, _) = count_control_msgs(receivers, |peer, action| match action { + RpcOut::IHave(IHave { + topic_hash, + message_ids, + }) => { + if topic_hash == &topics[0] && message_ids.iter().any(|id| id == &msg_id) { + assert_eq!(peer, &p2); + true + } else { + false + } + } + _ => false, + }); + assert_eq!(control_msgs, 1); +} + +#[test] +fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { + let config = Config::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + + // Build full mesh + let (mut gs, peers, mut receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // Graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } + + // Add two additional peers that will not be part of the mesh + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); + + // Reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + // Receive message + let raw_message = RawMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + + gs.handle_iwant(&p1, vec![msg_id.clone()]); + gs.handle_iwant(&p2, vec![msg_id.clone()]); + + // the messages we are sending + let sent_messages = + receivers + .into_iter() + .fold(vec![], |mut collected_messages, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::Forward { message, .. 
}) = non_priority.try_recv() { + collected_messages.push((peer_id, message)); + } + } + collected_messages + }); + + //the message got sent to p2 + assert!(sent_messages + .iter() + .map(|(peer_id, msg)| ( + peer_id, + gs.data_transform.inbound_transform(msg.clone()).unwrap() + )) + .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id)); + //the message got not sent to p1 + assert!(sent_messages + .iter() + .map(|(peer_id, msg)| ( + peer_id, + gs.data_transform.inbound_transform(msg.clone()).unwrap() + )) + .all(|(peer_id, msg)| !(peer_id == &p1 && gs.config.message_id(&msg) == msg_id))); +} + +#[test] +fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { + let config = Config::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + //build full mesh + let (mut gs, peers, mut receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } + + //add two additional peers that will not be part of the mesh + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); + + //reduce score of p1 below peer_score_thresholds.gossip_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. 
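+ // After the penalties below, IHAVE/IWANT handling must ignore p1 while p2 is still served.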
+ gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //message that other peers have + let raw_message = RawMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + + gs.handle_ihave(&p1, vec![(topics[0].clone(), vec![msg_id.clone()])]); + gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); + + // check that we sent exactly one IWANT request to p2 + let (control_msgs, _) = count_control_msgs(receivers, |peer, c| match c { + RpcOut::IWant(IWant { message_ids }) => { + if message_ids.iter().any(|m| m == &msg_id) { + assert_eq!(peer, &p2); + true + } else { + false + } + } + _ => false, + }); + assert_eq!(control_msgs, 1); +} + +#[test] +fn test_do_not_publish_to_peer_below_publish_threshold() { + let config = ConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + + //build mesh with no peers and no subscribed topics + let (mut gs, _, mut receivers, _) = inject_nodes1() + .gs_config(config) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //create a new topic for which we are not subscribed + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two additional peers that will be added to the mesh + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); + + //reduce score of p1 below peer_score_thresholds.publish_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //a heartbeat will remove the peers from the mesh + gs.heartbeat(); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(topic, publish_data).unwrap(); + + // Collect all publish messages + let publishes = receivers + .into_iter() + .fold(vec![], |mut collected_publish, (peer_id, c)| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. 
}) = priority.try_recv() { + collected_publish.push((peer_id, message)); + } + } + collected_publish + }); + + //assert only published to p2 + assert_eq!(publishes.len(), 1); + assert_eq!(publishes[0].0, p2); +} + +#[test] +fn test_do_not_flood_publish_to_peer_below_publish_threshold() { + let config = Config::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + //build mesh with no peers + let (mut gs, _, mut receivers, topics) = inject_nodes1() + .topics(vec!["test".into()]) + .gs_config(config) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //add two additional peers that will be added to the mesh + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); + + //reduce score of p1 below peer_score_thresholds.publish_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //a heartbeat will remove the peers from the mesh + gs.heartbeat(); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect all publish messages + let publishes = receivers + .into_iter() + .fold(vec![], |mut collected_publish, (peer_id, c)| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { + collected_publish.push((peer_id, message)) + } + } + collected_publish + }); + + //assert only published to p2 + assert_eq!(publishes.len(), 1); + assert!(publishes[0].0 == p2); +} + +#[test] +fn test_ignore_rpc_from_peers_below_graylist_threshold() { + let config = Config::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + graylist_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + + //build mesh with no peers + let (mut gs, _, _, topics) = inject_nodes1() + .topics(vec!["test".into()]) + .gs_config(config.clone()) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //add two additional peers that will be added to the mesh + let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); + let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.graylist_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. 
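+ // After the penalties below, p1 is graylisted: everything except its subscriptions gets
+ // dropped, while p2 (above the graylist threshold) is still processed.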
+ gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below publish_threshold but not below graylist_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + let raw_message1 = RawMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4], + sequence_number: Some(1u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + + let raw_message2 = RawMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5], + sequence_number: Some(2u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + + let raw_message3 = RawMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5, 6], + sequence_number: Some(3u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + + let raw_message4 = RawMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5, 6, 7], + sequence_number: Some(4u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + + // Transform the inbound message + let message2 = &gs.data_transform.inbound_transform(raw_message2).unwrap(); + + // Transform the inbound message + let message4 = &gs.data_transform.inbound_transform(raw_message4).unwrap(); + + let subscription = Subscription { + action: SubscriptionAction::Subscribe, + topic_hash: topics[0].clone(), + }; + + let control_action = ControlAction::IHave(IHave { + topic_hash: topics[0].clone(), + message_ids: vec![config.message_id(message2)], + }); + + //clear events + gs.events.clear(); + + //receive from p1 + gs.on_connection_handler_event( + p1, + ConnectionId::new_unchecked(0), + HandlerEvent::Message { + rpc: Rpc { + messages: vec![raw_message1], + subscriptions: vec![subscription.clone()], + control_msgs: vec![control_action], + }, + invalid_messages: Vec::new(), + }, + ); + + //only the subscription event gets processed, the rest is dropped + assert_eq!(gs.events.len(), 1); + assert!(matches!( + gs.events[0], + ToSwarm::GenerateEvent(Event::Subscribed { .. 
})
+ ));
+
+ let control_action = ControlAction::IHave(IHave {
+ topic_hash: topics[0].clone(),
+ message_ids: vec![config.message_id(message4)],
+ });
+
+ //receive from p2
+ gs.on_connection_handler_event(
+ p2,
+ ConnectionId::new_unchecked(0),
+ HandlerEvent::Message {
+ rpc: Rpc {
+ messages: vec![raw_message3],
+ subscriptions: vec![subscription],
+ control_msgs: vec![control_action],
+ },
+ invalid_messages: Vec::new(),
+ },
+ );
+
+ //events got processed
+ assert!(gs.events.len() > 1);
+}
+
+#[test]
+fn test_ignore_px_from_peers_below_accept_px_threshold() {
+ let config = ConfigBuilder::default().prune_peers(16).build().unwrap();
+ let peer_score_params = PeerScoreParams::default();
+ let peer_score_thresholds = PeerScoreThresholds {
+ accept_px_threshold: peer_score_params.app_specific_weight,
+ ..PeerScoreThresholds::default()
+ };
+ // Build mesh with two peers
+ let (mut gs, peers, _, topics) = inject_nodes1()
+ .peer_no(2)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config.clone())
+ .scoring(Some((peer_score_params, peer_score_thresholds)))
+ .create_network();
+
+ // Decrease score of first peer to less than accept_px_threshold
+ gs.set_application_score(&peers[0], 0.99);
+
+ // Increase score of second peer to accept_px_threshold
+ gs.set_application_score(&peers[1], 1.0);
+
+ // Handle prune from peer peers[0] with px peers
+ let px = vec![PeerInfo {
+ peer_id: Some(PeerId::random()),
+ }];
+ gs.handle_prune(
+ &peers[0],
+ vec![(
+ topics[0].clone(),
+ px,
+ Some(config.prune_backoff().as_secs()),
+ )],
+ );
+
+ // Assert no dials
+ assert_eq!(
+ gs.events
+ .iter()
+ .filter(|e| matches!(e, ToSwarm::Dial { .. }))
+ .count(),
+ 0
+ );
+
+ //handle prune from peer peers[1] with px peers
+ let px = vec![PeerInfo {
+ peer_id: Some(PeerId::random()),
+ }];
+ gs.handle_prune(
+ &peers[1],
+ vec![(
+ topics[0].clone(),
+ px,
+ Some(config.prune_backoff().as_secs()),
+ )],
+ );
+
+ //assert there are dials now
+ assert!(
+ gs.events
+ .iter()
+ .filter(|e| matches!(e, ToSwarm::Dial { .. }))
+ .count()
+ > 0
+ );
+}
+
+#[test]
+fn test_keep_best_scoring_peers_on_oversubscription() {
+ let config = ConfigBuilder::default()
+ .mesh_n_low(15)
+ .mesh_n(30)
+ .mesh_n_high(60)
+ .retain_scores(29)
+ .build()
+ .unwrap();
+
+ //build mesh with more peers than mesh can hold
+ let n = config.mesh_n_high() + 1;
+ let (mut gs, peers, _receivers, topics) = inject_nodes1()
+ .peer_no(n)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config.clone())
+ .explicit(0)
+ .outbound(n)
+ .scoring(Some((
+ PeerScoreParams::default(),
+ PeerScoreThresholds::default(),
+ )))
+ .create_network();
+
+ // graft all, will be accepted since they are outbound
+ for peer in &peers {
+ gs.handle_graft(peer, topics.clone());
+ }
+
+ //assign positive scores to peers equal to their index
+ for (index, peer) in peers.iter().enumerate() {
+ gs.set_application_score(peer, index as f64);
+ }
+
+ assert_eq!(gs.mesh[&topics[0]].len(), n);
+
+ //heartbeat to prune some peers
+ gs.heartbeat();
+
+ assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n());
+
+ //mesh contains retain_scores best peers
+ assert!(gs.mesh[&topics[0]].is_superset(
+ &peers[(n - config.retain_scores())..]
+ .iter()
+ .cloned()
+ .collect()
+ ));
+}
+
+#[test]
+fn test_scoring_p1() {
+ let config = Config::default();
+ let mut peer_score_params = PeerScoreParams::default();
+ let topic = Topic::new("test");
+ let topic_hash = topic.hash();
+ let topic_params = TopicScoreParams {
+ time_in_mesh_weight: 2.0,
+ time_in_mesh_quantum: Duration::from_millis(50),
+ time_in_mesh_cap: 10.0,
+ topic_weight: 0.7,
+ ..TopicScoreParams::default()
+ };
+ peer_score_params
+ .topics
+ .insert(topic_hash, topic_params.clone());
+ let peer_score_thresholds = PeerScoreThresholds::default();
+
+ //build mesh with one peer
+ let (mut gs, peers, _, _) = inject_nodes1()
+ .peer_no(1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .explicit(0)
+ .outbound(0)
+ .scoring(Some((peer_score_params, peer_score_thresholds)))
+ .create_network();
+
+ //sleep for 2 times the mesh_quantum
+ sleep(topic_params.time_in_mesh_quantum * 2);
+ //refresh scores
+ gs.peer_score.as_mut().unwrap().0.refresh_scores();
+ assert!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0])
+ >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
+ "score should be at least 2 * time_in_mesh_weight * topic_weight"
+ );
+ assert!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0])
+ < 3.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
+ "score should be less than 3 * time_in_mesh_weight * topic_weight"
+ );
+
+ //sleep again for 2 times the mesh_quantum (4 quanta have now elapsed in total)
+ sleep(topic_params.time_in_mesh_quantum * 2);
+ //refresh scores
+ gs.peer_score.as_mut().unwrap().0.refresh_scores();
+ assert!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0])
+ >= 4.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight,
+ "score should be at least 4 * time_in_mesh_weight * topic_weight"
+ );
+
+ //sleep for enough periods to reach maximum
+ sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32);
+ //refresh scores
+ gs.peer_score.as_mut().unwrap().0.refresh_scores();
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
+ topic_params.time_in_mesh_cap
+ * topic_params.time_in_mesh_weight
+ * topic_params.topic_weight,
+ "score should be exactly time_in_mesh_cap * time_in_mesh_weight * topic_weight"
+ );
+}
+
+fn random_message(seq: &mut u64, topics: &[TopicHash]) -> RawMessage {
+ let mut rng = rand::thread_rng();
+ *seq += 1;
+ RawMessage {
+ source: Some(PeerId::random()),
+ data: (0..rng.gen_range(10..30)).map(|_| rng.gen()).collect(),
+ sequence_number: Some(*seq),
+ topic: topics[rng.gen_range(0..topics.len())].clone(),
+ signature: None,
+ key: None,
+ validated: true,
+ }
+}
+
+#[test]
+fn test_scoring_p2() {
+ let config = Config::default();
+ let mut peer_score_params = PeerScoreParams::default();
+ let topic = Topic::new("test");
+ let topic_hash = topic.hash();
+ let topic_params = TopicScoreParams {
+ time_in_mesh_weight: 0.0, //deactivate time in mesh
+ first_message_deliveries_weight: 2.0,
+ first_message_deliveries_cap: 10.0,
+ first_message_deliveries_decay: 0.9,
+ topic_weight: 0.7,
+ ..TopicScoreParams::default()
+ };
+ peer_score_params
+ .topics
+ .insert(topic_hash, topic_params.clone());
+ let peer_score_thresholds = PeerScoreThresholds::default();
+
+ //build mesh with two peers
+ let (mut gs, peers, _, topics) = inject_nodes1()
+ .peer_no(2)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .explicit(0)
+ .outbound(0)
+ .scoring(Some((peer_score_params, peer_score_thresholds)))
+ .create_network();
+
+
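+ // P2 scores only the first delivery of each message; the closure below injects a
+ // message as if it had been received from peers[index].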
+ let mut seq = 0;
+ let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| {
+ gs.handle_received_message(msg, &peers[index]);
+ };
+
+ let m1 = random_message(&mut seq, &topics);
+ //peer 0 delivers message first
+ deliver_message(&mut gs, 0, m1.clone());
+ //peer 1 delivers message second
+ deliver_message(&mut gs, 1, m1);
+
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
+ 1.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight,
+ "score should be exactly first_message_deliveries_weight * topic_weight"
+ );
+
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+ 0.0,
+ "a second delivery of the same message should not increase the score"
+ );
+
+ //peer 1 delivers two new messages
+ deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
+ deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+ 2.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight,
+ "score should be exactly 2 * first_message_deliveries_weight * topic_weight"
+ );
+
+ //test decaying
+ gs.peer_score.as_mut().unwrap().0.refresh_scores();
+
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
+ 1.0 * topic_params.first_message_deliveries_decay
+ * topic_params.first_message_deliveries_weight
+ * topic_params.topic_weight,
+ "score should be exactly first_message_deliveries_decay * \
+ first_message_deliveries_weight * topic_weight"
+ );
+
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+ 2.0 * topic_params.first_message_deliveries_decay
+ * topic_params.first_message_deliveries_weight
+ * topic_params.topic_weight,
+ "score should be exactly 2 * first_message_deliveries_decay * \
+ first_message_deliveries_weight * topic_weight"
+ );
+
+ //test cap
+ for _ in 0..topic_params.first_message_deliveries_cap as u64 {
+ deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
+ }
+
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+ topic_params.first_message_deliveries_cap
+ * topic_params.first_message_deliveries_weight
+ * topic_params.topic_weight,
+ "score should be exactly first_message_deliveries_cap * \
+ first_message_deliveries_weight * topic_weight"
+ );
+}
+
+#[test]
+fn test_scoring_p3() {
+ let config = Config::default();
+ let mut peer_score_params = PeerScoreParams::default();
+ let topic = Topic::new("test");
+ let topic_hash = topic.hash();
+ let topic_params = TopicScoreParams {
+ time_in_mesh_weight: 0.0, //deactivate time in mesh
+ first_message_deliveries_weight: 0.0, //deactivate first time deliveries
+ mesh_message_deliveries_weight: -2.0,
+ mesh_message_deliveries_decay: 0.9,
+ mesh_message_deliveries_cap: 10.0,
+ mesh_message_deliveries_threshold: 5.0,
+ mesh_message_deliveries_activation: Duration::from_secs(1),
+ mesh_message_deliveries_window: Duration::from_millis(100),
+ topic_weight: 0.7,
+ ..TopicScoreParams::default()
+ };
+ peer_score_params.topics.insert(topic_hash, topic_params);
+ let peer_score_thresholds = PeerScoreThresholds::default();
+
+ //build mesh with two peers
+ let (mut gs, peers, _, topics) = inject_nodes1()
+ .peer_no(2)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config)
+ .explicit(0)
+ .outbound(0)
+ .scoring(Some((peer_score_params, peer_score_thresholds)))
+ .create_network();
+
+ let mut seq = 0;
+ let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| {
+ 
gs.handle_received_message(msg, &peers[index]); + }; + + let mut expected_message_deliveries = 0.0; + + //messages used to test window + let m1 = random_message(&mut seq, &topics); + let m2 = random_message(&mut seq, &topics); + + //peer 1 delivers m1 + deliver_message(&mut gs, 1, m1.clone()); + + //peer 0 delivers two message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + expected_message_deliveries += 2.0; + + sleep(Duration::from_millis(60)); + + //peer 1 delivers m2 + deliver_message(&mut gs, 1, m2.clone()); + + sleep(Duration::from_millis(70)); + //peer 0 delivers m1 and m2 only m2 gets counted + deliver_message(&mut gs, 0, m1); + deliver_message(&mut gs, 0, m2); + expected_message_deliveries += 1.0; + + sleep(Duration::from_millis(900)); + + //message deliveries penalties get activated, peer 0 has only delivered 3 messages and + // therefore gets a penalty + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 + ); + + // peer 0 delivers a lot of messages => message_deliveries should be capped at 10 + for _ in 0..20 { + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + } + + expected_message_deliveries = 10.0; + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + //apply 10 decays + for _ in 0..10 { + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay + } + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 + ); +} + +#[test] +fn test_scoring_p3b() { + let config = ConfigBuilder::default() + .prune_backoff(Duration::from_millis(100)) + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: -2.0, + mesh_message_deliveries_decay: 0.9, + mesh_message_deliveries_cap: 10.0, + mesh_message_deliveries_threshold: 5.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(100), + mesh_failure_penalty_weight: -3.0, + mesh_failure_penalty_decay: 0.95, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { + gs.handle_received_message(msg, &peers[index]); + }; + + let mut expected_message_deliveries = 0.0; + + //add some positive score + gs.peer_score + .as_mut() + .unwrap() + .0 + .set_application_score(&peers[0], 100.0); + + //peer 0 delivers two message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + 
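+ // Both first-time deliveries count toward peer 0's mesh message delivery rate (P3).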
expected_message_deliveries += 2.0; + + sleep(Duration::from_millis(1050)); + + //activation kicks in + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay + + //prune peer + gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); + + //wait backoff + sleep(Duration::from_millis(130)); + + //regraft peer + gs.handle_graft(&peers[0], topics.clone()); + + //the score should now consider p3b + let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 100.0 + expected_b3 * -3.0 * 0.7 + ); + + //we can also add a new p3 to the score + + //peer 0 delivers one message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + expected_message_deliveries += 1.0; + + sleep(Duration::from_millis(1050)); + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay + expected_b3 *= 0.95; + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 100.0 + (expected_b3 * -3.0 + (5f64 - expected_message_deliveries).powi(2) * -2.0) * 0.7 + ); +} + +#[test] +fn test_scoring_p4_valid_message() { + let config = ConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { + gs.handle_received_message(msg, &peers[index]); + }; + + //peer 0 delivers valid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + //message m1 gets validated + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Accept, + ) + .unwrap(); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +} + +#[test] +fn test_scoring_p4_invalid_signature() { + let config = ConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, 
//deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + + //peer 0 delivers message with invalid signature + let m = random_message(&mut seq, &topics); + + gs.on_connection_handler_event( + peers[0], + ConnectionId::new_unchecked(0), + HandlerEvent::Message { + rpc: Rpc { + messages: vec![], + subscriptions: vec![], + control_msgs: vec![], + }, + invalid_messages: vec![(m, ValidationError::InvalidSignature)], + }, + ); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); +} + +#[test] +fn test_scoring_p4_message_from_self() { + let config = ConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { + gs.handle_received_message(msg, &peers[index]); + }; + + //peer 0 delivers invalid message from self + let mut m = random_message(&mut seq, &topics); + m.source = Some(*gs.publish_config.get_own_id().unwrap()); + + deliver_message(&mut gs, 0, m); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); +} + +#[test] +fn test_scoring_p4_ignored_message() { + let config = ConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = 
PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { + gs.handle_received_message(msg, &peers[index]); + }; + + //peer 0 delivers ignored message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + + //message m1 gets ignored + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Ignore, + ) + .unwrap(); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +} + +#[test] +fn test_scoring_p4_application_invalidated_message() { + let config = ConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { + gs.handle_received_message(msg, &peers[index]); + }; + + //peer 0 delivers invalid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + + //message m1 gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); +} + +#[test] +fn test_scoring_p4_application_invalid_message_from_two_peers() { + let config = ConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: 
-2.0,
+ invalid_message_deliveries_decay: 0.9,
+ topic_weight: 0.7,
+ ..Default::default()
+ };
+ peer_score_params.topics.insert(topic_hash, topic_params);
+ peer_score_params.app_specific_weight = 1.0;
+ let peer_score_thresholds = PeerScoreThresholds::default();
+
+ //build mesh with two peers
+ let (mut gs, peers, _, topics) = inject_nodes1()
+ .peer_no(2)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config.clone())
+ .explicit(0)
+ .outbound(0)
+ .scoring(Some((peer_score_params, peer_score_thresholds)))
+ .create_network();
+
+ let mut seq = 0;
+ let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| {
+ gs.handle_received_message(msg, &peers[index]);
+ };
+
+ //peer 0 delivers invalid message
+ let m1 = random_message(&mut seq, &topics);
+ deliver_message(&mut gs, 0, m1.clone());
+
+ // Transform the inbound message
+ let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap();
+
+ //peer 1 delivers same message
+ deliver_message(&mut gs, 1, m1);
+
+ assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
+ assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0);
+
+ //message m1 gets rejected
+ gs.report_message_validation_result(
+ &config.message_id(message1),
+ &peers[0],
+ MessageAcceptance::Reject,
+ )
+ .unwrap();
+
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
+ -2.0 * 0.7
+ );
+ assert_eq!(
+ gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+ -2.0 * 0.7
+ );
+}
+
+#[test]
+fn test_scoring_p4_three_application_invalid_messages() {
+ let config = ConfigBuilder::default()
+ .validate_messages()
+ .build()
+ .unwrap();
+ let mut peer_score_params = PeerScoreParams::default();
+ let topic = Topic::new("test");
+ let topic_hash = topic.hash();
+ let topic_params = TopicScoreParams {
+ time_in_mesh_weight: 0.0, //deactivate time in mesh
+ first_message_deliveries_weight: 0.0, //deactivate first time deliveries
+ mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
+ mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ invalid_message_deliveries_weight: -2.0,
+ invalid_message_deliveries_decay: 0.9,
+ topic_weight: 0.7,
+ ..Default::default()
+ };
+ peer_score_params.topics.insert(topic_hash, topic_params);
+ peer_score_params.app_specific_weight = 1.0;
+ let peer_score_thresholds = PeerScoreThresholds::default();
+
+ //build mesh with one peer
+ let (mut gs, peers, _, topics) = inject_nodes1()
+ .peer_no(1)
+ .topics(vec!["test".into()])
+ .to_subscribe(true)
+ .gs_config(config.clone())
+ .explicit(0)
+ .outbound(0)
+ .scoring(Some((peer_score_params, peer_score_thresholds)))
+ .create_network();
+
+ let mut seq = 0;
+ let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| {
+ gs.handle_received_message(msg, &peers[index]);
+ };
+
+ //peer 0 delivers three invalid messages
+ let m1 = random_message(&mut seq, &topics);
+ let m2 = random_message(&mut seq, &topics);
+ let m3 = random_message(&mut seq, &topics);
+ deliver_message(&mut gs, 0, m1.clone());
+ deliver_message(&mut gs, 0, m2.clone());
+ deliver_message(&mut gs, 0, m3.clone());
+
+ // Transform the inbound message
+ let message1 = &gs.data_transform.inbound_transform(m1).unwrap();
+
+ // Transform the inbound message
+ let message2 = &gs.data_transform.inbound_transform(m2).unwrap();
+ // Transform the inbound message
+ let message3 = &gs.data_transform.inbound_transform(m3).unwrap();
+
+ assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
+
+ //each of the three messages
gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + gs.report_message_validation_result( + &config.message_id(message2), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + gs.report_message_validation_result( + &config.message_id(message3), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + //number of invalid messages gets squared + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 9.0 * -2.0 * 0.7 + ); +} + +#[test] +fn test_scoring_p4_decay() { + let config = ConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { + gs.handle_received_message(msg, &peers[index]); + }; + + //peer 0 delivers invalid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + //message m1 gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); + + //we decay + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + + // the number of invalids gets decayed to 0.9 and then squared in the score + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 0.9 * 0.9 * -2.0 * 0.7 + ); +} + +#[test] +fn test_scoring_p5() { + let peer_score_params = PeerScoreParams { + app_specific_weight: 2.0, + ..PeerScoreParams::default() + }; + + //build mesh with one peer + let (mut gs, peers, _, _) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + gs.set_application_score(&peers[0], 1.1); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 1.1 * 2.0 + ); +} + +#[test] +fn test_scoring_p6() { + let peer_score_params = PeerScoreParams { + ip_colocation_factor_threshold: 5.0, + ip_colocation_factor_weight: -2.0, + ..Default::default() + }; + + let (mut gs, _, _, _) = inject_nodes1() + .peer_no(0) + .topics(vec![]) + .to_subscribe(false) + .gs_config(Config::default()) + 
.explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + //create 5 peers with the same ip + let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); + let peers = vec![ + add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, + add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, + add_peer_with_addr(&mut gs, &[], true, false, addr.clone()).0, + add_peer_with_addr(&mut gs, &[], true, false, addr.clone()).0, + add_peer_with_addr(&mut gs, &[], true, true, addr.clone()).0, + ]; + + //create 4 other peers with other ip + let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); + let others = vec![ + add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, + add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, + add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, + add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, + ]; + + //no penalties yet + for peer in peers.iter().chain(others.iter()) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); + } + + //add additional connection for 3 others with addr + for id in others.iter().take(3) { + gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id: *id, + connection_id: ConnectionId::new_unchecked(0), + endpoint: &ConnectedPoint::Dialer { + address: addr.clone(), + role_override: Endpoint::Dialer, + }, + failed_addresses: &[], + other_established: 0, + })); + } + + //penalties apply squared + for peer in peers.iter().chain(others.iter().take(3)) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + //fourth other peer still no penalty + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); + + //add additional connection for 3 of the peers to addr2 + for peer in peers.iter().take(3) { + gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id: *peer, + connection_id: ConnectionId::new_unchecked(0), + endpoint: &ConnectedPoint::Dialer { + address: addr2.clone(), + role_override: Endpoint::Dialer, + }, + failed_addresses: &[], + other_established: 1, + })); + } + + //double penalties for the first three of each + for peer in peers.iter().take(3).chain(others.iter().take(3)) { + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(peer), + (9.0 + 4.0) * -2.0 + ); + } + + //single penalties for the rest + for peer in peers.iter().skip(3) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&others[3]), + 4.0 * -2.0 + ); + + //two times same ip doesn't count twice + gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id: peers[0], + connection_id: ConnectionId::new_unchecked(0), + endpoint: &ConnectedPoint::Dialer { + address: addr, + role_override: Endpoint::Dialer, + }, + failed_addresses: &[], + other_established: 2, + })); + + //nothing changed + //double penalties for the first three of each + for peer in peers.iter().take(3).chain(others.iter().take(3)) { + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(peer), + (9.0 + 4.0) * -2.0 + ); + } + + //single penalties for the rest + for peer in peers.iter().skip(3) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&others[3]), + 4.0 * -2.0 + ); +} + +#[test] +fn test_scoring_p7_grafts_before_backoff() { + let config = ConfigBuilder::default() + 
.prune_backoff(Duration::from_millis(200)) + .graft_flood_threshold(Duration::from_millis(100)) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + behaviour_penalty_weight: -2.0, + behaviour_penalty_decay: 0.9, + ..Default::default() + }; + + let (mut gs, peers, _receivers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + //remove peers from mesh and send prune to them => this adds a backoff for the peers + for peer in peers.iter().take(2) { + gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); + gs.send_graft_prune( + HashMap::new(), + HashMap::from([(*peer, vec![topics[0].clone()])]), + HashSet::new(), + ); + } + + //wait 50 millisecs + sleep(Duration::from_millis(50)); + + //first peer tries to graft + gs.handle_graft(&peers[0], vec![topics[0].clone()]); + + //double behaviour penalty for first peer (squared) + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 4.0 * -2.0 + ); + + //wait 100 millisecs + sleep(Duration::from_millis(100)); + + //second peer tries to graft + gs.handle_graft(&peers[1], vec![topics[0].clone()]); + + //single behaviour penalty for second peer + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 1.0 * -2.0 + ); + + //test decay + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 4.0 * 0.9 * 0.9 * -2.0 + ); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 1.0 * 0.9 * 0.9 * -2.0 + ); +} + +#[test] +fn test_opportunistic_grafting() { + let config = ConfigBuilder::default() + .mesh_n_low(3) + .mesh_n(5) + .mesh_n_high(7) + .mesh_outbound_min(0) //deactivate outbound handling + .opportunistic_graft_ticks(2) + .opportunistic_graft_peers(2) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + app_specific_weight: 1.0, + ..Default::default() + }; + let thresholds = PeerScoreThresholds { + opportunistic_graft_threshold: 2.0, + ..Default::default() + }; + + let (mut gs, peers, _receivers, topics) = inject_nodes1() + .peer_no(5) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, thresholds))) + .create_network(); + + //fill mesh with 5 peers + for peer in &peers { + gs.handle_graft(peer, topics.clone()); + } + + //add additional 5 peers + let others: Vec<_> = (0..5) + .map(|_| add_peer(&mut gs, &topics, false, false)) + .collect(); + + //currently mesh equals peers + assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect()); + + //set scores for the peers in the mesh + for (i, peer) in peers.iter().enumerate().take(5) { + gs.set_application_score(peer, 0.0 + i as f64); + } + + //give the others high scores (but the first two have not high enough scores) + for (i, (peer, _receiver)) in others.iter().enumerate().take(5) { + gs.set_application_score(peer, 0.0 + i as f64); + } + + //this gives a median of exactly 2.0 => should not apply opportunistic grafting + gs.heartbeat(); + gs.heartbeat(); + + assert_eq!( + gs.mesh[&topics[0]].len(), + 5, + "should not apply opportunistic grafting" + ); + + //reduce middle score to 1.0 giving a median of 1.0 + gs.set_application_score(&peers[2], 1.0); + + //opportunistic grafting after two heartbeats + + gs.heartbeat(); + assert_eq!( + gs.mesh[&topics[0]].len(), + 5, + "should not apply opportunistic grafting after first tick" + ); + + gs.heartbeat(); + + assert_eq!( + gs.mesh[&topics[0]].len(), + 7, + "opportunistic grafting should have added 2 peers" + ); + + assert!( + gs.mesh[&topics[0]].is_superset(&peers.iter().cloned().collect()), + "old peers are still part of the mesh" + ); + + assert!( + gs.mesh[&topics[0]].is_disjoint(&others.iter().map(|(p, _)| p).cloned().take(2).collect()), + "peers below or equal to median should not be added in opportunistic grafting" + ); +} + +#[test] +fn test_ignore_graft_from_unknown_topic() { + //build gossipsub without subscribing to any topics + let (mut gs, peers, receivers, _) = inject_nodes1() + .peer_no(1) + .topics(vec![]) + .to_subscribe(false) + .create_network(); + + //handle an incoming graft for some topic + gs.handle_graft(&peers[0], vec![Topic::new("test").hash()]); + + //assert that no prune got created + let (control_msgs, _) = count_control_msgs(receivers, |_, a| matches!(a, RpcOut::Prune { .. })); + assert_eq!( + control_msgs, 0, + "we should not prune after graft in unknown topic" + ); +} + +#[test] +fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { + let config = Config::default(); + //build gossipsub with full mesh + let (mut gs, _, mut receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); + + //add another peer not in the mesh + let (peer, receiver) = add_peer(&mut gs, &topics, false, false); + receivers.insert(peer, receiver); + + //receive a message + let mut seq = 0; + let m1 = random_message(&mut seq, &topics); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + + let id = config.message_id(message1); + + gs.handle_received_message(m1, &PeerId::random()); + + //clear events + let receivers = flush_events(&mut gs, receivers); + + //the first gossip_retransimission many iwants return the valid message; all others are + // ignored. + for _ in 0..(2 * config.gossip_retransimission() + 10) { + gs.handle_iwant(&peer, vec![id.clone()]); + } + + assert_eq!( + receivers.into_values().fold(0, |mut fwds, c| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::Forward { ..
}) = non_priority.try_recv() { + fwds += 1; + } + } + fwds + }), + config.gossip_retransimission() as usize, + "no more than gossip_retransmission many messages get sent back" + ); +} + +#[test] +fn test_ignore_too_many_ihaves() { + let config = ConfigBuilder::default() + .max_ihave_messages(10) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, _, mut receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .create_network(); + + //add another peer not in the mesh + let (peer, receiver) = add_peer(&mut gs, &topics, false, false); + receivers.insert(peer, receiver); + + //peer has 20 messages + let mut seq = 0; + let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); + + //peer sends us one ihave for each message in order + for raw_message in &messages { + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), vec![config.message_id(message)])], + ); + } + + let first_ten: HashSet<_> = messages + .iter() + .take(10) + .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .map(|m| config.message_id(&m)) + .collect(); + + //we send iwant only for the first 10 messages + let (control_msgs, receivers) = count_control_msgs(receivers, |p, action| { + p == &peer + && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1 && first_ten.contains(&message_ids[0])) + }); + assert_eq!( + control_msgs, 10, + "exactly the first ten ihaves should be processed and one iwant for each created" + ); + + //after a heartbeat everything is forgotten + gs.heartbeat(); + + for raw_message in messages[10..].iter() { + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), vec![config.message_id(message)])], + ); + } + + //we send iwant for all 10 remaining messages + let (control_msgs, _) = count_control_msgs(receivers, |p, action| { + p == &peer + && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1) + }); + assert_eq!(control_msgs, 10, "all 10 remaining ihaves should produce an iwant"); +} + +#[test] +fn test_ignore_too_many_messages_in_ihave() { + let config = ConfigBuilder::default() + .max_ihave_messages(10) + .max_ihave_length(10) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, _, mut receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .create_network(); + + //add another peer not in the mesh + let (peer, receiver) = add_peer(&mut gs, &topics, false, false); + receivers.insert(peer, receiver); + + //peer has 20 messages + let mut seq = 0; + let message_ids: Vec<_> = (0..20) + .map(|_| random_message(&mut seq, &topics)) + .map(|msg| gs.data_transform.inbound_transform(msg).unwrap()) + .map(|msg| config.message_id(&msg)) + .collect(); + + //peer sends us three ihaves + gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[0..12].to_vec())], + ); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[0..20].to_vec())], + ); + + let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); + + //we send iwant only for the first 10 messages
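+ // (with the caps configured above, the iwant budget per heartbeat is
+ // max_ihave_length = 10 ids: the first ihave yields 8 iwant ids, the second
+ // only the 2 remaining, and the third none at all)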
+ let mut sum = 0; + let (control_msgs, receivers) = count_control_msgs(receivers, |p, rpc| match rpc { + RpcOut::IWant(IWant { message_ids }) => { + p == &peer && { + assert!(first_twelve.is_superset(&message_ids.iter().collect())); + sum += message_ids.len(); + true + } + } + _ => false, + }); + assert_eq!( + control_msgs, 2, + "the third ihave should get ignored and no iwant sent" + ); + + assert_eq!(sum, 10, "exactly the first ten message ids should be requested"); + + //after a heartbeat everything is forgotten + gs.heartbeat(); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[10..20].to_vec())], + ); + + //we send 10 iwant message ids via one IWANT rpc. + let mut sum = 0; + let (control_msgs, _) = count_control_msgs(receivers, |p, rpc| match rpc { + RpcOut::IWant(IWant { message_ids }) => { + p == &peer && { + sum += message_ids.len(); + true + } + } + _ => false, + }); + assert_eq!(control_msgs, 1); + assert_eq!(sum, 10, "exactly 10 message ids should get sent"); +} + +#[test] +fn test_limit_number_of_message_ids_inside_ihave() { + let config = ConfigBuilder::default() + .max_ihave_messages(10) + .max_ihave_length(100) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, peers, mut receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + //graft to all peers to really fill the mesh with all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } + + //add two other peers not in the mesh + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); + + //receive 200 messages from another peer + let mut seq = 0; + for _ in 0..200 { + gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random()); + } + + //emit gossip + gs.emit_gossip(); + + // both peers should have gotten 100 random ihave messages; to assert the randomness, we + // assert that both have not gotten the same set of messages, but have an intersection + // (which is the case with very high probability; the probability of failure is < 10^-58). + + let mut ihaves1 = HashSet::new(); + let mut ihaves2 = HashSet::new(); + + let (control_msgs, _) = count_control_msgs(receivers, |p, action| match action { + RpcOut::IHave(IHave { message_ids, .. }) => { + if p == &p1 { + ihaves1 = message_ids.iter().cloned().collect(); + true + } else if p == &p2 { + ihaves2 = message_ids.iter().cloned().collect(); + true + } else { + false + } + } + _ => false, + }); + assert_eq!( + control_msgs, 2, + "should have emitted one ihave to p1 and one to p2" + ); + + assert_eq!( + ihaves1.len(), + 100, + "should have sent 100 message ids in ihave to p1" + ); + assert_eq!( + ihaves2.len(), + 100, + "should have sent 100 message ids in ihave to p2" + ); + assert!( + ihaves1 != ihaves2, + "should have sent different random messages to p1 and p2 \ + (this may fail with a probability < 10^-58)" + ); + assert!( + ihaves1.intersection(&ihaves2).count() > 0, + "should have sent random messages with some common messages to p1 and p2 \ + (this may fail with a probability < 10^-58)" + ); +} + +#[test] +fn test_iwant_penalties() { + /* + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + */ + let config = ConfigBuilder::default() + .iwant_followup_time(Duration::from_secs(4)) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + behaviour_penalty_weight: -1.0, + ..Default::default() + }; + + // fill the mesh + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + // graft to all peers to really fill the mesh with all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } + + // add 100 more peers + let other_peers: Vec<_> = (0..100) + .map(|_| add_peer(&mut gs, &topics, false, false)) + .collect(); + + // each peer sends us an ihave containing two message ids each + let mut first_messages = Vec::new(); + let mut second_messages = Vec::new(); + let mut seq = 0; + for (peer, _receiver) in &other_peers { + let msg1 = random_message(&mut seq, &topics); + let msg2 = random_message(&mut seq, &topics); + + // Decompress the raw message and calculate the message id. + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(msg1.clone()).unwrap(); + + // Transform the inbound message + let message2 = &gs.data_transform.inbound_transform(msg2.clone()).unwrap(); + + first_messages.push(msg1.clone()); + second_messages.push(msg2.clone()); + gs.handle_ihave( + peer, + vec![( + topics[0].clone(), + vec![config.message_id(message1), config.message_id(message2)], + )], + ); + } + + // the peers send us all the first message ids in time + for (index, (peer, _receiver)) in other_peers.iter().enumerate() { + gs.handle_received_message(first_messages[index].clone(), peer); + } + + // now we do a heartbeat; no penalization should have been applied yet + gs.heartbeat(); + + for (peer, _receiver) in &other_peers { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); + } + + // the first twenty of the other peers now deliver their second message in time + for (index, (peer, _receiver)) in other_peers.iter().enumerate().take(20) { + gs.handle_received_message(second_messages[index].clone(), peer); + } + + // sleep for the promise duration + sleep(Duration::from_secs(4)); + + // now we do a heartbeat to apply penalization + gs.heartbeat(); + + // now we get the second messages from the last 80 peers.
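+ // (p7 arithmetic for reference: score = behaviour_penalty_weight * broken_promises^2,
+ // so with the -1.0 weight above one unanswered iwant scores -1.0 and two would
+ // score -4.0; delivering late does not remove an already applied penalty)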
+ for (index, (peer, _receiver)) in other_peers.iter().enumerate() { + if index > 19 { + gs.handle_received_message(second_messages[index].clone(), peer); + } + } + + // no further penalizations should get applied + gs.heartbeat(); + + // Only the last 80 peers should be penalized for not responding in time + let mut not_penalized = 0; + let mut single_penalized = 0; + let mut double_penalized = 0; + + for (i, (peer, _receiver)) in other_peers.iter().enumerate() { + let score = gs.peer_score.as_ref().unwrap().0.score(peer); + if score == 0.0 { + not_penalized += 1; + } else if score == -1.0 { + assert!(i > 9); + single_penalized += 1; + } else if score == -4.0 { + assert!(i > 9); + double_penalized += 1 + } else { + println!("{peer}"); + println!("{score}"); + panic!("Invalid score of peer"); + } + } + + assert_eq!(not_penalized, 20); + assert_eq!(single_penalized, 80); + assert_eq!(double_penalized, 0); +} + +#[test] +fn test_publish_to_floodsub_peers_without_flood_publish() { + let config = ConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + let (mut gs, _, mut receivers, topics) = inject_nodes1() + .peer_no(config.mesh_n_low() - 1) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + //add two floodsub peers, one explicit, one implicit + let (p1, receiver1) = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + receivers.insert(p1, receiver1); + + let (p2, receiver2) = + add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + receivers.insert(p2, receiver2); + + //p1 and p2 are not in the mesh + assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); + + //publish a message + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect publish messages to floodsub peers + let publishes = receivers + .into_iter() + .fold(0, |mut collected_publish, (peer_id, c)| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if matches!(priority.try_recv(), + Ok(RpcOut::Publish{..}) if peer_id == p1 || peer_id == p2) + { + collected_publish += 1; + } + } + collected_publish + }); + + assert_eq!( + publishes, 2, + "Should send a publish message to all floodsub peers" + ); +} + +#[test] +fn test_do_not_use_floodsub_in_fanout() { + let config = ConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + let (mut gs, _, mut receivers, _) = inject_nodes1() + .peer_no(config.mesh_n_low() - 1) + .topics(Vec::new()) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two floodsub peers, one explicit, one implicit + let (p1, receiver1) = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + + receivers.insert(p1, receiver1); + let (p2, receiver2) = + add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + receivers.insert(p2, receiver2); + //publish a message + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect publish messages to floodsub peers + let publishes = receivers + .into_iter() + .fold(0, |mut collected_publish, (peer_id, c)| { + let priority = c.priority.into_inner(); + while !priority.is_empty() { + if matches!(priority.try_recv(), + Ok(RpcOut::Publish{..}) if peer_id == p1 || peer_id == p2) + { + collected_publish += 1; + } + } + collected_publish + }); + + assert_eq!( + publishes, 2, + "Should send a publish message to all floodsub peers" + ); + + assert!( + !gs.fanout[&topics[0]].contains(&p1) && !gs.fanout[&topics[0]].contains(&p2), + "Floodsub peers are not allowed in fanout" + ); +} + +#[test] +fn test_dont_add_floodsub_peers_to_mesh_on_join() { + let (mut gs, _, _, _) = inject_nodes1() + .peer_no(0) + .topics(Vec::new()) + .to_subscribe(false) + .create_network(); + + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two floodsub peers, one explicit, one implicit + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + gs.join(&topics[0]); + + assert!( + gs.mesh[&topics[0]].is_empty(), + "Floodsub peers should not get added to mesh" + ); +} + +#[test] +fn test_dont_send_px_to_old_gossipsub_peers() { + let (mut gs, _, receivers, topics) = inject_nodes1() + .peer_no(0) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); + + //add an old gossipsub peer + let (p1, _receiver1) = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Gossipsub), + ); + + //prune the peer + gs.send_graft_prune( + HashMap::new(), + vec![(p1, topics.clone())].into_iter().collect(), + HashSet::new(), + ); + + //check that prune does not contain px + let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { + RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(), + _ => false, + }); + assert_eq!(control_msgs, 0, "Should not send px to old gossipsub peers"); +} + +#[test] +fn test_dont_send_floodsub_peers_in_px() { + //build mesh with one peer + let (mut gs, peers, receivers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + //add two floodsub peers + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + //prune only mesh node + gs.send_graft_prune( + HashMap::new(), + vec![(peers[0], topics.clone())].into_iter().collect(), + HashSet::new(), + ); + + //check that px in prune message is empty + let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { + RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(), + _ => false, + }); + assert_eq!(control_msgs, 0, "Should not include floodsub peers in px"); +} + +#[test] +fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { + let (mut gs, _, _, topics) = inject_nodes1() + .peer_no(0) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); + + //add two floodsub peers, one explicit, one implicit + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + true, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, true, false, Multiaddr::empty(), None); + + gs.heartbeat(); + + assert!( + gs.mesh[&topics[0]].is_empty(), + "Floodsub peers should not get added to mesh" + ); +} + +// Some very basic test of public api methods.
+#[test] +fn test_public_api() { + let (gs, peers, _, topic_hashes) = inject_nodes1() + .peer_no(4) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + let peers = peers.into_iter().collect::<BTreeSet<_>>(); + + assert_eq!( + gs.topics().cloned().collect::<Vec<_>>(), + topic_hashes, + "Expected topics to match registered topic." + ); + + assert_eq!( + gs.mesh_peers(&TopicHash::from_raw("topic1")) + .cloned() + .collect::<BTreeSet<_>>(), + peers, + "Expected peers for a registered topic to contain all peers." + ); + + assert_eq!( + gs.all_mesh_peers().cloned().collect::<BTreeSet<_>>(), + peers, + "Expected all_peers to contain all peers." + ); +} + +#[test] +fn test_subscribe_to_invalid_topic() { + let t1 = Topic::new("t1"); + let t2 = Topic::new("t2"); + let (mut gs, _, _, _) = inject_nodes::<IdentityTransform, WhitelistSubscriptionFilter>() + .subscription_filter(WhitelistSubscriptionFilter( + vec![t1.hash()].into_iter().collect(), + )) + .to_subscribe(false) + .create_network(); + + assert!(gs.subscribe(&t1).is_ok()); + assert!(gs.subscribe(&t2).is_err()); +} + +#[test] +fn test_subscribe_and_graft_with_negative_score() { + //simulate a communication between two gossipsub instances + let (mut gs1, _, _, topic_hashes) = inject_nodes1() + .topics(vec!["test".into()]) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + let (mut gs2, _, receivers, _) = inject_nodes1().create_network(); + + let connection_id = ConnectionId::new_unchecked(0); + + let topic = Topic::new("test"); + + let (p2, _receiver1) = add_peer(&mut gs1, &Vec::new(), true, false); + let (p1, _receiver2) = add_peer(&mut gs2, &topic_hashes, false, false); + + //add penalty to peer p2 + gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2); + + //subscribe to topic in gs2 + gs2.subscribe(&topic).unwrap(); + + let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, + p1: PeerId, + p2: PeerId, + connection_id: ConnectionId, + receivers: HashMap<PeerId, RpcReceiver>| + -> HashMap<PeerId, RpcReceiver> { + let new_receivers = HashMap::new(); + for (peer_id, receiver) in receivers.into_iter() { + let non_priority = receiver.non_priority.into_inner(); + match non_priority.try_recv() { + Ok(rpc) if peer_id == p1 => { + gs1.on_connection_handler_event( + p2, + connection_id, + HandlerEvent::Message { + rpc: proto_to_message(&rpc.into_protobuf()), + invalid_messages: vec![], + }, + ); + } + _ => {} + } + } + new_receivers + }; + + //forward the subscribe message + let receivers = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); + + //heartbeats on both + gs1.heartbeat(); + gs2.heartbeat(); + + //forward messages again + forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); + + //nobody got penalized + assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); +} + +#[test] +/// Test nodes that send grafts without subscriptions.
+fn test_graft_without_subscribe() { + // The node should: + // - Create an empty vector in mesh[topic] + // - Send subscription request to all peers + // - run JOIN(topic) + + let topic = String::from("test_subscribe"); + let subscribe_topic = vec![topic.clone()]; + let subscribe_topic_hash = vec![Topic::new(topic.clone()).hash()]; + let (mut gs, peers, _, topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(subscribe_topic) + .to_subscribe(false) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // The node sends a graft for the subscribe topic. + gs.handle_graft(&peers[0], subscribe_topic_hash); + + // The node disconnects + disconnect_peer(&mut gs, &peers[0]); + + // We unsubscribe from the topic. + let _ = gs.unsubscribe(&Topic::new(topic)); +} diff --git a/beacon_node/lighthouse_network/src/gossipsub/config.rs b/beacon_node/lighthouse_network/src/gossipsub/config.rs new file mode 100644 index 000000000..f7f967bfb --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/config.rs @@ -0,0 +1,1026 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::borrow::Cow; +use std::sync::Arc; +use std::time::Duration; + +use super::error::ConfigBuilderError; +use super::protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL}; +use super::types::{Message, MessageId, PeerKind}; + +use libp2p::identity::PeerId; +use libp2p::swarm::StreamProtocol; + +/// The types of message validation that can be employed by gossipsub. +#[derive(Debug, Clone)] +pub enum ValidationMode { + /// This is the default setting. This requires the message author to be a valid [`PeerId`] and to + /// be present as well as the sequence number. All messages must have valid signatures. + /// + /// NOTE: This setting will reject messages from nodes using + /// [`crate::gossipsub::behaviour::MessageAuthenticity::Anonymous`] and all messages that do not have + /// signatures. + Strict, + /// This setting permits messages that have no author, sequence number or signature. If any of + /// these fields exist in the message these are validated. + Permissive, + /// This setting requires the author, sequence number and signature fields of a message to be + /// empty. Any message that contains these fields is considered invalid. + Anonymous, + /// This setting does not check the author, sequence number or signature fields of incoming + /// messages. 
If these fields contain data, they are simply ignored. + /// + /// NOTE: This setting will consider messages with invalid signatures as valid messages. + None, +} + +/// Selector for custom Protocol Id +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Version { + V1_0, + V1_1, +} + +/// Configuration parameters that define the performance of the gossipsub network. +#[derive(Clone)] +pub struct Config { + protocol: ProtocolConfig, + history_length: usize, + history_gossip: usize, + mesh_n: usize, + mesh_n_low: usize, + mesh_n_high: usize, + retain_scores: usize, + gossip_lazy: usize, + gossip_factor: f64, + heartbeat_initial_delay: Duration, + heartbeat_interval: Duration, + fanout_ttl: Duration, + check_explicit_peers_ticks: u64, + duplicate_cache_time: Duration, + validate_messages: bool, + message_id_fn: Arc<dyn Fn(&Message) -> MessageId + Send + Sync + 'static>, + allow_self_origin: bool, + do_px: bool, + prune_peers: usize, + prune_backoff: Duration, + unsubscribe_backoff: Duration, + backoff_slack: u32, + flood_publish: bool, + graft_flood_threshold: Duration, + mesh_outbound_min: usize, + opportunistic_graft_ticks: u64, + opportunistic_graft_peers: usize, + gossip_retransimission: u32, + max_messages_per_rpc: Option<usize>, + max_ihave_length: usize, + max_ihave_messages: usize, + iwant_followup_time: Duration, + published_message_ids_cache_time: Duration, + connection_handler_queue_len: usize, + connection_handler_publish_duration: Duration, + connection_handler_forward_duration: Duration, +} + +impl Config { + pub(crate) fn protocol_config(&self) -> ProtocolConfig { + self.protocol.clone() + } + + // Overlay network parameters. + /// Number of heartbeats to keep in the `memcache` (default is 5). + pub fn history_length(&self) -> usize { + self.history_length + } + + /// Number of past heartbeats to gossip about (default is 3). + pub fn history_gossip(&self) -> usize { + self.history_gossip + } + + /// Target number of peers for the mesh network (D in the spec, default is 6). + pub fn mesh_n(&self) -> usize { + self.mesh_n + } + + /// Minimum number of peers in mesh network before adding more (D_lo in the spec, default is 5). + pub fn mesh_n_low(&self) -> usize { + self.mesh_n_low + } + + /// Maximum number of peers in mesh network before removing some (D_high in the spec, default + /// is 12). + pub fn mesh_n_high(&self) -> usize { + self.mesh_n_high + } + + /// Affects how peers are selected when pruning a mesh due to over subscription. + /// + /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are + /// chosen randomly (D_score in the spec, default is 4). + pub fn retain_scores(&self) -> usize { + self.retain_scores + } + + /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in the spec, + /// default is 6). + pub fn gossip_lazy(&self) -> usize { + self.gossip_lazy + } + + /// Affects how many peers we will emit gossip to at each heartbeat. + /// + /// We will send gossip to `gossip_factor * (total number of non-mesh peers)`, or + /// `gossip_lazy`, whichever is greater. The default is 0.25. + pub fn gossip_factor(&self) -> f64 { + self.gossip_factor + } + + /// Initial delay in each heartbeat (default is 5 seconds). + pub fn heartbeat_initial_delay(&self) -> Duration { + self.heartbeat_initial_delay + } + + /// Time between each heartbeat (default is 1 second). + pub fn heartbeat_interval(&self) -> Duration { + self.heartbeat_interval + } + + /// Time to live for fanout peers (default is 60 seconds).
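+ /// (Fanout peers are the peers we publish to for topics we have not joined;
+ /// the set is dropped once no publish has happened for this duration.)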
+ pub fn fanout_ttl(&self) -> Duration { + self.fanout_ttl + } + + /// The number of heartbeat ticks until we recheck the connection to explicit peers and + /// reconnect if necessary (default 300). + pub fn check_explicit_peers_ticks(&self) -> u64 { + self.check_explicit_peers_ticks + } + + /// The maximum byte size for each gossipsub RPC (default is 65536 bytes). + /// + /// This represents the maximum size of the entire protobuf payload. It must be at least + /// large enough to support basic control messages. If Peer eXchange is enabled, this + /// must be large enough to transmit the desired peer information on pruning. It must be at + /// least 100 bytes. + pub fn max_transmit_size(&self) -> usize { + self.protocol.max_transmit_size + } + + /// Duplicates are prevented by storing message ids of known messages in an LRU time cache. + /// This setting sets the time period that messages are stored in the cache. Duplicates can be + /// received if duplicate messages are sent at a time greater than this setting apart. The + /// default is 1 minute. + pub fn duplicate_cache_time(&self) -> Duration { + self.duplicate_cache_time + } + + /// When set to `true`, prevents automatic forwarding of all received messages. This setting + /// allows a user to validate the messages before propagating them to their peers. If set to + /// true, the user must manually call [`crate::gossipsub::Behaviour::report_message_validation_result()`] + /// on the behaviour to forward a message once validated. + /// The default is `false`. + pub fn validate_messages(&self) -> bool { + self.validate_messages + } + + /// Determines the level of validation used when receiving messages. See [`ValidationMode`] + /// for the available types. The default is ValidationMode::Strict. + pub fn validation_mode(&self) -> &ValidationMode { + &self.protocol.validation_mode + } + + /// A user-defined function allowing the user to specify the message id of a gossipsub message. + /// The default value is to concatenate the source peer id with a sequence number. Setting this + /// parameter allows the user to address packets arbitrarily. One example is content based + /// addressing, where this function may be set to `hash(message)`. This would prevent messages + /// of the same content from being duplicated. + /// + /// The function takes a [`Message`] as input and outputs a [`MessageId`] to be used as + /// the message id. + pub fn message_id(&self, message: &Message) -> MessageId { + (self.message_id_fn)(message) + } + + /// By default, gossipsub will reject messages that are sent to us that have the same message + /// source as we have specified locally. Enabling this allows these messages and prevents + /// penalizing the peer that sent us the message. Default is false. + pub fn allow_self_origin(&self) -> bool { + self.allow_self_origin + } + + /// Whether Peer eXchange is enabled; this should be enabled in bootstrappers and other well + /// connected/trusted nodes. The default is false. + /// + /// Note: Peer exchange is not implemented today, see the tracking issue in rust-libp2p. + pub fn do_px(&self) -> bool { + self.do_px + } + + /// Controls the number of peers to include in prune Peer eXchange. + /// When we prune a peer that's eligible for PX (has a good score, etc), we will try to + /// send them signed peer records for up to `prune_peers` other peers that we + /// know of. It is recommended that this value is larger than `mesh_n_high` so that the pruned + /// peer can reliably form a full mesh.
The default is typically 16; however, until signed + /// records are specified, this is disabled and set to 0. + pub fn prune_peers(&self) -> usize { + self.prune_peers + } + + /// Controls the backoff time for pruned peers. This is how long + /// a peer must wait before attempting to graft into our mesh again after being pruned. + /// When pruning a peer, we send them our value of `prune_backoff` so they know + /// the minimum time to wait. Peers running older versions may not send a backoff time, + /// so if we receive a prune message without one, we will wait at least `prune_backoff` + /// before attempting to re-graft. The default is one minute. + pub fn prune_backoff(&self) -> Duration { + self.prune_backoff + } + + /// Controls the backoff time when unsubscribing from a topic. + /// + /// This is how long to wait before resubscribing to the topic. A short backoff period in case + /// of an unsubscribe event allows reaching a healthy mesh in a more timely manner. The default + /// is 10 seconds. + pub fn unsubscribe_backoff(&self) -> Duration { + self.unsubscribe_backoff + } + + /// Number of heartbeat slots considered as slack for backoffs. This guarantees that we wait + /// at least backoff_slack heartbeats after a backoff is over before we try to graft. This + /// solves problems occurring through high latencies. In particular, if + /// `backoff_slack * heartbeat_interval` is longer than any latencies between processing + /// prunes on our side and processing prunes on the receiving side this guarantees that we + /// do not get punished for grafting too early. The default is 1. + pub fn backoff_slack(&self) -> u32 { + self.backoff_slack + } + + /// Whether to do flood publishing or not. If enabled, newly created messages will always be + /// sent to all peers that are subscribed to the topic and have a good enough score. + /// The default is true. + pub fn flood_publish(&self) -> bool { + self.flood_publish + } + + /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE, + /// then there is an extra score penalty applied to the peer through P7. + pub fn graft_flood_threshold(&self) -> Duration { + self.graft_flood_threshold + } + + /// Minimum number of outbound peers in the mesh network before adding more (D_out in the spec). + /// This value must be smaller than or equal to `mesh_n / 2` and smaller than `mesh_n_low`. + /// The default is 2. + pub fn mesh_outbound_min(&self) -> usize { + self.mesh_outbound_min + } + + /// Number of heartbeat ticks that specify the interval in which opportunistic grafting is + /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh + /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a + /// threshold (see the gossipsub v1.1 spec). + /// The default is 60. + pub fn opportunistic_graft_ticks(&self) -> u64 { + self.opportunistic_graft_ticks + } + + /// Controls how many times we will allow a peer to request the same message id through IWANT + /// gossip before we start ignoring them. This is designed to prevent peers from spamming us + /// with requests and wasting our resources. The default is 3. + pub fn gossip_retransimission(&self) -> u32 { + self.gossip_retransimission + } + + /// The maximum number of new peers to graft to during opportunistic grafting. The default is 2. + pub fn opportunistic_graft_peers(&self) -> usize { + self.opportunistic_graft_peers + } + + /// The maximum number of messages we will process in a given RPC. If this is unset, there is + /// no limit.
The default is None. + pub fn max_messages_per_rpc(&self) -> Option<usize> { + self.max_messages_per_rpc + } + + /// The maximum number of messages to include in an IHAVE message. + /// Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a + /// peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the + /// default if your system is pushing more than 5000 messages every `history_gossip` + /// heartbeats; with the defaults this is 1666 messages/s. The default is 5000. + pub fn max_ihave_length(&self) -> usize { + self.max_ihave_length + } + + /// The maximum number of IHAVE messages to accept from a peer + /// within a heartbeat. The default is 10. + pub fn max_ihave_messages(&self) -> usize { + self.max_ihave_messages + } + + /// Time to wait for a message requested through IWANT following an IHAVE advertisement. + /// If the message is not received within this window, a broken promise is declared and + /// the router may apply behavioural penalties. The default is 3 seconds. + pub fn iwant_followup_time(&self) -> Duration { + self.iwant_followup_time + } + + /// Enable support for floodsub peers. Default false. + pub fn support_floodsub(&self) -> bool { + self.protocol.protocol_ids.contains(&FLOODSUB_PROTOCOL) + } + + /// Published message ids time cache duration. The default is 10 seconds. + pub fn published_message_ids_cache_time(&self) -> Duration { + self.published_message_ids_cache_time + } + + /// The max number of messages a `ConnectionHandler` can buffer. The default is 5000. + pub fn connection_handler_queue_len(&self) -> usize { + self.connection_handler_queue_len + } + + /// The duration a message to be published can wait to be sent before it is abandoned. The + /// default is 5 seconds. + pub fn publish_queue_duration(&self) -> Duration { + self.connection_handler_publish_duration + } + + /// The duration a message to be forwarded can wait to be sent before it is abandoned. The + /// default is 1s. + pub fn forward_queue_duration(&self) -> Duration { + self.connection_handler_forward_duration + } +} + +impl Default for Config { + fn default() -> Self { + // use ConfigBuilder to also validate defaults + ConfigBuilder::default() + .build() + .expect("Default config parameters should be valid parameters") + } +} + +/// The builder struct for constructing a gossipsub configuration. +pub struct ConfigBuilder { + config: Config, + invalid_protocol: bool, // This is a bit of a hack to only expose one error to the user.
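+ // Setters that receive an invalid protocol id only flip this flag, so the
+ // failure can be reported once, from `build()`, instead of from every setter.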
+} + +impl Default for ConfigBuilder { + fn default() -> Self { + ConfigBuilder { + config: Config { + protocol: ProtocolConfig::default(), + history_length: 5, + history_gossip: 3, + mesh_n: 6, + mesh_n_low: 5, + mesh_n_high: 12, + retain_scores: 4, + gossip_lazy: 6, // default to mesh_n + gossip_factor: 0.25, + heartbeat_initial_delay: Duration::from_secs(5), + heartbeat_interval: Duration::from_secs(1), + fanout_ttl: Duration::from_secs(60), + check_explicit_peers_ticks: 300, + duplicate_cache_time: Duration::from_secs(60), + validate_messages: false, + message_id_fn: Arc::new(|message| { + // default message id is: source + sequence number + // NOTE: If either the peer_id or source is not provided, we set it to 0. + let mut source_string = if let Some(peer_id) = message.source.as_ref() { + peer_id.to_base58() + } else { + PeerId::from_bytes(&[0, 1, 0]) + .expect("Valid peer id") + .to_base58() + }; + source_string + .push_str(&message.sequence_number.unwrap_or_default().to_string()); + MessageId::from(source_string) + }), + allow_self_origin: false, + do_px: false, + prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented. + prune_backoff: Duration::from_secs(60), + unsubscribe_backoff: Duration::from_secs(10), + backoff_slack: 1, + flood_publish: true, + graft_flood_threshold: Duration::from_secs(10), + mesh_outbound_min: 2, + opportunistic_graft_ticks: 60, + opportunistic_graft_peers: 2, + gossip_retransimission: 3, + max_messages_per_rpc: None, + max_ihave_length: 5000, + max_ihave_messages: 10, + iwant_followup_time: Duration::from_secs(3), + published_message_ids_cache_time: Duration::from_secs(10), + connection_handler_queue_len: 5000, + connection_handler_publish_duration: Duration::from_secs(5), + connection_handler_forward_duration: Duration::from_millis(1000), + }, + invalid_protocol: false, + } + } +} + +impl From<Config> for ConfigBuilder { + fn from(config: Config) -> Self { + ConfigBuilder { + config, + invalid_protocol: false, + } + } +} + +impl ConfigBuilder { + /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and `/meshsub/1.0.0`). + pub fn protocol_id_prefix( + &mut self, + protocol_id_prefix: impl Into<Cow<'static, str>>, + ) -> &mut Self { + let cow = protocol_id_prefix.into(); + + match ( + StreamProtocol::try_from_owned(format!("{}/1.1.0", cow)), + StreamProtocol::try_from_owned(format!("{}/1.0.0", cow)), + ) { + (Ok(p1), Ok(p2)) => { + self.config.protocol.protocol_ids = vec![ + ProtocolId { + protocol: p1, + kind: PeerKind::Gossipsubv1_1, + }, + ProtocolId { + protocol: p2, + kind: PeerKind::Gossipsub, + }, + ] + } + _ => { + self.invalid_protocol = true; + } + } + + self + } + + /// The full protocol id to negotiate this protocol (does not append `/1.0.0` or `/1.1.0`). + pub fn protocol_id( + &mut self, + protocol_id: impl Into<Cow<'static, str>>, + custom_id_version: Version, + ) -> &mut Self { + let cow = protocol_id.into(); + + match StreamProtocol::try_from_owned(cow.to_string()) { + Ok(protocol) => { + self.config.protocol.protocol_ids = vec![ProtocolId { + protocol, + kind: match custom_id_version { + Version::V1_1 => PeerKind::Gossipsubv1_1, + Version::V1_0 => PeerKind::Gossipsub, + }, + }] + } + _ => { + self.invalid_protocol = true; + } + } + + self + } + + /// Number of heartbeats to keep in the `memcache` (default is 5). + pub fn history_length(&mut self, history_length: usize) -> &mut Self { + self.config.history_length = history_length; + self + } + + /// Number of past heartbeats to gossip about (default is 3).
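+ /// (Gossip is emitted from the most recent `history_gossip` windows of the
+ /// message cache, so this value should not exceed [`Self::history_length`].)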
+ pub fn history_gossip(&mut self, history_gossip: usize) -> &mut Self { + self.config.history_gossip = history_gossip; + self + } + + /// Target number of peers for the mesh network (D in the spec, default is 6). + pub fn mesh_n(&mut self, mesh_n: usize) -> &mut Self { + self.config.mesh_n = mesh_n; + self + } + + /// Minimum number of peers in mesh network before adding more (D_lo in the spec, default is 5). + pub fn mesh_n_low(&mut self, mesh_n_low: usize) -> &mut Self { + self.config.mesh_n_low = mesh_n_low; + self + } + + /// Maximum number of peers in mesh network before removing some (D_high in the spec, default + /// is 12). + pub fn mesh_n_high(&mut self, mesh_n_high: usize) -> &mut Self { + self.config.mesh_n_high = mesh_n_high; + self + } + + /// Affects how peers are selected when pruning a mesh due to over subscription. + /// + /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the remainder are + /// chosen randomly (D_score in the spec, default is 4). + pub fn retain_scores(&mut self, retain_scores: usize) -> &mut Self { + self.config.retain_scores = retain_scores; + self + } + + /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in the spec, + /// default is 6). + pub fn gossip_lazy(&mut self, gossip_lazy: usize) -> &mut Self { + self.config.gossip_lazy = gossip_lazy; + self + } + + /// Affects how many peers we will emit gossip to at each heartbeat. + /// + /// We will send gossip to `gossip_factor * (total number of non-mesh peers)`, or + /// `gossip_lazy`, whichever is greater. The default is 0.25. + pub fn gossip_factor(&mut self, gossip_factor: f64) -> &mut Self { + self.config.gossip_factor = gossip_factor; + self + } + + /// Initial delay in each heartbeat (default is 5 seconds). + pub fn heartbeat_initial_delay(&mut self, heartbeat_initial_delay: Duration) -> &mut Self { + self.config.heartbeat_initial_delay = heartbeat_initial_delay; + self + } + + /// Time between each heartbeat (default is 1 second). + pub fn heartbeat_interval(&mut self, heartbeat_interval: Duration) -> &mut Self { + self.config.heartbeat_interval = heartbeat_interval; + self + } + + /// The number of heartbeat ticks until we recheck the connection to explicit peers and + /// reconnect if necessary (default 300). + pub fn check_explicit_peers_ticks(&mut self, check_explicit_peers_ticks: u64) -> &mut Self { + self.config.check_explicit_peers_ticks = check_explicit_peers_ticks; + self + } + + /// Time to live for fanout peers (default is 60 seconds). + pub fn fanout_ttl(&mut self, fanout_ttl: Duration) -> &mut Self { + self.config.fanout_ttl = fanout_ttl; + self + } + + /// The maximum byte size for each gossipsub RPC (default is 65536 bytes). + pub fn max_transmit_size(&mut self, max_transmit_size: usize) -> &mut Self { + self.config.protocol.max_transmit_size = max_transmit_size; + self + } + + /// Duplicates are prevented by storing message ids of known messages in an LRU time cache. + /// This setting sets the time period that messages are stored in the cache. Duplicates can be + /// received if duplicate messages are sent at a time greater than this setting apart. The + /// default is 1 minute. + pub fn duplicate_cache_time(&mut self, cache_size: Duration) -> &mut Self { + self.config.duplicate_cache_time = cache_size; + self + } + + /// When set, prevents automatic forwarding of all received messages. This setting + /// allows a user to validate the messages before propagating them to their peers.
If set, + /// the user must manually call [`crate::gossipsub::Behaviour::report_message_validation_result()`] on the + /// behaviour to forward a message once validated. + pub fn validate_messages(&mut self) -> &mut Self { + self.config.validate_messages = true; + self + } + + /// Determines the level of validation used when receiving messages. See [`ValidationMode`] + /// for the available types. The default is ValidationMode::Strict. + pub fn validation_mode(&mut self, validation_mode: ValidationMode) -> &mut Self { + self.config.protocol.validation_mode = validation_mode; + self + } + + /// A user-defined function allowing the user to specify the message id of a gossipsub message. + /// The default value is to concatenate the source peer id with a sequence number. Setting this + /// parameter allows the user to address packets arbitrarily. One example is content based + /// addressing, where this function may be set to `hash(message)`. This would prevent messages + /// of the same content from being duplicated. + /// + /// The function takes a [`Message`] as input and outputs a [`MessageId`] to be + /// used as the message id. + pub fn message_id_fn<F>(&mut self, id_fn: F) -> &mut Self + where + F: Fn(&Message) -> MessageId + Send + Sync + 'static, + { + self.config.message_id_fn = Arc::new(id_fn); + self + } + + /// Enables Peer eXchange. This should be enabled in bootstrappers and other well + /// connected/trusted nodes. The default is false. + /// + /// Note: Peer exchange is not implemented today, see the tracking issue in rust-libp2p. + pub fn do_px(&mut self) -> &mut Self { + self.config.do_px = true; + self + } + + /// Controls the number of peers to include in prune Peer eXchange. + /// + /// When we prune a peer that's eligible for PX (has a good score, etc), we will try to + /// send them signed peer records for up to [`Self::prune_peers`] other peers that we + /// know of. It is recommended that this value is larger than [`Self::mesh_n_high`] so that the + /// pruned peer can reliably form a full mesh. The default is typically 16; however, until + /// signed records are specified, this is disabled and set to 0. + pub fn prune_peers(&mut self, prune_peers: usize) -> &mut Self { + self.config.prune_peers = prune_peers; + self + } + + /// Controls the backoff time for pruned peers. This is how long + /// a peer must wait before attempting to graft into our mesh again after being pruned. + /// When pruning a peer, we send them our value of [`Self::prune_backoff`] so they know + /// the minimum time to wait. Peers running older versions may not send a backoff time, + /// so if we receive a prune message without one, we will wait at least [`Self::prune_backoff`] + /// before attempting to re-graft. The default is one minute. + pub fn prune_backoff(&mut self, prune_backoff: Duration) -> &mut Self { + self.config.prune_backoff = prune_backoff; + self + } + + /// Controls the backoff time when unsubscribing from a topic. + /// + /// This is how long to wait before resubscribing to the topic. A short backoff period in case + /// of an unsubscribe event allows reaching a healthy mesh in a more timely manner. The default + /// is 10 seconds. + pub fn unsubscribe_backoff(&mut self, unsubscribe_backoff: u64) -> &mut Self { + self.config.unsubscribe_backoff = Duration::from_secs(unsubscribe_backoff); + self + } + + /// Number of heartbeat slots considered as slack for backoffs. This guarantees that we wait + /// at least backoff_slack heartbeats after a backoff is over before we try to graft. This + /// solves problems occurring through high latencies.
+
+    /// Number of heartbeat slots considered as slack for backoffs. This guarantees that we wait
+    /// at least backoff_slack heartbeats after a backoff is over before we try to graft. This
+    /// solves problems occurring through high latencies. In particular, if
+    /// `backoff_slack * heartbeat_interval` is longer than any latencies between processing
+    /// prunes on our side and processing prunes on the receiving side, this guarantees that we
+    /// don't get punished for grafting too early. The default is 1.
+    pub fn backoff_slack(&mut self, backoff_slack: u32) -> &mut Self {
+        self.config.backoff_slack = backoff_slack;
+        self
+    }
+
+    /// Whether to do flood publishing or not. If enabled, newly created messages will always be
+    /// sent to all peers that are subscribed to the topic and have a good enough score.
+    /// The default is true.
+    pub fn flood_publish(&mut self, flood_publish: bool) -> &mut Self {
+        self.config.flood_publish = flood_publish;
+        self
+    }
+
+    /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE,
+    /// then there is an extra score penalty applied to the peer through P7.
+    pub fn graft_flood_threshold(&mut self, graft_flood_threshold: Duration) -> &mut Self {
+        self.config.graft_flood_threshold = graft_flood_threshold;
+        self
+    }
+
+    /// Minimum number of outbound peers in the mesh network before adding more (D_out in the spec).
+    /// This value must be smaller than or equal to `mesh_n / 2` and smaller than `mesh_n_low`.
+    /// The default is 2.
+    pub fn mesh_outbound_min(&mut self, mesh_outbound_min: usize) -> &mut Self {
+        self.config.mesh_outbound_min = mesh_outbound_min;
+        self
+    }
+
+    /// Number of heartbeat ticks that specify the interval in which opportunistic grafting is
+    /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh
+    /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a
+    /// threshold. The default is 60.
+    pub fn opportunistic_graft_ticks(&mut self, opportunistic_graft_ticks: u64) -> &mut Self {
+        self.config.opportunistic_graft_ticks = opportunistic_graft_ticks;
+        self
+    }
+
+    /// Controls how many times we will allow a peer to request the same message id through IWANT
+    /// gossip before we start ignoring them. This is designed to prevent peers from spamming us
+    /// with requests and wasting our resources.
+    pub fn gossip_retransimission(&mut self, gossip_retransimission: u32) -> &mut Self {
+        self.config.gossip_retransimission = gossip_retransimission;
+        self
+    }
+
+    /// The maximum number of new peers to graft to during opportunistic grafting. The default is 2.
+    pub fn opportunistic_graft_peers(&mut self, opportunistic_graft_peers: usize) -> &mut Self {
+        self.config.opportunistic_graft_peers = opportunistic_graft_peers;
+        self
+    }
+
+    /// The maximum number of messages we will process in a given RPC. If this is unset, there is
+    /// no limit. The default is None.
+    pub fn max_messages_per_rpc(&mut self, max: Option<usize>) -> &mut Self {
+        self.config.max_messages_per_rpc = max;
+        self
+    }
+
+    /// The maximum number of messages to include in an IHAVE message.
+    /// Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a
+    /// peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the
+    /// default if your system is pushing more than 5000 messages in GossipSubHistoryGossip
+    /// heartbeats; with the defaults this is 1666 messages/s. The default is 5000.
+    pub fn max_ihave_length(&mut self, max_ihave_length: usize) -> &mut Self {
+        self.config.max_ihave_length = max_ihave_length;
+        self
+    }
+
+    /// The maximum number of IHAVE messages to accept from a peer within a heartbeat.
+    pub fn max_ihave_messages(&mut self, max_ihave_messages: usize) -> &mut Self {
+        self.config.max_ihave_messages = max_ihave_messages;
+        self
+    }
+
+    /// By default, gossipsub will reject messages sent to us that have the same message
+    /// source as the one we have specified locally. Enabling this allows these messages and prevents
+    /// penalizing the peer that sent us the message. Default is false.
+    pub fn allow_self_origin(&mut self, allow_self_origin: bool) -> &mut Self {
+        self.config.allow_self_origin = allow_self_origin;
+        self
+    }
+
+    /// Time to wait for a message requested through IWANT following an IHAVE advertisement.
+    /// If the message is not received within this window, a broken promise is declared and
+    /// the router may apply behavioural penalties. The default is 3 seconds.
+    pub fn iwant_followup_time(&mut self, iwant_followup_time: Duration) -> &mut Self {
+        self.config.iwant_followup_time = iwant_followup_time;
+        self
+    }
+
+    /// Enable support for floodsub peers.
+    pub fn support_floodsub(&mut self) -> &mut Self {
+        if self
+            .config
+            .protocol
+            .protocol_ids
+            .contains(&FLOODSUB_PROTOCOL)
+        {
+            return self;
+        }
+
+        self.config.protocol.protocol_ids.push(FLOODSUB_PROTOCOL);
+        self
+    }
+
+    /// Published message ids time cache duration. The default is 10 seconds.
+    pub fn published_message_ids_cache_time(
+        &mut self,
+        published_message_ids_cache_time: Duration,
+    ) -> &mut Self {
+        self.config.published_message_ids_cache_time = published_message_ids_cache_time;
+        self
+    }
+
+    /// The max number of messages a `ConnectionHandler` can buffer. The default is 5000.
+    pub fn connection_handler_queue_len(&mut self, len: usize) -> &mut Self {
+        self.config.connection_handler_queue_len = len;
+        self
+    }
+
+    /// The duration a message to be published can wait to be sent before it is abandoned. The
+    /// default is 5 seconds.
+    pub fn publish_queue_duration(&mut self, duration: Duration) -> &mut Self {
+        self.config.connection_handler_publish_duration = duration;
+        self
+    }
+
+    /// The duration a message to be forwarded can wait to be sent before it is abandoned. The
+    /// default is 1 second.
+    pub fn forward_queue_duration(&mut self, duration: Duration) -> &mut Self {
+        self.config.connection_handler_forward_duration = duration;
+        self
+    }
+
+    /// Constructs a [`Config`] from the given configuration and validates the settings.
+    pub fn build(&self) -> Result<Config, ConfigBuilderError> {
+        // check all constraints on config
+
+        if self.config.protocol.max_transmit_size < 100 {
+            return Err(ConfigBuilderError::MaxTransmissionSizeTooSmall);
+        }
+
+        if self.config.history_length < self.config.history_gossip {
+            return Err(ConfigBuilderError::HistoryLengthTooSmall);
+        }
+
+        if !(self.config.mesh_outbound_min <= self.config.mesh_n_low
+            && self.config.mesh_n_low <= self.config.mesh_n
+            && self.config.mesh_n <= self.config.mesh_n_high)
+        {
+            return Err(ConfigBuilderError::MeshParametersInvalid);
+        }
+
+        if self.config.mesh_outbound_min * 2 > self.config.mesh_n {
+            return Err(ConfigBuilderError::MeshOutboundInvalid);
+        }
+
+        if self.config.unsubscribe_backoff.as_millis() == 0 {
+            return Err(ConfigBuilderError::UnsubscribeBackoffIsZero);
+        }
+
+        if self.invalid_protocol {
+            return Err(ConfigBuilderError::InvalidProtocol);
+        }
+
+        Ok(self.config.clone())
+    }
+}
+
+impl std::fmt::Debug for Config {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut builder = f.debug_struct("GossipsubConfig");
+        let _ = builder.field("protocol", &self.protocol);
+        let _ = builder.field("history_length", &self.history_length);
+        let _ = builder.field("history_gossip", &self.history_gossip);
+        let _ = builder.field("mesh_n", &self.mesh_n);
+        let _ = builder.field("mesh_n_low", &self.mesh_n_low);
+        let _ = builder.field("mesh_n_high", &self.mesh_n_high);
+        let _ = builder.field("retain_scores", &self.retain_scores);
+        let _ = builder.field("gossip_lazy", &self.gossip_lazy);
+        let _ = builder.field("gossip_factor", &self.gossip_factor);
+        let _ = builder.field("heartbeat_initial_delay", &self.heartbeat_initial_delay);
+        let _ = builder.field("heartbeat_interval", &self.heartbeat_interval);
+        let _ = builder.field("fanout_ttl", &self.fanout_ttl);
+        let _ = builder.field("duplicate_cache_time", &self.duplicate_cache_time);
+        let _ = builder.field("validate_messages", &self.validate_messages);
+        let _ = builder.field("allow_self_origin", &self.allow_self_origin);
+        let _ = builder.field("do_px", &self.do_px);
+        let _ = builder.field("prune_peers", &self.prune_peers);
+        let _ = builder.field("prune_backoff", &self.prune_backoff);
+        let _ = builder.field("backoff_slack", &self.backoff_slack);
+        let _ = builder.field("flood_publish", &self.flood_publish);
+        let _ = builder.field("graft_flood_threshold", &self.graft_flood_threshold);
+        let _ = builder.field("mesh_outbound_min", &self.mesh_outbound_min);
+        let _ = builder.field("opportunistic_graft_ticks", &self.opportunistic_graft_ticks);
+        let _ = builder.field("opportunistic_graft_peers", &self.opportunistic_graft_peers);
+        let _ = builder.field("max_messages_per_rpc", &self.max_messages_per_rpc);
+        let _ = builder.field("max_ihave_length", &self.max_ihave_length);
+        let _ = builder.field("max_ihave_messages", &self.max_ihave_messages);
+        let _ = builder.field("iwant_followup_time", &self.iwant_followup_time);
+        let _ = builder.field(
+            "published_message_ids_cache_time",
+            &self.published_message_ids_cache_time,
+        );
+        builder.finish()
+    }
+}
+
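Because `build()` checks the documented invariants, a bad parameter combination fails loudly instead of degrading the mesh at runtime. A small sketch under the same re-export assumption as above:

```rust
use lighthouse_network::gossipsub::{ConfigBuilder, ConfigBuilderError};

fn main() {
    // mesh_n_low (8) > mesh_n (6) violates
    // mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high.
    let result = ConfigBuilder::default().mesh_n(6).mesh_n_low(8).build();
    assert!(matches!(
        result,
        Err(ConfigBuilderError::MeshParametersInvalid)
    ));
}
```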
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::gossipsub::topic::IdentityHash;
+    use crate::gossipsub::types::PeerKind;
+    use crate::gossipsub::Topic;
+    use libp2p::core::UpgradeInfo;
+    use libp2p::swarm::StreamProtocol;
+    use std::collections::hash_map::DefaultHasher;
+    use std::hash::{Hash, Hasher};
+
+    #[test]
+    fn create_config_with_message_id_as_plain_function() {
+        let config = ConfigBuilder::default()
+            .message_id_fn(message_id_plain_function)
+            .build()
+            .unwrap();
+
+        let result = config.message_id(&get_gossipsub_message());
+
+        assert_eq!(result, get_expected_message_id());
+    }
+
+    #[test]
+    fn create_config_with_message_id_as_closure() {
+        let config = ConfigBuilder::default()
+            .message_id_fn(|message: &Message| {
+                let mut s = DefaultHasher::new();
+                message.data.hash(&mut s);
+                let mut v = s.finish().to_string();
+                v.push('e');
+                MessageId::from(v)
+            })
+            .build()
+            .unwrap();
+
+        let result = config.message_id(&get_gossipsub_message());
+
+        assert_eq!(result, get_expected_message_id());
+    }
+
+    #[test]
+    fn create_config_with_message_id_as_closure_with_variable_capture() {
+        let captured: char = 'e';
+
+        let config = ConfigBuilder::default()
+            .message_id_fn(move |message: &Message| {
+                let mut s = DefaultHasher::new();
+                message.data.hash(&mut s);
+                let mut v = s.finish().to_string();
+                v.push(captured);
+                MessageId::from(v)
+            })
+            .build()
+            .unwrap();
+
+        let result = config.message_id(&get_gossipsub_message());
+
+        assert_eq!(result, get_expected_message_id());
+    }
+
+    #[test]
+    fn create_config_with_protocol_id_prefix() {
+        let protocol_config = ConfigBuilder::default()
+            .protocol_id_prefix("/purple")
+            .build()
+            .unwrap()
+            .protocol_config();
+
+        let protocol_ids = protocol_config.protocol_info();
+
+        assert_eq!(protocol_ids.len(), 2);
+
+        assert_eq!(
+            protocol_ids[0].protocol,
+            StreamProtocol::new("/purple/1.1.0")
+        );
+        assert_eq!(protocol_ids[0].kind, PeerKind::Gossipsubv1_1);
+
+        assert_eq!(
+            protocol_ids[1].protocol,
+            StreamProtocol::new("/purple/1.0.0")
+        );
+        assert_eq!(protocol_ids[1].kind, PeerKind::Gossipsub);
+    }
+
+    #[test]
+    fn create_config_with_custom_protocol_id() {
+        let protocol_config = ConfigBuilder::default()
+            .protocol_id("/purple", Version::V1_0)
+            .build()
+            .unwrap()
+            .protocol_config();
+
+        let protocol_ids = protocol_config.protocol_info();
+
+        assert_eq!(protocol_ids.len(), 1);
+
+        assert_eq!(protocol_ids[0].protocol, "/purple");
+        assert_eq!(protocol_ids[0].kind, PeerKind::Gossipsub);
+    }
+
+    fn get_gossipsub_message() -> Message {
+        Message {
+            source: None,
+            data: vec![12, 34, 56],
+            sequence_number: None,
+            topic: Topic::<IdentityHash>::new("test").hash(),
+        }
+    }
+
+    fn get_expected_message_id() -> MessageId {
+        MessageId::from([
+            49, 55, 56, 51, 56, 52, 49, 51, 52, 51, 52, 55, 51, 51, 53, 52, 54, 54, 52, 49, 101,
+        ])
+    }
+
+    fn message_id_plain_function(message: &Message) -> MessageId {
+        let mut s = DefaultHasher::new();
+        message.data.hash(&mut s);
+        let mut v = s.finish().to_string();
+        v.push('e');
+        MessageId::from(v)
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/error.rs b/beacon_node/lighthouse_network/src/gossipsub/error.rs
new file mode 100644
index 000000000..df3332bc9
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/error.rs
@@ -0,0 +1,159 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! Error types that can result from gossipsub.
+
+use libp2p::identity::SigningError;
+
+/// Error associated with publishing a gossipsub message.
+#[derive(Debug)]
+pub enum PublishError {
+    /// This message has already been published.
+    Duplicate,
+    /// An error occurred whilst signing the message.
+    SigningError(SigningError),
+    /// There were no peers to send this message to.
+    InsufficientPeers,
+    /// The overall message was too large. This could be due to excessive topics or an excessive
+    /// message size.
+    MessageTooLarge,
+    /// The compression algorithm failed.
+    TransformFailed(std::io::Error),
+    /// Messages could not be sent because all queues for peers were full. The usize represents the
+    /// number of peers that have full queues.
+    AllQueuesFull(usize),
+}
+
+impl std::fmt::Display for PublishError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(f, "{self:?}")
+    }
+}
+
+impl std::error::Error for PublishError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match self {
+            Self::SigningError(err) => Some(err),
+            Self::TransformFailed(err) => Some(err),
+            _ => None,
+        }
+    }
+}
+
+/// Error associated with subscribing to a topic.
+#[derive(Debug)]
+pub enum SubscriptionError {
+    /// Couldn't publish our subscription.
+    PublishError(PublishError),
+    /// We are not allowed to subscribe to this topic by the subscription filter.
+    NotAllowed,
+}
+
+impl std::fmt::Display for SubscriptionError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(f, "{self:?}")
+    }
+}
+
+impl std::error::Error for SubscriptionError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match self {
+            Self::PublishError(err) => Some(err),
+            _ => None,
+        }
+    }
+}
+
+impl From<SigningError> for PublishError {
+    fn from(error: SigningError) -> Self {
+        PublishError::SigningError(error)
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub enum ValidationError {
+    /// The message has an invalid signature.
+    InvalidSignature,
+    /// The sequence number was empty, expected a value.
+    EmptySequenceNumber,
+    /// The sequence number was the incorrect size.
+    InvalidSequenceNumber,
+    /// The PeerId was invalid.
+    InvalidPeerId,
+    /// A signature was present although the message authenticity is
+    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
+    SignaturePresent,
+    /// A sequence number was present although the message authenticity is
+    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
+    SequenceNumberPresent,
+    /// A message source was present although the message authenticity is
+    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
+    MessageSourcePresent,
+    /// The data transformation failed.
+    TransformFailed,
+}
+
+impl std::fmt::Display for ValidationError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(f, "{self:?}")
+    }
+}
+
+impl std::error::Error for ValidationError {}
+
+impl From<std::io::Error> for PublishError {
+    fn from(error: std::io::Error) -> PublishError {
+        PublishError::TransformFailed(error)
+    }
+}
+
+/// Error associated with Config building.
+#[derive(Debug)]
+pub enum ConfigBuilderError {
+    /// Maximum transmission size is too small.
+    MaxTransmissionSizeTooSmall,
+    /// History length is less than the history gossip length.
+    HistoryLengthTooSmall,
+    /// The inequality mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high doesn't hold.
+    MeshParametersInvalid,
+    /// The inequality mesh_outbound_min <= mesh_n / 2 doesn't hold.
+    MeshOutboundInvalid,
+    /// unsubscribe_backoff is zero.
+    UnsubscribeBackoffIsZero,
+    /// Invalid protocol.
+    InvalidProtocol,
+}
+
+impl std::error::Error for ConfigBuilderError {}
+
+impl std::fmt::Display for ConfigBuilderError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        match self {
+            Self::MaxTransmissionSizeTooSmall => {
+                write!(f, "Maximum transmission size is too small")
+            }
+            Self::HistoryLengthTooSmall => write!(f, "History length is less than the history gossip length"),
+            Self::MeshParametersInvalid => write!(f, "The inequality mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high doesn't hold"),
+            Self::MeshOutboundInvalid => write!(f, "The inequality mesh_outbound_min <= mesh_n / 2 doesn't hold"),
+            Self::UnsubscribeBackoffIsZero => write!(f, "unsubscribe_backoff is zero"),
+            Self::InvalidProtocol => write!(f, "Invalid protocol"),
+        }
+    }
+}
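These are the errors that the behaviour's publish and subscribe entry points surface to callers. A hedged sketch of handling them, assuming the upstream `publish(topic, data) -> Result<MessageId, PublishError>` signature and the `IdentTopic` alias are unchanged in this vendored copy:

```rust
use lighthouse_network::gossipsub::{Behaviour, IdentTopic, PublishError};

fn publish_example(behaviour: &mut Behaviour, data: Vec<u8>) {
    let topic = IdentTopic::new("example-topic");
    match behaviour.publish(topic, data) {
        Ok(message_id) => println!("published as {message_id}"),
        // Transient: no mesh or fanout peers yet; retry after grafting.
        Err(PublishError::InsufficientPeers) => println!("no peers yet"),
        // Deduplicated by the cache configured via `duplicate_cache_time`.
        Err(PublishError::Duplicate) => println!("already published"),
        Err(e) => eprintln!("publish failed: {e}"),
    }
}
```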
diff --git a/beacon_node/lighthouse_network/src/gossipsub/generated/compat.proto b/beacon_node/lighthouse_network/src/gossipsub/generated/compat.proto
new file mode 100644
index 000000000..b2753bf7e
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/generated/compat.proto
@@ -0,0 +1,12 @@
+syntax = "proto2";
+
+package compat.pb;
+
+message Message {
+  optional bytes from = 1;
+  optional bytes data = 2;
+  optional bytes seqno = 3;
+  repeated string topic_ids = 4;
+  optional bytes signature = 5;
+  optional bytes key = 6;
+}
\ No newline at end of file
diff --git a/beacon_node/lighthouse_network/src/gossipsub/generated/compat/mod.rs b/beacon_node/lighthouse_network/src/gossipsub/generated/compat/mod.rs
new file mode 100644
index 000000000..aec6164c7
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/generated/compat/mod.rs
@@ -0,0 +1,2 @@
+// Automatically generated mod.rs
+pub mod pb;
diff --git a/beacon_node/lighthouse_network/src/gossipsub/generated/compat/pb.rs b/beacon_node/lighthouse_network/src/gossipsub/generated/compat/pb.rs
new file mode 100644
index 000000000..fd59c38e2
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/generated/compat/pb.rs
@@ -0,0 +1,67 @@
+// Automatically generated rust module for 'compat.proto' file
+
+#![allow(non_snake_case)]
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(unused_imports)]
+#![allow(unknown_lints)]
+#![allow(clippy::all)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
+
+use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result};
+use quick_protobuf::sizeofs::*;
+use super::super::*;
+
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Debug, Default, PartialEq, Clone)]
+pub struct Message {
+    pub from: Option<Vec<u8>>,
+    pub data: Option<Vec<u8>>,
+    pub seqno: Option<Vec<u8>>,
+    pub topic_ids: Vec<String>,
+    pub signature: Option<Vec<u8>>,
+    pub key: Option<Vec<u8>>,
+}
+
+impl<'a> MessageRead<'a> for Message {
+    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
+        let mut msg = Self::default();
+        while !r.is_eof() {
+            match r.next_tag(bytes) {
+                Ok(10) => msg.from = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(18) => msg.data = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(26) => msg.seqno = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(34) => msg.topic_ids.push(r.read_string(bytes)?.to_owned()),
+                Ok(42) => msg.signature = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(50) => msg.key = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(t) => { r.read_unknown(bytes, t)?; }
+                Err(e) => return Err(e),
+            }
+        }
+        Ok(msg)
+    }
+}
+
+impl MessageWrite for Message {
+    fn get_size(&self) -> usize {
+        0
+        + self.from.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
+        + self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
+        + self.seqno.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
+        + self.topic_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
+        + self.signature.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
+        + self.key.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
+    }
+
+    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
+        if let Some(ref s) = self.from { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
+        if let Some(ref s) = self.data { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
+        if let Some(ref s) = self.seqno { w.write_with_tag(26, |w| w.write_bytes(&**s))?; }
+        for s in &self.topic_ids { w.write_with_tag(34, |w| w.write_string(&**s))?; }
+        if let Some(ref s) = self.signature { w.write_with_tag(42, |w| w.write_bytes(&**s))?; }
+        if let Some(ref s) = self.key { w.write_with_tag(50, |w| w.write_bytes(&**s))?; }
+        Ok(())
+    }
+}
+
diff --git a/beacon_node/lighthouse_network/src/gossipsub/generated/gossipsub/mod.rs b/beacon_node/lighthouse_network/src/gossipsub/generated/gossipsub/mod.rs
new file mode 100644
index 000000000..aec6164c7
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/generated/gossipsub/mod.rs
@@ -0,0 +1,2 @@
+// Automatically generated mod.rs
+pub mod pb;
diff --git a/beacon_node/lighthouse_network/src/gossipsub/generated/gossipsub/pb.rs b/beacon_node/lighthouse_network/src/gossipsub/generated/gossipsub/pb.rs
new file mode 100644
index 000000000..9a074fd61
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/generated/gossipsub/pb.rs
@@ -0,0 +1,567 @@
+// Automatically generated rust module for 'rpc.proto' file
+
+#![allow(non_snake_case)]
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(unused_imports)]
+#![allow(unknown_lints)]
+#![allow(clippy::all)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
+
+use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result};
+use quick_protobuf::sizeofs::*;
+use super::super::*;
+
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Debug, Default, PartialEq, Clone)]
+pub struct RPC {
+    pub subscriptions: Vec<mod_RPC::SubOpts>,
+    pub publish: Vec<Message>,
+    pub control: Option<ControlMessage>,
+}
+
+impl<'a> MessageRead<'a> for RPC {
+    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
+        let mut msg = Self::default();
+        while !r.is_eof() {
+            match r.next_tag(bytes) {
+                Ok(10) => msg.subscriptions.push(r.read_message::<mod_RPC::SubOpts>(bytes)?),
+                Ok(18) => msg.publish.push(r.read_message::<Message>(bytes)?),
+                Ok(26) => msg.control = Some(r.read_message::<ControlMessage>(bytes)?),
+                Ok(t) => { r.read_unknown(bytes, t)?; }
+                Err(e) => return Err(e),
+            }
+        }
+        Ok(msg)
+    }
+}
+
+impl
MessageWrite for RPC { + fn get_size(&self) -> usize { + 0 + + self.subscriptions.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.publish.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.control.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.subscriptions { w.write_with_tag(10, |w| w.write_message(s))?; } + for s in &self.publish { w.write_with_tag(18, |w| w.write_message(s))?; } + if let Some(ref s) = self.control { w.write_with_tag(26, |w| w.write_message(s))?; } + Ok(()) + } +} + +pub mod mod_RPC { + +use super::*; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct SubOpts { + pub subscribe: Option, + pub topic_id: Option, +} + +impl<'a> MessageRead<'a> for SubOpts { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(8) => msg.subscribe = Some(r.read_bool(bytes)?), + Ok(18) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for SubOpts { + fn get_size(&self) -> usize { + 0 + + self.subscribe.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.subscribe { w.write_with_tag(8, |w| w.write_bool(*s))?; } + if let Some(ref s) = self.topic_id { w.write_with_tag(18, |w| w.write_string(&**s))?; } + Ok(()) + } +} + +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct Message { + pub from: Option>, + pub data: Option>, + pub seqno: Option>, + pub topic: String, + pub signature: Option>, + pub key: Option>, +} + +impl<'a> MessageRead<'a> for Message { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.from = Some(r.read_bytes(bytes)?.to_owned()), + Ok(18) => msg.data = Some(r.read_bytes(bytes)?.to_owned()), + Ok(26) => msg.seqno = Some(r.read_bytes(bytes)?.to_owned()), + Ok(34) => msg.topic = r.read_string(bytes)?.to_owned(), + Ok(42) => msg.signature = Some(r.read_bytes(bytes)?.to_owned()), + Ok(50) => msg.key = Some(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for Message { + fn get_size(&self) -> usize { + 0 + + self.from.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.seqno.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + 1 + sizeof_len((&self.topic).len()) + + self.signature.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.key.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.from { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.data { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.seqno { w.write_with_tag(26, |w| w.write_bytes(&**s))?; } + w.write_with_tag(34, |w| w.write_string(&**&self.topic))?; + if let Some(ref s) = self.signature { w.write_with_tag(42, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.key { 
w.write_with_tag(50, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlMessage { + pub ihave: Vec, + pub iwant: Vec, + pub graft: Vec, + pub prune: Vec, +} + +impl<'a> MessageRead<'a> for ControlMessage { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.ihave.push(r.read_message::(bytes)?), + Ok(18) => msg.iwant.push(r.read_message::(bytes)?), + Ok(26) => msg.graft.push(r.read_message::(bytes)?), + Ok(34) => msg.prune.push(r.read_message::(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlMessage { + fn get_size(&self) -> usize { + 0 + + self.ihave.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.iwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.graft.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.ihave { w.write_with_tag(10, |w| w.write_message(s))?; } + for s in &self.iwant { w.write_with_tag(18, |w| w.write_message(s))?; } + for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; } + for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlIHave { + pub topic_id: Option, + pub message_ids: Vec>, +} + +impl<'a> MessageRead<'a> for ControlIHave { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(18) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlIHave { + fn get_size(&self) -> usize { + 0 + + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; } + for s in &self.message_ids { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlIWant { + pub message_ids: Vec>, +} + +impl<'a> MessageRead<'a> for ControlIWant { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlIWant { + fn get_size(&self) -> usize { + 0 + + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.message_ids { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlGraft { + pub topic_id: Option, +} + +impl<'a> 
MessageRead<'a> for ControlGraft { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlGraft { + fn get_size(&self) -> usize { + 0 + + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlPrune { + pub topic_id: Option, + pub peers: Vec, + pub backoff: Option, +} + +impl<'a> MessageRead<'a> for ControlPrune { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(18) => msg.peers.push(r.read_message::(bytes)?), + Ok(24) => msg.backoff = Some(r.read_uint64(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlPrune { + fn get_size(&self) -> usize { + 0 + + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.peers.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.backoff.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; } + for s in &self.peers { w.write_with_tag(18, |w| w.write_message(s))?; } + if let Some(ref s) = self.backoff { w.write_with_tag(24, |w| w.write_uint64(*s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct PeerInfo { + pub peer_id: Option>, + pub signed_peer_record: Option>, +} + +impl<'a> MessageRead<'a> for PeerInfo { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.peer_id = Some(r.read_bytes(bytes)?.to_owned()), + Ok(18) => msg.signed_peer_record = Some(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for PeerInfo { + fn get_size(&self) -> usize { + 0 + + self.peer_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.signed_peer_record.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.peer_id { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.signed_peer_record { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct TopicDescriptor { + pub name: Option, + pub auth: Option, + pub enc: Option, +} + +impl<'a> MessageRead<'a> for TopicDescriptor { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.name = Some(r.read_string(bytes)?.to_owned()), + Ok(18) => msg.auth = Some(r.read_message::(bytes)?), + Ok(26) => msg.enc = 
Some(r.read_message::(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for TopicDescriptor { + fn get_size(&self) -> usize { + 0 + + self.name.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.auth.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + + self.enc.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.name { w.write_with_tag(10, |w| w.write_string(&**s))?; } + if let Some(ref s) = self.auth { w.write_with_tag(18, |w| w.write_message(s))?; } + if let Some(ref s) = self.enc { w.write_with_tag(26, |w| w.write_message(s))?; } + Ok(()) + } +} + +pub mod mod_TopicDescriptor { + +use super::*; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct AuthOpts { + pub mode: Option, + pub keys: Vec>, +} + +impl<'a> MessageRead<'a> for AuthOpts { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(8) => msg.mode = Some(r.read_enum(bytes)?), + Ok(18) => msg.keys.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for AuthOpts { + fn get_size(&self) -> usize { + 0 + + self.mode.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.keys.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.mode { w.write_with_tag(8, |w| w.write_enum(*s as i32))?; } + for s in &self.keys { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +pub mod mod_AuthOpts { + + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum AuthMode { + NONE = 0, + KEY = 1, + WOT = 2, +} + +impl Default for AuthMode { + fn default() -> Self { + AuthMode::NONE + } +} + +impl From for AuthMode { + fn from(i: i32) -> Self { + match i { + 0 => AuthMode::NONE, + 1 => AuthMode::KEY, + 2 => AuthMode::WOT, + _ => Self::default(), + } + } +} + +impl<'a> From<&'a str> for AuthMode { + fn from(s: &'a str) -> Self { + match s { + "NONE" => AuthMode::NONE, + "KEY" => AuthMode::KEY, + "WOT" => AuthMode::WOT, + _ => Self::default(), + } + } +} + +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct EncOpts { + pub mode: Option, + pub key_hashes: Vec>, +} + +impl<'a> MessageRead<'a> for EncOpts { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(8) => msg.mode = Some(r.read_enum(bytes)?), + Ok(18) => msg.key_hashes.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for EncOpts { + fn get_size(&self) -> usize { + 0 + + self.mode.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.key_hashes.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.mode { w.write_with_tag(8, |w| w.write_enum(*s as i32))?; } + for s in &self.key_hashes { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +pub mod mod_EncOpts { + + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum EncMode { + NONE = 0, + SHAREDKEY = 1, + WOT = 2, +} + +impl 
Default for EncMode { + fn default() -> Self { + EncMode::NONE + } +} + +impl From for EncMode { + fn from(i: i32) -> Self { + match i { + 0 => EncMode::NONE, + 1 => EncMode::SHAREDKEY, + 2 => EncMode::WOT, + _ => Self::default(), + } + } +} + +impl<'a> From<&'a str> for EncMode { + fn from(s: &'a str) -> Self { + match s { + "NONE" => EncMode::NONE, + "SHAREDKEY" => EncMode::SHAREDKEY, + "WOT" => EncMode::WOT, + _ => Self::default(), + } + } +} + +} + +} + diff --git a/beacon_node/lighthouse_network/src/gossipsub/generated/mod.rs b/beacon_node/lighthouse_network/src/gossipsub/generated/mod.rs new file mode 100644 index 000000000..7ac564f3c --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/generated/mod.rs @@ -0,0 +1,3 @@ +// Automatically generated mod.rs +pub mod compat; +pub mod gossipsub; diff --git a/beacon_node/lighthouse_network/src/gossipsub/generated/rpc.proto b/beacon_node/lighthouse_network/src/gossipsub/generated/rpc.proto new file mode 100644 index 000000000..2ce12f3f3 --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/generated/rpc.proto @@ -0,0 +1,84 @@ +syntax = "proto2"; + +package gossipsub.pb; + +message RPC { + repeated SubOpts subscriptions = 1; + repeated Message publish = 2; + + message SubOpts { + optional bool subscribe = 1; // subscribe or unsubscribe + optional string topic_id = 2; + } + + optional ControlMessage control = 3; +} + +message Message { + optional bytes from = 1; + optional bytes data = 2; + optional bytes seqno = 3; + required string topic = 4; + optional bytes signature = 5; + optional bytes key = 6; +} + +message ControlMessage { + repeated ControlIHave ihave = 1; + repeated ControlIWant iwant = 2; + repeated ControlGraft graft = 3; + repeated ControlPrune prune = 4; +} + +message ControlIHave { + optional string topic_id = 1; + repeated bytes message_ids = 2; +} + +message ControlIWant { + repeated bytes message_ids= 1; +} + +message ControlGraft { + optional string topic_id = 1; +} + +message ControlPrune { + optional string topic_id = 1; + repeated PeerInfo peers = 2; // gossipsub v1.1 PX + optional uint64 backoff = 3; // gossipsub v1.1 backoff time (in seconds) +} + +message PeerInfo { + optional bytes peer_id = 1; + optional bytes signed_peer_record = 2; +} + +// topicID = hash(topicDescriptor); (not the topic.name) +message TopicDescriptor { + optional string name = 1; + optional AuthOpts auth = 2; + optional EncOpts enc = 3; + + message AuthOpts { + optional AuthMode mode = 1; + repeated bytes keys = 2; // root keys to trust + + enum AuthMode { + NONE = 0; // no authentication, anyone can publish + KEY = 1; // only messages signed by keys in the topic descriptor are accepted + WOT = 2; // web of trust, certificates can allow publisher set to grow + } + } + + message EncOpts { + optional EncMode mode = 1; + repeated bytes key_hashes = 2; // the hashes of the shared keys used (salted) + + enum EncMode { + NONE = 0; // no encryption, anyone can read + SHAREDKEY = 1; // messages are encrypted with shared key + WOT = 2; // web of trust, certificates can allow publisher set to grow + } + } +} diff --git a/beacon_node/lighthouse_network/src/gossipsub/gossip_promises.rs b/beacon_node/lighthouse_network/src/gossipsub/gossip_promises.rs new file mode 100644 index 000000000..43ca17855 --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/gossip_promises.rs @@ -0,0 +1,101 @@ +// Copyright 2020 Sigma Prime Pty Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use super::peer_score::RejectReason; +use super::MessageId; +use super::ValidationError; +use instant::Instant; +use libp2p::identity::PeerId; +use std::collections::HashMap; + +/// Tracks recently sent `IWANT` messages and checks if peers respond to them. +#[derive(Default)] +pub(crate) struct GossipPromises { + /// Stores for each tracked message id and peer the instant when this promise expires. + /// + /// If the peer didn't respond until then we consider the promise as broken and penalize the + /// peer. + promises: HashMap>, +} + +impl GossipPromises { + /// Returns true if the message id exists in the promises. + pub(crate) fn contains(&self, message: &MessageId) -> bool { + self.promises.contains_key(message) + } + + /// Track a promise to deliver a message from a list of [`MessageId`]s we are requesting. + pub(crate) fn add_promise(&mut self, peer: PeerId, messages: &[MessageId], expires: Instant) { + for message_id in messages { + // If a promise for this message id and peer already exists we don't update the expiry! + self.promises + .entry(message_id.clone()) + .or_default() + .entry(peer) + .or_insert(expires); + } + } + + pub(crate) fn message_delivered(&mut self, message_id: &MessageId) { + // Someone delivered a message, we can stop tracking all promises for it. + self.promises.remove(message_id); + } + + pub(crate) fn reject_message(&mut self, message_id: &MessageId, reason: &RejectReason) { + // A message got rejected, so we can stop tracking promises and let the score penalty apply + // from invalid message delivery. + // We do take exception and apply promise penalty regardless in the following cases, where + // the peer delivered an obviously invalid message. + match reason { + RejectReason::ValidationError(ValidationError::InvalidSignature) => (), + RejectReason::SelfOrigin => (), + _ => { + self.promises.remove(message_id); + } + }; + } + + /// Returns the number of broken promises for each peer who didn't follow up on an IWANT + /// request. + /// This should be called not too often relative to the expire times, since it iterates over + /// the whole stored data. 
+ pub(crate) fn get_broken_promises(&mut self) -> HashMap { + let now = Instant::now(); + let mut result = HashMap::new(); + self.promises.retain(|msg, peers| { + peers.retain(|peer_id, expires| { + if *expires < now { + let count = result.entry(*peer_id).or_insert(0); + *count += 1; + tracing::debug!( + peer=%peer_id, + message=%msg, + "[Penalty] The peer broke the promise to deliver message in time!" + ); + false + } else { + true + } + }); + !peers.is_empty() + }); + result + } +} diff --git a/beacon_node/lighthouse_network/src/gossipsub/handler.rs b/beacon_node/lighthouse_network/src/gossipsub/handler.rs new file mode 100644 index 000000000..298570955 --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/handler.rs @@ -0,0 +1,562 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use super::protocol::{GossipsubCodec, ProtocolConfig}; +use super::rpc_proto::proto; +use super::types::{PeerKind, RawMessage, Rpc, RpcOut, RpcReceiver}; +use super::ValidationError; +use asynchronous_codec::Framed; +use futures::future::Either; +use futures::prelude::*; +use futures::StreamExt; +use instant::Instant; +use libp2p::core::upgrade::DeniedUpgrade; +use libp2p::swarm::handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, +}; +use libp2p::swarm::Stream; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +/// The event emitted by the Handler. This informs the behaviour of various events created +/// by the handler. +#[derive(Debug)] +pub enum HandlerEvent { + /// A GossipsubRPC message has been received. This also contains a list of invalid messages (if + /// any) that were received. + Message { + /// The GossipsubRPC message excluding any invalid messages. + rpc: Rpc, + /// Any invalid messages that were received in the RPC, along with the associated + /// validation error. + invalid_messages: Vec<(RawMessage, ValidationError)>, + }, + /// An inbound or outbound substream has been established with the peer and this informs over + /// which protocol. This message only occurs once per connection. + PeerKind(PeerKind), + /// A message to be published was dropped because it could not be sent in time. + MessageDropped(RpcOut), +} + +/// A message sent from the behaviour to the handler. +#[allow(clippy::large_enum_variant)] +#[derive(Debug)] +pub enum HandlerIn { + /// The peer has joined the mesh. 
+ JoinedMesh, + /// The peer has left the mesh. + LeftMesh, +} + +/// The maximum number of inbound or outbound substreams attempts we allow. +/// +/// Gossipsub is supposed to have a single long-lived inbound and outbound substream. On failure we +/// attempt to recreate these. This imposes an upper bound of new substreams before we consider the +/// connection faulty and disable the handler. This also prevents against potential substream +/// creation loops. +const MAX_SUBSTREAM_ATTEMPTS: usize = 5; + +#[allow(clippy::large_enum_variant)] +pub enum Handler { + Enabled(EnabledHandler), + Disabled(DisabledHandler), +} + +/// Protocol Handler that manages a single long-lived substream with a peer. +pub struct EnabledHandler { + /// Upgrade configuration for the gossipsub protocol. + listen_protocol: ProtocolConfig, + + /// The single long-lived outbound substream. + outbound_substream: Option, + + /// The single long-lived inbound substream. + inbound_substream: Option, + + /// Queue of values that we want to send to the remote + send_queue: RpcReceiver, + + /// Flag indicating that an outbound substream is being established to prevent duplicate + /// requests. + outbound_substream_establishing: bool, + + /// The number of outbound substreams we have requested. + outbound_substream_attempts: usize, + + /// The number of inbound substreams that have been created by the peer. + inbound_substream_attempts: usize, + + /// The type of peer this handler is associated to. + peer_kind: Option, + + /// Keeps track on whether we have sent the peer kind to the behaviour. + // + // NOTE: Use this flag rather than checking the substream count each poll. + peer_kind_sent: bool, + + last_io_activity: Instant, + + /// Keeps track of whether this connection is for a peer in the mesh. This is used to make + /// decisions about the keep alive state for this connection. + in_mesh: bool, +} + +pub enum DisabledHandler { + /// If the peer doesn't support the gossipsub protocol we do not immediately disconnect. + /// Rather, we disable the handler and prevent any incoming or outgoing substreams from being + /// established. + ProtocolUnsupported { + /// Keeps track on whether we have sent the peer kind to the behaviour. + peer_kind_sent: bool, + }, + /// The maximum number of inbound or outbound substream attempts have happened and thereby the + /// handler has been disabled. + MaxSubstreamAttempts, +} + +/// State of the inbound substream, opened either by us or by the remote. +enum InboundSubstreamState { + /// Waiting for a message from the remote. The idle state for an inbound substream. + WaitingInput(Framed), + /// The substream is being closed. + Closing(Framed), + /// An error occurred during processing. + Poisoned, +} + +/// State of the outbound substream, opened either by us or by the remote. +enum OutboundSubstreamState { + /// Waiting for the user to send a message. The idle state for an outbound substream. + WaitingOutput(Framed), + /// Waiting to send a message to the remote. + PendingSend(Framed, proto::RPC), + /// Waiting to flush the substream so that the data arrives to the remote. + PendingFlush(Framed), + /// An error occurred during processing. + Poisoned, +} + +impl Handler { + /// Builds a new [`Handler`]. 
+ pub fn new(protocol_config: ProtocolConfig, message_queue: RpcReceiver) -> Self { + Handler::Enabled(EnabledHandler { + listen_protocol: protocol_config, + inbound_substream: None, + outbound_substream: None, + outbound_substream_establishing: false, + outbound_substream_attempts: 0, + inbound_substream_attempts: 0, + peer_kind: None, + peer_kind_sent: false, + last_io_activity: Instant::now(), + in_mesh: false, + send_queue: message_queue, + }) + } +} + +impl EnabledHandler { + fn on_fully_negotiated_inbound( + &mut self, + (substream, peer_kind): (Framed, PeerKind), + ) { + // update the known kind of peer + if self.peer_kind.is_none() { + self.peer_kind = Some(peer_kind); + } + + // new inbound substream. Replace the current one, if it exists. + tracing::trace!("New inbound substream request"); + self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); + } + + fn on_fully_negotiated_outbound( + &mut self, + FullyNegotiatedOutbound { protocol, .. }: FullyNegotiatedOutbound< + ::OutboundProtocol, + ::OutboundOpenInfo, + >, + ) { + let (substream, peer_kind) = protocol; + + // update the known kind of peer + if self.peer_kind.is_none() { + self.peer_kind = Some(peer_kind); + } + + assert!( + self.outbound_substream.is_none(), + "Established an outbound substream with one already available" + ); + self.outbound_substream = Some(OutboundSubstreamState::WaitingOutput(substream)); + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent< + ::OutboundProtocol, + ::OutboundOpenInfo, + ::ToBehaviour, + >, + > { + if !self.peer_kind_sent { + if let Some(peer_kind) = self.peer_kind.as_ref() { + self.peer_kind_sent = true; + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::PeerKind(peer_kind.clone()), + )); + } + } + + // determine if we need to create the outbound stream + if !self.send_queue.poll_is_empty(cx) + && self.outbound_substream.is_none() + && !self.outbound_substream_establishing + { + self.outbound_substream_establishing = true; + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(self.listen_protocol.clone(), ()), + }); + } + + // process outbound stream + loop { + match std::mem::replace( + &mut self.outbound_substream, + Some(OutboundSubstreamState::Poisoned), + ) { + // outbound idle state + Some(OutboundSubstreamState::WaitingOutput(substream)) => { + if let Poll::Ready(Some(mut message)) = self.send_queue.poll_next_unpin(cx) { + match message { + RpcOut::Publish { + message: _, + ref mut timeout, + } + | RpcOut::Forward { + message: _, + ref mut timeout, + } => { + if Pin::new(timeout).poll(cx).is_ready() { + // Inform the behaviour and end the poll. + self.outbound_substream = + Some(OutboundSubstreamState::WaitingOutput(substream)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::MessageDropped(message), + )); + } + } + _ => {} // All other messages are not time-bound. 
+ } + self.outbound_substream = Some(OutboundSubstreamState::PendingSend( + substream, + message.into_protobuf(), + )); + continue; + } + + self.outbound_substream = + Some(OutboundSubstreamState::WaitingOutput(substream)); + break; + } + Some(OutboundSubstreamState::PendingSend(mut substream, message)) => { + match Sink::poll_ready(Pin::new(&mut substream), cx) { + Poll::Ready(Ok(())) => { + match Sink::start_send(Pin::new(&mut substream), message) { + Ok(()) => { + self.outbound_substream = + Some(OutboundSubstreamState::PendingFlush(substream)) + } + Err(e) => { + tracing::debug!( + "Failed to send message on outbound stream: {e}" + ); + self.outbound_substream = None; + break; + } + } + } + Poll::Ready(Err(e)) => { + tracing::debug!("Failed to send message on outbound stream: {e}"); + self.outbound_substream = None; + break; + } + Poll::Pending => { + self.outbound_substream = + Some(OutboundSubstreamState::PendingSend(substream, message)); + break; + } + } + } + Some(OutboundSubstreamState::PendingFlush(mut substream)) => { + match Sink::poll_flush(Pin::new(&mut substream), cx) { + Poll::Ready(Ok(())) => { + self.last_io_activity = Instant::now(); + self.outbound_substream = + Some(OutboundSubstreamState::WaitingOutput(substream)) + } + Poll::Ready(Err(e)) => { + tracing::debug!("Failed to flush outbound stream: {e}"); + self.outbound_substream = None; + break; + } + Poll::Pending => { + self.outbound_substream = + Some(OutboundSubstreamState::PendingFlush(substream)); + break; + } + } + } + None => { + self.outbound_substream = None; + break; + } + Some(OutboundSubstreamState::Poisoned) => { + unreachable!("Error occurred during outbound stream processing") + } + } + } + + // Handle inbound messages. + loop { + match std::mem::replace( + &mut self.inbound_substream, + Some(InboundSubstreamState::Poisoned), + ) { + // inbound idle state + Some(InboundSubstreamState::WaitingInput(mut substream)) => { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(message))) => { + self.last_io_activity = Instant::now(); + self.inbound_substream = + Some(InboundSubstreamState::WaitingInput(substream)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(message)); + } + Poll::Ready(Some(Err(error))) => { + tracing::debug!("Failed to read from inbound stream: {error}"); + // Close this side of the stream. If the + // peer is still around, they will re-establish their + // outbound stream i.e. our inbound stream. + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + } + // peer closed the stream + Poll::Ready(None) => { + tracing::debug!("Inbound stream closed by remote"); + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + } + Poll::Pending => { + self.inbound_substream = + Some(InboundSubstreamState::WaitingInput(substream)); + break; + } + } + } + Some(InboundSubstreamState::Closing(mut substream)) => { + match Sink::poll_close(Pin::new(&mut substream), cx) { + Poll::Ready(res) => { + if let Err(e) = res { + // Don't close the connection but just drop the inbound substream. + // In case the remote has more to send, they will open up a new + // substream. 
+ tracing::debug!("Inbound substream error while closing: {e}"); + } + self.inbound_substream = None; + break; + } + Poll::Pending => { + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + break; + } + } + } + None => { + self.inbound_substream = None; + break; + } + Some(InboundSubstreamState::Poisoned) => { + unreachable!("Error occurred during inbound stream processing") + } + } + } + + // Drop the next message in queue if it's stale. + if let Poll::Ready(Some(rpc)) = self.send_queue.poll_stale(cx) { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::MessageDropped(rpc), + )); + } + + Poll::Pending + } +} + +impl ConnectionHandler for Handler { + type FromBehaviour = HandlerIn; + type ToBehaviour = HandlerEvent; + type InboundOpenInfo = (); + type InboundProtocol = either::Either; + type OutboundOpenInfo = (); + type OutboundProtocol = ProtocolConfig; + + fn listen_protocol(&self) -> SubstreamProtocol { + match self { + Handler::Enabled(handler) => { + SubstreamProtocol::new(either::Either::Left(handler.listen_protocol.clone()), ()) + } + Handler::Disabled(_) => { + SubstreamProtocol::new(either::Either::Right(DeniedUpgrade), ()) + } + } + } + + fn on_behaviour_event(&mut self, message: HandlerIn) { + match self { + Handler::Enabled(handler) => match message { + HandlerIn::JoinedMesh => { + handler.in_mesh = true; + } + HandlerIn::LeftMesh => { + handler.in_mesh = false; + } + }, + Handler::Disabled(_) => { + tracing::debug!(?message, "Handler is disabled. Dropping message"); + } + } + } + + fn connection_keep_alive(&self) -> bool { + matches!(self, Handler::Enabled(h) if h.in_mesh) + } + + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent, + > { + match self { + Handler::Enabled(handler) => handler.poll(cx), + Handler::Disabled(DisabledHandler::ProtocolUnsupported { peer_kind_sent }) => { + if !*peer_kind_sent { + *peer_kind_sent = true; + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::PeerKind(PeerKind::NotSupported), + )); + } + + Poll::Pending + } + Handler::Disabled(DisabledHandler::MaxSubstreamAttempts) => Poll::Pending, + } + } + + fn on_connection_event( + &mut self, + event: ConnectionEvent< + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + match self { + Handler::Enabled(handler) => { + if event.is_inbound() { + handler.inbound_substream_attempts += 1; + + if handler.inbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { + tracing::warn!( + "The maximum number of inbound substreams attempts has been exceeded" + ); + *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); + return; + } + } + + if event.is_outbound() { + handler.outbound_substream_establishing = false; + + handler.outbound_substream_attempts += 1; + + if handler.outbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { + tracing::warn!( + "The maximum number of outbound substream attempts has been exceeded" + ); + *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); + return; + } + } + + match event { + ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol, + .. 
+                    }) => match protocol {
+                        Either::Left(protocol) => handler.on_fully_negotiated_inbound(protocol),
+                        Either::Right(v) => void::unreachable(v),
+                    },
+                    ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => {
+                        handler.on_fully_negotiated_outbound(fully_negotiated_outbound)
+                    }
+                    ConnectionEvent::DialUpgradeError(DialUpgradeError {
+                        error: StreamUpgradeError::Timeout,
+                        ..
+                    }) => {
+                        tracing::debug!("Dial upgrade error: Protocol negotiation timeout");
+                    }
+                    ConnectionEvent::DialUpgradeError(DialUpgradeError {
+                        error: StreamUpgradeError::Apply(e),
+                        ..
+                    }) => void::unreachable(e),
+                    ConnectionEvent::DialUpgradeError(DialUpgradeError {
+                        error: StreamUpgradeError::NegotiationFailed,
+                        ..
+                    }) => {
+                        // The protocol is not supported
+                        tracing::debug!(
+                            "The remote peer does not support gossipsub on this connection"
+                        );
+                        *self = Handler::Disabled(DisabledHandler::ProtocolUnsupported {
+                            peer_kind_sent: false,
+                        });
+                    }
+                    ConnectionEvent::DialUpgradeError(DialUpgradeError {
+                        error: StreamUpgradeError::Io(e),
+                        ..
+                    }) => {
+                        tracing::debug!("Protocol negotiation failed: {e}")
+                    }
+                    _ => {}
+                }
+            }
+            Handler::Disabled(_) => {}
+        }
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/mcache.rs b/beacon_node/lighthouse_network/src/gossipsub/mcache.rs
new file mode 100644
index 000000000..31931d756
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/mcache.rs
@@ -0,0 +1,387 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+use super::topic::TopicHash;
+use super::types::{MessageId, RawMessage};
+use libp2p::identity::PeerId;
+use std::collections::hash_map::Entry;
+use std::fmt::Debug;
+use std::{
+    collections::{HashMap, HashSet},
+    fmt,
+};
+
+/// CacheEntry stored in the history.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct CacheEntry {
+    mid: MessageId,
+    topic: TopicHash,
+}
+
+/// MessageCache struct holding history of messages.
+#[derive(Clone)]
+pub(crate) struct MessageCache {
+    msgs: HashMap<MessageId, (RawMessage, HashSet<PeerId>)>,
+    /// For every message and peer, the number of times this peer asked for the message.
+    iwant_counts: HashMap<MessageId, HashMap<PeerId, u32>>,
+    history: Vec<Vec<CacheEntry>>,
+    /// The number of indices in the cache history used for gossiping. That means that a message
+    /// won't get gossiped anymore once `shift` has been called `gossip` times after the message
+    /// was inserted into the cache.
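+    ///
+    /// For example, with `history_capacity = 5` and `gossip = 3`, a newly
+    /// inserted message is returned by `get_gossip_message_ids` until `shift`
+    /// has been called 3 times, and is dropped from the cache entirely on the
+    /// 5th `shift`.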
+    gossip: usize,
+}
+
+impl fmt::Debug for MessageCache {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("MessageCache")
+            .field("msgs", &self.msgs)
+            .field("history", &self.history)
+            .field("gossip", &self.gossip)
+            .finish()
+    }
+}
+
+/// Implementation of the MessageCache.
+impl MessageCache {
+    pub(crate) fn new(gossip: usize, history_capacity: usize) -> Self {
+        MessageCache {
+            gossip,
+            msgs: HashMap::default(),
+            iwant_counts: HashMap::default(),
+            history: vec![Vec::new(); history_capacity],
+        }
+    }
+
+    /// Put a message into the memory cache.
+    ///
+    /// Returns true if the message didn't already exist in the cache.
+    pub(crate) fn put(&mut self, message_id: &MessageId, msg: RawMessage) -> bool {
+        match self.msgs.entry(message_id.clone()) {
+            Entry::Occupied(_) => {
+                // Don't add duplicate entries to the cache.
+                false
+            }
+            Entry::Vacant(entry) => {
+                let cache_entry = CacheEntry {
+                    mid: message_id.clone(),
+                    topic: msg.topic.clone(),
+                };
+                entry.insert((msg, HashSet::default()));
+                self.history[0].push(cache_entry);
+
+                tracing::trace!(message=?message_id, "Put message in mcache");
+                true
+            }
+        }
+    }
+
+    /// Keeps track of peers we know have received the message to prevent forwarding to said peers.
+    pub(crate) fn observe_duplicate(&mut self, message_id: &MessageId, source: &PeerId) {
+        if let Some((message, originating_peers)) = self.msgs.get_mut(message_id) {
+            // if the message is already validated, we don't need to store extra peers sending us
+            // duplicates as the message has already been forwarded
+            if message.validated {
+                return;
+            }
+
+            originating_peers.insert(*source);
+        }
+    }
+
+    /// Get a message with `message_id`
+    #[cfg(test)]
+    pub(crate) fn get(&self, message_id: &MessageId) -> Option<&RawMessage> {
+        self.msgs.get(message_id).map(|(message, _)| message)
+    }
+
+    /// Increases the iwant count for the given message by one and returns the message together
+    /// with the updated count if the message exists.
+    pub(crate) fn get_with_iwant_counts(
+        &mut self,
+        message_id: &MessageId,
+        peer: &PeerId,
+    ) -> Option<(&RawMessage, u32)> {
+        let iwant_counts = &mut self.iwant_counts;
+        self.msgs.get(message_id).and_then(|(message, _)| {
+            if !message.validated {
+                None
+            } else {
+                Some((message, {
+                    let count = iwant_counts
+                        .entry(message_id.clone())
+                        .or_default()
+                        .entry(*peer)
+                        .or_default();
+                    *count += 1;
+                    *count
+                }))
+            }
+        })
+    }
+
+    /// Gets a message with [`MessageId`] and tags it as validated.
+    /// This function also returns the known peers that have sent us this message. This is used to
+    /// prevent us sending redundant messages to peers who have already propagated it.
+    pub(crate) fn validate(
+        &mut self,
+        message_id: &MessageId,
+    ) -> Option<(&RawMessage, HashSet<PeerId>)> {
+        self.msgs.get_mut(message_id).map(|(message, known_peers)| {
+            message.validated = true;
+            // Clear the known peers list (after a message is validated, it is forwarded and we no
+            // longer need to store the originating peers).
+            let originating_peers = std::mem::take(known_peers);
+            (&*message, originating_peers)
+        })
+    }
+
+    /// Get a list of [`MessageId`]s for a given topic.
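+    ///
+    /// Only the `gossip` most recent history windows are scanned, and only ids
+    /// of already-validated messages are returned, so unvalidated messages are
+    /// not advertised via IHAVE gossip.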
+    pub(crate) fn get_gossip_message_ids(&self, topic: &TopicHash) -> Vec<MessageId> {
+        self.history[..self.gossip]
+            .iter()
+            .fold(vec![], |mut current_entries, entries| {
+                // search for entries with desired topic
+                let mut found_entries: Vec<MessageId> = entries
+                    .iter()
+                    .filter_map(|entry| {
+                        if &entry.topic == topic {
+                            let mid = &entry.mid;
+                            // Only gossip validated messages
+                            if let Some(true) = self.msgs.get(mid).map(|(msg, _)| msg.validated) {
+                                Some(mid.clone())
+                            } else {
+                                None
+                            }
+                        } else {
+                            None
+                        }
+                    })
+                    .collect();
+
+                // generate the list
+                current_entries.append(&mut found_entries);
+                current_entries
+            })
+    }
+
+    /// Shift the history array down one and delete messages associated with the
+    /// last entry.
+    pub(crate) fn shift(&mut self) {
+        for entry in self.history.pop().expect("history is always > 1") {
+            if let Some((msg, _)) = self.msgs.remove(&entry.mid) {
+                if !msg.validated {
+                    // If GossipsubConfig::validate_messages is true, the implementing
+                    // application has to ensure that Gossipsub::validate_message gets called for
+                    // each received message within the cache timeout time.
+                    tracing::debug!(
+                        message=%&entry.mid,
+                        "The message got removed from the cache without being validated."
+                    );
+                }
+            }
+            tracing::trace!(message=%&entry.mid, "Remove message from the cache");
+
+            self.iwant_counts.remove(&entry.mid);
+        }
+
+        // Insert an empty vec in position 0
+        self.history.insert(0, Vec::new());
+    }
+
+    /// Removes a message from the cache and returns it if it exists.
+    pub(crate) fn remove(
+        &mut self,
+        message_id: &MessageId,
+    ) -> Option<(RawMessage, HashSet<PeerId>)> {
+        // We only remove the message from msgs and iwant_counts and keep the message_id in the
+        // history vector. The id in the history vector will simply be ignored on popping.
+
+        self.iwant_counts.remove(message_id);
+        self.msgs.remove(message_id)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::gossipsub::types::RawMessage;
+    use crate::{IdentTopic as Topic, TopicHash};
+    use libp2p::identity::PeerId;
+
+    fn gen_testm(x: u64, topic: TopicHash) -> (MessageId, RawMessage) {
+        let default_id = |message: &RawMessage| {
+            // default message id is: source + sequence number
+            let mut source_string = message.source.as_ref().unwrap().to_base58();
+            source_string.push_str(&message.sequence_number.unwrap().to_string());
+            MessageId::from(source_string)
+        };
+        let u8x: u8 = x as u8;
+        let source = Some(PeerId::random());
+        let data: Vec<u8> = vec![u8x];
+        let sequence_number = Some(x);
+
+        let m = RawMessage {
+            source,
+            data,
+            sequence_number,
+            topic,
+            signature: None,
+            key: None,
+            validated: false,
+        };
+
+        let id = default_id(&m);
+        (id, m)
+    }
+
+    fn new_cache(gossip_size: usize, history: usize) -> MessageCache {
+        MessageCache::new(gossip_size, history)
+    }
+
+    #[test]
+    /// Test that the message cache can be created.
+    fn test_new_cache() {
+        let x: usize = 3;
+        let mc = new_cache(x, 5);
+
+        assert_eq!(mc.gossip, x);
+    }
+
+    #[test]
+    /// Test you can put one message and get one.
+    fn test_put_get_one() {
+        let mut mc = new_cache(10, 15);
+
+        let topic1_hash = Topic::new("topic1").hash();
+        let (id, m) = gen_testm(10, topic1_hash);
+
+        mc.put(&id, m.clone());
+
+        assert_eq!(mc.history[0].len(), 1);
+
+        let fetched = mc.get(&id);
+
+        assert_eq!(fetched.unwrap(), &m);
+    }
+
+    #[test]
+    /// Test attempting to 'get' with a wrong id.
+ fn test_get_wrong() { + let mut mc = new_cache(10, 15); + + let topic1_hash = Topic::new("topic1").hash(); + let (id, m) = gen_testm(10, topic1_hash); + + mc.put(&id, m); + + // Try to get an incorrect ID + let wrong_id = MessageId::new(b"wrongid"); + let fetched = mc.get(&wrong_id); + assert!(fetched.is_none()); + } + + #[test] + /// Test attempting to 'get' empty message cache. + fn test_get_empty() { + let mc = new_cache(10, 15); + + // Try to get an incorrect ID + let wrong_string = MessageId::new(b"imempty"); + let fetched = mc.get(&wrong_string); + assert!(fetched.is_none()); + } + + #[test] + /// Test shift mechanism. + fn test_shift() { + let mut mc = new_cache(1, 5); + + let topic1_hash = Topic::new("topic1").hash(); + + // Build the message + for i in 0..10 { + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); + } + + mc.shift(); + + // Ensure the shift occurred + assert!(mc.history[0].is_empty()); + assert!(mc.history[1].len() == 10); + + // Make sure no messages deleted + assert!(mc.msgs.len() == 10); + } + + #[test] + /// Test Shift with no additions. + fn test_empty_shift() { + let mut mc = new_cache(1, 5); + + let topic1_hash = Topic::new("topic1").hash(); + + // Build the message + for i in 0..10 { + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); + } + + mc.shift(); + + // Ensure the shift occurred + assert!(mc.history[0].is_empty()); + assert!(mc.history[1].len() == 10); + + mc.shift(); + + assert!(mc.history[2].len() == 10); + assert!(mc.history[1].is_empty()); + assert!(mc.history[0].is_empty()); + } + + #[test] + /// Test shift to see if the last history messages are removed. + fn test_remove_last_from_shift() { + let mut mc = new_cache(4, 5); + + let topic1_hash = Topic::new("topic1").hash(); + + // Build the message + for i in 0..10 { + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); + } + + // Shift right until deleting messages + mc.shift(); + mc.shift(); + mc.shift(); + mc.shift(); + + assert_eq!(mc.history[mc.history.len() - 1].len(), 10); + + // Shift and delete the messages + mc.shift(); + assert_eq!(mc.history[mc.history.len() - 1].len(), 0); + assert_eq!(mc.history[0].len(), 0); + assert_eq!(mc.msgs.len(), 0); + } +} diff --git a/beacon_node/lighthouse_network/src/gossipsub/metrics.rs b/beacon_node/lighthouse_network/src/gossipsub/metrics.rs new file mode 100644 index 000000000..91bcd5f54 --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/metrics.rs @@ -0,0 +1,680 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! A set of metrics used to help track and diagnose the network behaviour of the gossipsub
+//! protocol.
+
+use std::collections::HashMap;
+
+use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
+use prometheus_client::metrics::counter::Counter;
+use prometheus_client::metrics::family::{Family, MetricConstructor};
+use prometheus_client::metrics::gauge::Gauge;
+use prometheus_client::metrics::histogram::{linear_buckets, Histogram};
+use prometheus_client::registry::Registry;
+
+use super::topic::TopicHash;
+use super::types::{MessageAcceptance, PeerKind};
+
+// Default value limiting how many topics we store metrics for.
+const DEFAULT_MAX_TOPICS: usize = 300;
+
+// Default value limiting how many topics we store metrics for when we have never been
+// subscribed to those topics.
+const DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS: usize = 100;
+
+#[derive(Debug, Clone)]
+pub struct Config {
+    /// This provides an upper bound to the number of mesh topics we create metrics for. It
+    /// prevents unbounded labels being created in the metrics.
+    pub max_topics: usize,
+    /// Mesh topics are controlled by the user via subscriptions whereas non-mesh topics are
+    /// determined by users on the network. This limit permits a fixed number of such topics, in
+    /// addition to the mesh topics.
+    pub max_never_subscribed_topics: usize,
+    /// Buckets used for the score histograms.
+    pub score_buckets: Vec<f64>,
+}
+
+impl Config {
+    /// Create buckets for the score histograms based on score thresholds.
+    pub fn buckets_using_scoring_thresholds(&mut self, params: &super::PeerScoreThresholds) {
+        self.score_buckets = vec![
+            params.graylist_threshold,
+            params.publish_threshold,
+            params.gossip_threshold,
+            params.gossip_threshold / 2.0,
+            params.gossip_threshold / 4.0,
+            0.0,
+            1.0,
+            10.0,
+            100.0,
+        ];
+    }
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        // Some sensible defaults
+        let gossip_threshold = -4000.0;
+        let publish_threshold = -8000.0;
+        let graylist_threshold = -16000.0;
+        let score_buckets: Vec<f64> = vec![
+            graylist_threshold,
+            publish_threshold,
+            gossip_threshold,
+            gossip_threshold / 2.0,
+            gossip_threshold / 4.0,
+            0.0,
+            1.0,
+            10.0,
+            100.0,
+        ];
+        Config {
+            max_topics: DEFAULT_MAX_TOPICS,
+            max_never_subscribed_topics: DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS,
+            score_buckets,
+        }
+    }
+}
+
+/// Whether we have ever been subscribed to this topic.
+type EverSubscribed = bool;
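+
+// A sketch of typical wiring (names as defined in this file; `registry` is
+// assumed to be supplied by the embedding application):
+//
+//     let mut registry = prometheus_client::registry::Registry::default();
+//     let mut config = Config::default();
+//     config.buckets_using_scoring_thresholds(&super::PeerScoreThresholds::default());
+//     let metrics = Metrics::new(&mut registry, config);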
+/// A collection of metrics used throughout the Gossipsub behaviour.
+pub(crate) struct Metrics {
+    /* Configuration parameters */
+    /// Maximum number of topics for which we store metrics. This helps keep the metrics bounded.
+    max_topics: usize,
+    /// Maximum number of topics for which we store metrics, where the topic is not one to which we
+    /// have subscribed at some point. This helps keep the metrics bounded, since these topics come
+    /// from received messages and not explicit application subscriptions.
+    max_never_subscribed_topics: usize,
+
+    /* Auxiliary variables */
+    /// Information needed to decide if a topic is allowed or not.
+    topic_info: HashMap<TopicHash, EverSubscribed>,
+
+    /* Metrics per known topic */
+    /// Status of our subscription to this topic. This metric allows analyzing other topic metrics
+    /// filtered by our current subscription status.
+    topic_subscription_status: Family<TopicHash, Gauge>,
+    /// Number of peers subscribed to each topic. This allows us to analyze a topic's behaviour
+    /// regardless of our subscription status.
+    topic_peers_count: Family<TopicHash, Gauge>,
+    /// The number of invalid messages received for a given topic.
+    invalid_messages: Family<TopicHash, Counter>,
+    /// The number of messages accepted by the application (validation result).
+    accepted_messages: Family<TopicHash, Counter>,
+    /// The number of messages ignored by the application (validation result).
+    ignored_messages: Family<TopicHash, Counter>,
+    /// The number of messages rejected by the application (validation result).
+    rejected_messages: Family<TopicHash, Counter>,
+    /// The number of publish messages dropped by the sender.
+    publish_messages_dropped: Family<TopicHash, Counter>,
+    /// The number of forward messages dropped by the sender.
+    forward_messages_dropped: Family<TopicHash, Counter>,
+
+    /* Metrics regarding mesh state */
+    /// Number of peers in our mesh. This metric should be updated with the count of peers for a
+    /// topic in the mesh regardless of inclusion and churn events.
+    mesh_peer_counts: Family<TopicHash, Gauge>,
+    /// Number of times we include peers in a topic mesh for different reasons.
+    mesh_peer_inclusion_events: Family<InclusionLabel, Counter>,
+    /// Number of times we remove peers from a topic mesh for different reasons.
+    mesh_peer_churn_events: Family<ChurnLabel, Counter>,
+
+    /* Metrics regarding messages sent/received */
+    /// Number of gossip messages sent to each topic.
+    topic_msg_sent_counts: Family<TopicHash, Counter>,
+    /// Bytes from gossip messages sent to each topic.
+    topic_msg_sent_bytes: Family<TopicHash, Counter>,
+    /// Number of gossipsub messages published to each topic.
+    topic_msg_published: Family<TopicHash, Counter>,
+
+    /// Number of gossipsub messages received on each topic (without filtering duplicates).
+    topic_msg_recv_counts_unfiltered: Family<TopicHash, Counter>,
+    /// Number of gossipsub messages received on each topic (after filtering duplicates).
+    topic_msg_recv_counts: Family<TopicHash, Counter>,
+    /// Bytes received from gossip messages for each topic.
+    topic_msg_recv_bytes: Family<TopicHash, Counter>,
+
+    /* Metrics related to scoring */
+    /// Histogram of the scores for each mesh topic.
+    score_per_mesh: Family<TopicHash, Histogram, HistBuilder>,
+    /// A counter of the kind of penalties being applied to peers.
+    scoring_penalties: Family<PenaltyLabel, Counter>,
+
+    /* General Metrics */
+    /// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based
+    /// on which protocol they support. This metric keeps track of the number of peers that are
+    /// connected of each type.
+    peers_per_protocol: Family<ProtocolLabel, Gauge>,
+    /// The time it takes to complete one iteration of the heartbeat.
+    heartbeat_duration: Histogram,
+
+    /* Performance metrics */
+    /// When the user validates a message, it tries to re-propagate it to its mesh peers. If the
+    /// message expires from the memcache before it can be validated, we count this as a cache
+    /// miss and it is an indicator that the memcache size should be increased.
+    memcache_misses: Counter,
+    /// The number of times we have decided that an IWANT control message is required for this
+    /// topic. A very high metric might indicate an underperforming network.
+    topic_iwant_msgs: Family<TopicHash, Counter>,
+
+    /// The size of the priority queue.
+    priority_queue_size: Histogram,
+    /// The size of the non-priority queue.
+    non_priority_queue_size: Histogram,
+}
+
+impl Metrics {
+    pub(crate) fn new(registry: &mut Registry, config: Config) -> Self {
+        // Destructure the config to be sure everything is used.
+        let Config {
+            max_topics,
+            max_never_subscribed_topics,
+            score_buckets,
+        } = config;
+
+        macro_rules! register_family {
register_family { + ($name:expr, $help:expr) => {{ + let fam = Family::default(); + registry.register($name, $help, fam.clone()); + fam + }}; + } + + let topic_subscription_status = register_family!( + "topic_subscription_status", + "Subscription status per known topic" + ); + let topic_peers_count = register_family!( + "topic_peers_counts", + "Number of peers subscribed to each topic" + ); + + let invalid_messages = register_family!( + "invalid_messages_per_topic", + "Number of invalid messages received for each topic" + ); + + let accepted_messages = register_family!( + "accepted_messages_per_topic", + "Number of accepted messages received for each topic" + ); + + let ignored_messages = register_family!( + "ignored_messages_per_topic", + "Number of ignored messages received for each topic" + ); + + let rejected_messages = register_family!( + "rejected_messages_per_topic", + "Number of rejected messages received for each topic" + ); + + let publish_messages_dropped = register_family!( + "publish_messages_dropped_per_topic", + "Number of publish messages dropped per topic" + ); + + let forward_messages_dropped = register_family!( + "forward_messages_dropped_per_topic", + "Number of forward messages dropped per topic" + ); + + let mesh_peer_counts = register_family!( + "mesh_peer_counts", + "Number of peers in each topic in our mesh" + ); + let mesh_peer_inclusion_events = register_family!( + "mesh_peer_inclusion_events", + "Number of times a peer gets added to our mesh for different reasons" + ); + let mesh_peer_churn_events = register_family!( + "mesh_peer_churn_events", + "Number of times a peer gets removed from our mesh for different reasons" + ); + let topic_msg_sent_counts = register_family!( + "topic_msg_sent_counts", + "Number of gossip messages sent to each topic" + ); + let topic_msg_published = register_family!( + "topic_msg_published", + "Number of gossip messages published to each topic" + ); + let topic_msg_sent_bytes = register_family!( + "topic_msg_sent_bytes", + "Bytes from gossip messages sent to each topic" + ); + + let topic_msg_recv_counts_unfiltered = register_family!( + "topic_msg_recv_counts_unfiltered", + "Number of gossip messages received on each topic (without duplicates being filtered)" + ); + + let topic_msg_recv_counts = register_family!( + "topic_msg_recv_counts", + "Number of gossip messages received on each topic (after duplicates have been filtered)" + ); + let topic_msg_recv_bytes = register_family!( + "topic_msg_recv_bytes", + "Bytes received from gossip messages for each topic" + ); + + let hist_builder = HistBuilder { + buckets: score_buckets, + }; + + let score_per_mesh: Family<_, _, HistBuilder> = Family::new_with_constructor(hist_builder); + registry.register( + "score_per_mesh", + "Histogram of scores per mesh topic", + score_per_mesh.clone(), + ); + + let scoring_penalties = register_family!( + "scoring_penalties", + "Counter of types of scoring penalties given to peers" + ); + let peers_per_protocol = register_family!( + "peers_per_protocol", + "Number of connected peers by protocol type" + ); + + let heartbeat_duration = Histogram::new(linear_buckets(0.0, 50.0, 10)); + registry.register( + "heartbeat_duration", + "Histogram of observed heartbeat durations", + heartbeat_duration.clone(), + ); + + let topic_iwant_msgs = register_family!( + "topic_iwant_msgs", + "Number of times we have decided an IWANT is required for this topic" + ); + let memcache_misses = { + let metric = Counter::default(); + registry.register( + "memcache_misses", + "Number 
+
+        let priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100));
+        registry.register(
+            "priority_queue_size",
+            "Histogram of observed priority queue sizes",
+            priority_queue_size.clone(),
+        );
+
+        let non_priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100));
+        registry.register(
+            "non_priority_queue_size",
+            "Histogram of observed non-priority queue sizes",
+            non_priority_queue_size.clone(),
+        );
+
+        Self {
+            max_topics,
+            max_never_subscribed_topics,
+            topic_info: HashMap::default(),
+            topic_subscription_status,
+            topic_peers_count,
+            invalid_messages,
+            accepted_messages,
+            ignored_messages,
+            rejected_messages,
+            publish_messages_dropped,
+            forward_messages_dropped,
+            mesh_peer_counts,
+            mesh_peer_inclusion_events,
+            mesh_peer_churn_events,
+            topic_msg_sent_counts,
+            topic_msg_sent_bytes,
+            topic_msg_published,
+            topic_msg_recv_counts_unfiltered,
+            topic_msg_recv_counts,
+            topic_msg_recv_bytes,
+            score_per_mesh,
+            scoring_penalties,
+            peers_per_protocol,
+            heartbeat_duration,
+            memcache_misses,
+            topic_iwant_msgs,
+            priority_queue_size,
+            non_priority_queue_size,
+        }
+    }
+
+    fn non_subscription_topics_count(&self) -> usize {
+        self.topic_info
+            .values()
+            .filter(|&ever_subscribed| !ever_subscribed)
+            .count()
+    }
+
+    /// Registers a topic if not already known and if the bounds allow it.
+    fn register_topic(&mut self, topic: &TopicHash) -> Result<(), ()> {
+        if self.topic_info.contains_key(topic) {
+            Ok(())
+        } else if self.topic_info.len() < self.max_topics
+            && self.non_subscription_topics_count() < self.max_never_subscribed_topics
+        {
+            // This is a topic without an explicit subscription and we register it if we are within
+            // the configured bounds.
+            self.topic_info.entry(topic.clone()).or_insert(false);
+            self.topic_subscription_status.get_or_create(topic).set(0);
+            Ok(())
+        } else {
+            // We don't know this topic and there is no space left to store it
+            Err(())
+        }
+    }
+
+    /// Registers a set of topics that we want to store and calculate metrics for.
+    pub(crate) fn register_allowed_topics(&mut self, topics: Vec<TopicHash>) {
+        for topic_hash in topics {
+            self.topic_info.insert(topic_hash, true);
+        }
+    }
+
+    /// Increase the number of peers that are subscribed to this topic.
+    pub(crate) fn inc_topic_peers(&mut self, topic: &TopicHash) {
+        if self.register_topic(topic).is_ok() {
+            self.topic_peers_count.get_or_create(topic).inc();
+        }
+    }
+
+    /// Decrease the number of peers that are subscribed to this topic.
+    pub(crate) fn dec_topic_peers(&mut self, topic: &TopicHash) {
+        if self.register_topic(topic).is_ok() {
+            self.topic_peers_count.get_or_create(topic).dec();
+        }
+    }
+
+    /* Mesh related methods */
+
+    /// Registers the subscription to a topic if the configured limits allow it.
+    /// Sets the registered number of peers in the mesh to 0.
+    pub(crate) fn joined(&mut self, topic: &TopicHash) {
+        if self.topic_info.contains_key(topic) || self.topic_info.len() < self.max_topics {
+            self.topic_info.insert(topic.clone(), true);
+            let was_subscribed = self.topic_subscription_status.get_or_create(topic).set(1);
+            debug_assert_eq!(was_subscribed, 0);
+            self.mesh_peer_counts.get_or_create(topic).set(0);
+        }
+    }
+
+    /// Registers the unsubscription from a topic if the topic was previously allowed.
+    /// Sets the registered number of peers in the mesh to 0.
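+    ///
+    /// `joined` and `left` are expected to be called as a pair (presumably from
+    /// the behaviour's join/leave logic) so that `topic_subscription_status`
+    /// toggles between 1 and 0; the `debug_assert_eq!` calls rely on this
+    /// pairing.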
+    pub(crate) fn left(&mut self, topic: &TopicHash) {
+        if self.topic_info.contains_key(topic) {
+            // Depending on the configured topic bounds we could miss a mesh topic.
+            // So, check first if the topic was previously allowed.
+            let was_subscribed = self.topic_subscription_status.get_or_create(topic).set(0);
+            debug_assert_eq!(was_subscribed, 1);
+            self.mesh_peer_counts.get_or_create(topic).set(0);
+        }
+    }
+
+    /// Register the inclusion of peers in our mesh due to some reason.
+    pub(crate) fn peers_included(&mut self, topic: &TopicHash, reason: Inclusion, count: usize) {
+        if self.register_topic(topic).is_ok() {
+            self.mesh_peer_inclusion_events
+                .get_or_create(&InclusionLabel {
+                    hash: topic.to_string(),
+                    reason,
+                })
+                .inc_by(count as u64);
+        }
+    }
+
+    /// Register the removal of peers from our mesh due to some reason.
+    pub(crate) fn peers_removed(&mut self, topic: &TopicHash, reason: Churn, count: usize) {
+        if self.register_topic(topic).is_ok() {
+            self.mesh_peer_churn_events
+                .get_or_create(&ChurnLabel {
+                    hash: topic.to_string(),
+                    reason,
+                })
+                .inc_by(count as u64);
+        }
+    }
+
+    /// Register the current number of peers in our mesh for this topic.
+    pub(crate) fn set_mesh_peers(&mut self, topic: &TopicHash, count: usize) {
+        if self.register_topic(topic).is_ok() {
+            // Due to the limits, this topic might not have been allowed, so we check.
+            self.mesh_peer_counts.get_or_create(topic).set(count as i64);
+        }
+    }
+
+    /// Register that an invalid message was received on a specific topic.
+    pub(crate) fn register_invalid_message(&mut self, topic: &TopicHash) {
+        if self.register_topic(topic).is_ok() {
+            self.invalid_messages.get_or_create(topic).inc();
+        }
+    }
+
+    /// Register a score penalty.
+    pub(crate) fn register_score_penalty(&mut self, penalty: Penalty) {
+        self.scoring_penalties
+            .get_or_create(&PenaltyLabel { penalty })
+            .inc();
+    }
+
+    /// Registers that a message was published on a specific topic.
+    pub(crate) fn register_published_message(&mut self, topic: &TopicHash) {
+        if self.register_topic(topic).is_ok() {
+            self.topic_msg_published.get_or_create(topic).inc();
+        }
+    }
+
+    /// Register sending a message over a topic.
+    pub(crate) fn msg_sent(&mut self, topic: &TopicHash, bytes: usize) {
+        if self.register_topic(topic).is_ok() {
+            self.topic_msg_sent_counts.get_or_create(topic).inc();
+            self.topic_msg_sent_bytes
+                .get_or_create(topic)
+                .inc_by(bytes as u64);
+        }
+    }
+
+    /// Register dropping a publish message over a topic.
+    pub(crate) fn publish_msg_dropped(&mut self, topic: &TopicHash) {
+        if self.register_topic(topic).is_ok() {
+            self.publish_messages_dropped.get_or_create(topic).inc();
+        }
+    }
+
+    /// Register dropping a forward message over a topic.
+    pub(crate) fn forward_msg_dropped(&mut self, topic: &TopicHash) {
+        if self.register_topic(topic).is_ok() {
+            self.forward_messages_dropped.get_or_create(topic).inc();
+        }
+    }
+
+    /// Register that a message was received (and was not a duplicate).
+    pub(crate) fn msg_recvd(&mut self, topic: &TopicHash) {
+        if self.register_topic(topic).is_ok() {
+            self.topic_msg_recv_counts.get_or_create(topic).inc();
+        }
+    }
+
+    /// Register that a message was received (could have been a duplicate).
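+    ///
+    /// Together with `msg_recvd`, this allows a topic's duplicate rate to be
+    /// estimated as `1 - topic_msg_recv_counts / topic_msg_recv_counts_unfiltered`.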
+    pub(crate) fn msg_recvd_unfiltered(&mut self, topic: &TopicHash, bytes: usize) {
+        if self.register_topic(topic).is_ok() {
+            self.topic_msg_recv_counts_unfiltered
+                .get_or_create(topic)
+                .inc();
+            self.topic_msg_recv_bytes
+                .get_or_create(topic)
+                .inc_by(bytes as u64);
+        }
+    }
+
+    pub(crate) fn register_msg_validation(
+        &mut self,
+        topic: &TopicHash,
+        validation: &MessageAcceptance,
+    ) {
+        if self.register_topic(topic).is_ok() {
+            match validation {
+                MessageAcceptance::Accept => self.accepted_messages.get_or_create(topic).inc(),
+                MessageAcceptance::Ignore => self.ignored_messages.get_or_create(topic).inc(),
+                MessageAcceptance::Reject => self.rejected_messages.get_or_create(topic).inc(),
+            };
+        }
+    }
+
+    /// Register a memcache miss.
+    pub(crate) fn memcache_miss(&mut self) {
+        self.memcache_misses.inc();
+    }
+
+    /// Register sending an IWANT msg for this topic.
+    pub(crate) fn register_iwant(&mut self, topic: &TopicHash) {
+        if self.register_topic(topic).is_ok() {
+            self.topic_iwant_msgs.get_or_create(topic).inc();
+        }
+    }
+
+    /// Observes a heartbeat duration.
+    pub(crate) fn observe_heartbeat_duration(&mut self, millis: u64) {
+        self.heartbeat_duration.observe(millis as f64);
+    }
+
+    /// Observes a priority queue size.
+    pub(crate) fn observe_priority_queue_size(&mut self, len: usize) {
+        self.priority_queue_size.observe(len as f64);
+    }
+
+    /// Observes a non-priority queue size.
+    pub(crate) fn observe_non_priority_queue_size(&mut self, len: usize) {
+        self.non_priority_queue_size.observe(len as f64);
+    }
+
+    /// Observe a score of a mesh peer.
+    pub(crate) fn observe_mesh_peers_score(&mut self, topic: &TopicHash, score: f64) {
+        if self.register_topic(topic).is_ok() {
+            self.score_per_mesh.get_or_create(topic).observe(score);
+        }
+    }
+
+    /// Register a new peer's connection based on its protocol.
+    pub(crate) fn peer_protocol_connected(&mut self, kind: PeerKind) {
+        self.peers_per_protocol
+            .get_or_create(&ProtocolLabel { protocol: kind })
+            .inc();
+    }
+
+    /// Removes a peer from the counter based on its protocol when it disconnects.
+    pub(crate) fn peer_protocol_disconnected(&mut self, kind: PeerKind) {
+        let metric = self
+            .peers_per_protocol
+            .get_or_create(&ProtocolLabel { protocol: kind });
+        if metric.get() != 0 {
+            // decrement the counter
+            metric.set(metric.get() - 1);
+        }
+    }
+}
+
+/// Reasons why a peer was included in the mesh.
+#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
+pub(crate) enum Inclusion {
+    /// Peer was a fanout peer.
+    Fanout,
+    /// Included from random selection.
+    Random,
+    /// Peer subscribed.
+    Subscribed,
+    /// Peer was included to fill the outbound quota.
+    Outbound,
+}
+
+/// Reasons why a peer was removed from the mesh.
+#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
+pub(crate) enum Churn {
+    /// Peer disconnected.
+    Dc,
+    /// Peer had a bad score.
+    BadScore,
+    /// Peer sent a PRUNE.
+    Prune,
+    /// Peer unsubscribed.
+    Unsub,
+    /// Too many peers.
+    Excess,
+}
+
+/// Kinds of reasons for which a peer's score can be penalized.
+#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
+pub(crate) enum Penalty {
+    /// A peer grafted before waiting the back-off time.
+    GraftBackoff,
+    /// A peer did not respond to an IWANT request in time.
+    BrokenPromise,
+    /// A peer did not send as many messages as expected.
+    MessageDeficit,
+    /// Too many peers under one IP address.
+    IPColocation,
+}
+
+/// Label for the mesh inclusion event metrics.
+#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
+struct InclusionLabel {
+    hash: String,
+    reason: Inclusion,
+}
+
+/// Label for the mesh churn event metrics.
+#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
+struct ChurnLabel {
+    hash: String,
+    reason: Churn,
+}
+
+/// Label for the kinds of protocols peers can connect as.
+#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
+struct ProtocolLabel {
+    protocol: PeerKind,
+}
+
+/// Label for the kinds of scoring penalties that can occur.
+#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
+struct PenaltyLabel {
+    penalty: Penalty,
+}
+
+#[derive(Clone)]
+struct HistBuilder {
+    buckets: Vec<f64>,
+}
+
+impl MetricConstructor<Histogram> for HistBuilder {
+    fn new_metric(&self) -> Histogram {
+        Histogram::new(self.buckets.clone().into_iter())
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/mod.rs b/beacon_node/lighthouse_network/src/gossipsub/mod.rs
new file mode 100644
index 000000000..8ccdc32cd
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/mod.rs
@@ -0,0 +1,111 @@
+//! Implementation of the [Gossipsub](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/README.md) protocol.
+//!
+//! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to extend upon
+//! floodsub and meshsub routing protocols.
+//!
+//! # Overview
+//!
+//! *Note: The gossipsub protocol specifications
+//! (<https://github.com/libp2p/specs/tree/master/pubsub/gossipsub>) provide an outline for the
+//! routing protocol. They should be consulted for further detail.*
+//!
+//! Gossipsub is a blend of meshsub for data and randomsub for mesh metadata. It provides bounded
+//! degree and amplification factor with the meshsub construction and augments it using gossip
+//! propagation of metadata with the randomsub technique.
+//!
+//! The router maintains an overlay mesh network of peers on which to efficiently send messages and
+//! metadata. Peers use control messages to broadcast and request known messages and
+//! subscribe/unsubscribe from topics in the mesh network.
+//!
+//! # Important Discrepancies
+//!
+//! This section outlines the current implementation's potential discrepancies from that of other
+//! implementations, due to undefined elements in the current specification.
+//!
+//! - **Topics** - In gossipsub, topics are configurable via the `hash_topics` configuration
+//! parameter. Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8
+//! strings, and this is the default configuration in rust-libp2p. Topics can be hashed (SHA256
+//! hashed then base64 encoded) by setting the `hash_topics` configuration parameter to true.
+//!
+//! - **Sequence Numbers** - A message on the gossipsub network is identified by the source
+//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence
+//! numbers in this implementation are sent as raw bytes across the wire. They are 64-bit
+//! big-endian unsigned integers. When messages are signed, they are monotonically increasing
+//! integers starting from a random value and wrapping around u64::MAX. When messages are unsigned,
+//! they are chosen at random.
+//! NOTE: These numbers are sequential in the current go implementation.
+//!
+//! # Peer Discovery
+//!
+//! Gossipsub does not provide peer discovery by itself. Peer discovery is the process by which
+//! peers in a p2p network exchange information about each other, among other reasons in order to
+//! become resistant to the failure or replacement of the
+//! [boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network.
+//!
+//! Peer discovery can e.g. be implemented with the help of the
+//! [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) protocol in
+//! combination with the [Identify](https://github.com/libp2p/specs/tree/master/identify)
+//! protocol. See the Kademlia implementation documentation for more information.
+//!
+//! # Using Gossipsub
+//!
+//! ## Gossipsub Config
+//!
+//! The [`Config`] struct specifies various network performance/tuning configuration parameters.
+//!
+//! [`Config`]: struct.Config.html
+//!
+//! This struct implements the [`Default`] trait and can be initialised via
+//! [`Config::default()`].
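+//!
+//! For example, a config could be sketched via the builder (import paths here
+//! assume this module is consumed as `gossipsub`, as in this crate):
+//!
+//! ```ignore
+//! use gossipsub::{ConfigBuilder, ValidationMode};
+//! use std::time::Duration;
+//!
+//! let config = ConfigBuilder::default()
+//!     .validation_mode(ValidationMode::Strict)
+//!     .heartbeat_interval(Duration::from_secs(1))
+//!     .build()
+//!     .expect("valid gossipsub config");
+//! ```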
+//!
+//! ## Behaviour
+//!
+//! The [`Behaviour`] struct implements the [`libp2p_swarm::NetworkBehaviour`] trait allowing it to
+//! act as the routing behaviour in a [`libp2p_swarm::Swarm`]. This struct requires an instance of
+//! [`PeerId`](libp2p_identity::PeerId) and [`Config`].
+//!
+//! [`Behaviour`]: struct.Behaviour.html
+//!
+//! ## Example
+//!
+//! For an example on how to use gossipsub, see the [chat-example](https://github.com/libp2p/rust-libp2p/tree/master/examples/chat).
+
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+mod backoff;
+mod behaviour;
+mod config;
+mod error;
+mod gossip_promises;
+mod handler;
+mod mcache;
+mod metrics;
+mod peer_score;
+mod protocol;
+mod rpc_proto;
+mod subscription_filter;
+mod time_cache;
+mod topic;
+mod transform;
+mod types;
+
+pub use self::behaviour::{Behaviour, Event, MessageAuthenticity};
+pub use self::config::{Config, ConfigBuilder, ValidationMode, Version};
+pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError};
+pub use self::metrics::Config as MetricsConfig;
+pub use self::peer_score::{
+    score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
+    TopicScoreParams,
+};
+pub use self::subscription_filter::{
+    AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters,
+    MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter,
+    WhitelistSubscriptionFilter,
+};
+pub use self::topic::{Hasher, Topic, TopicHash};
+pub use self::transform::{DataTransform, IdentityTransform};
+pub use self::types::{Message, MessageAcceptance, MessageId, RawMessage};
+pub type IdentTopic = Topic<self::topic::IdentityHash>;
+pub type Sha256Topic = Topic<self::topic::Sha256Hash>;
+pub use self::types::FailedMessages;
diff --git a/beacon_node/lighthouse_network/src/gossipsub/peer_score.rs b/beacon_node/lighthouse_network/src/gossipsub/peer_score.rs
new file mode 100644
index 000000000..d84b2416c
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/peer_score.rs
@@ -0,0 +1,937 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! Manages and stores the scoring logic of a particular peer on the gossipsub behaviour.
+
+use super::metrics::{Metrics, Penalty};
+use super::time_cache::TimeCache;
+use super::{MessageId, TopicHash};
+use instant::Instant;
+use libp2p::identity::PeerId;
+use std::collections::{hash_map, HashMap, HashSet};
+use std::net::IpAddr;
+use std::time::Duration;
+
+mod params;
+use super::ValidationError;
+pub use params::{
+    score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
+    TopicScoreParams,
+};
+
+#[cfg(test)]
+mod tests;
+
+/// The number of seconds delivery messages are stored in the cache.
+const TIME_CACHE_DURATION: u64 = 120;
+
+pub(crate) struct PeerScore {
+    /// The score parameters.
+    params: PeerScoreParams,
+    /// The stats per peer.
+    peer_stats: HashMap<PeerId, PeerStats>,
+    /// Tracking peers per IP.
+    peer_ips: HashMap<IpAddr, HashSet<PeerId>>,
+    /// Message delivery tracking. This is a time-cache of [`DeliveryRecord`]s.
+    deliveries: TimeCache<MessageId, DeliveryRecord>,
+    /// Callback for monitoring message delivery times.
+    message_delivery_time_callback: Option<fn(&PeerId, &TopicHash, f64)>,
+}
+
+/// General statistics for a given gossipsub peer.
+struct PeerStats {
+    /// Connection status of the peer.
+    status: ConnectionStatus,
+    /// Stats per topic.
+    topics: HashMap<TopicHash, TopicStats>,
+    /// IP tracking for individual peers.
+    known_ips: HashSet<IpAddr>,
+    /// Behaviour penalty that is applied to the peer, assigned by the behaviour.
+    behaviour_penalty: f64,
+    /// Application specific score. Can be manipulated by calling
+    /// `PeerScore::set_application_score`.
+    application_score: f64,
+    /// Scoring based on whether this peer consumes messages fast enough or not.
+    slow_peer_penalty: f64,
+}
+
+enum ConnectionStatus {
+    /// The peer is connected.
+    Connected,
+    /// The peer is disconnected.
+    Disconnected {
+        /// Expiration time of the score state for disconnected peers.
+        expire: Instant,
+    },
+}
+
+impl Default for PeerStats {
+    fn default() -> Self {
+        PeerStats {
+            status: ConnectionStatus::Connected,
+            topics: HashMap::new(),
+            known_ips: HashSet::new(),
+            behaviour_penalty: 0f64,
+            application_score: 0f64,
+            slow_peer_penalty: 0f64,
+        }
+    }
+}
+
+impl PeerStats {
+    /// Returns a mutable reference to the topic stats if they exist. Otherwise, if the supplied
+    /// parameters score the topic, inserts the default stats and returns a reference to those.
+    /// If neither applies, returns None.
+    pub(crate) fn stats_or_default_mut(
+        &mut self,
+        topic_hash: TopicHash,
+        params: &PeerScoreParams,
+    ) -> Option<&mut TopicStats> {
+        if params.topics.get(&topic_hash).is_some() {
+            Some(self.topics.entry(topic_hash).or_default())
+        } else {
+            self.topics.get_mut(&topic_hash)
+        }
+    }
+}
+
+/// Stats assigned to peer for each topic.
+struct TopicStats {
+    mesh_status: MeshStatus,
+    /// Number of first message deliveries.
+    first_message_deliveries: f64,
+    /// True if the peer has been in the mesh for enough time to activate mesh message deliveries.
+    mesh_message_deliveries_active: bool,
+    /// Number of message deliveries from the mesh.
+    mesh_message_deliveries: f64,
+    /// Mesh rate failure penalty.
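+    ///
+    /// Accumulates the square of the delivery deficit whenever the peer is
+    /// pruned or removed while below `mesh_message_deliveries_threshold`,
+    /// making the penalty "sticky" across mesh membership changes (see
+    /// `PeerScore::prune` and `PeerScore::remove_peer`).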
+    mesh_failure_penalty: f64,
+    /// Invalid message counter.
+    invalid_message_deliveries: f64,
+}
+
+impl TopicStats {
+    /// Returns true if the peer is in the `mesh`.
+    pub(crate) fn in_mesh(&self) -> bool {
+        matches!(self.mesh_status, MeshStatus::Active { .. })
+    }
+}
+
+/// Status defining a peer's inclusion in the mesh and associated parameters.
+enum MeshStatus {
+    Active {
+        /// The time the peer was last GRAFTed.
+        graft_time: Instant,
+        /// The time the peer has been in the mesh.
+        mesh_time: Duration,
+    },
+    InActive,
+}
+
+impl MeshStatus {
+    /// Initialises a new [`MeshStatus::Active`] mesh status.
+    pub(crate) fn new_active() -> Self {
+        MeshStatus::Active {
+            graft_time: Instant::now(),
+            mesh_time: Duration::from_secs(0),
+        }
+    }
+}
+
+impl Default for TopicStats {
+    fn default() -> Self {
+        TopicStats {
+            mesh_status: MeshStatus::InActive,
+            first_message_deliveries: Default::default(),
+            mesh_message_deliveries_active: Default::default(),
+            mesh_message_deliveries: Default::default(),
+            mesh_failure_penalty: Default::default(),
+            invalid_message_deliveries: Default::default(),
+        }
+    }
+}
+
+#[derive(PartialEq, Debug)]
+struct DeliveryRecord {
+    status: DeliveryStatus,
+    first_seen: Instant,
+    peers: HashSet<PeerId>,
+}
+
+#[derive(PartialEq, Debug)]
+enum DeliveryStatus {
+    /// Don't know (yet) if the message is valid.
+    Unknown,
+    /// The message is valid together with the validated time.
+    Valid(Instant),
+    /// The message is invalid.
+    Invalid,
+    /// Instructed by the validator to ignore the message.
+    Ignored,
+}
+
+impl Default for DeliveryRecord {
+    fn default() -> Self {
+        DeliveryRecord {
+            status: DeliveryStatus::Unknown,
+            first_seen: Instant::now(),
+            peers: HashSet::new(),
+        }
+    }
+}
+
+impl PeerScore {
+    /// Creates a new [`PeerScore`] using a given set of peer scoring parameters.
+    #[allow(dead_code)]
+    pub(crate) fn new(params: PeerScoreParams) -> Self {
+        Self::new_with_message_delivery_time_callback(params, None)
+    }
+
+    pub(crate) fn new_with_message_delivery_time_callback(
+        params: PeerScoreParams,
+        callback: Option<fn(&PeerId, &TopicHash, f64)>,
+    ) -> Self {
+        PeerScore {
+            params,
+            peer_stats: HashMap::new(),
+            peer_ips: HashMap::new(),
+            deliveries: TimeCache::new(Duration::from_secs(TIME_CACHE_DURATION)),
+            message_delivery_time_callback: callback,
+        }
+    }
+
+    /// Returns the score for a peer.
+    pub(crate) fn score(&self, peer_id: &PeerId) -> f64 {
+        self.metric_score(peer_id, None)
+    }
+
+    /// Returns the score for a peer, logging metrics. This is called from the heartbeat and
+    /// increments the metric counts for penalties.
+    pub(crate) fn metric_score(&self, peer_id: &PeerId, mut metrics: Option<&mut Metrics>) -> f64 {
+        let Some(peer_stats) = self.peer_stats.get(peer_id) else {
+            return 0.0;
+        };
+        let mut score = 0.0;
+
+        // topic scores
+        for (topic, topic_stats) in peer_stats.topics.iter() {
+            // topic parameters
+            if let Some(topic_params) = self.params.topics.get(topic) {
+                // we are tracking the topic
+
+                // the topic score
+                let mut topic_score = 0.0;
+
+                // P1: time in mesh
+                if let MeshStatus::Active { mesh_time, ..
} = topic_stats.mesh_status { + let p1 = { + let v = mesh_time.as_secs_f64() + / topic_params.time_in_mesh_quantum.as_secs_f64(); + if v < topic_params.time_in_mesh_cap { + v + } else { + topic_params.time_in_mesh_cap + } + }; + topic_score += p1 * topic_params.time_in_mesh_weight; + } + + // P2: first message deliveries + let p2 = { + let v = topic_stats.first_message_deliveries; + if v < topic_params.first_message_deliveries_cap { + v + } else { + topic_params.first_message_deliveries_cap + } + }; + topic_score += p2 * topic_params.first_message_deliveries_weight; + + // P3: mesh message deliveries + if topic_stats.mesh_message_deliveries_active + && topic_stats.mesh_message_deliveries + < topic_params.mesh_message_deliveries_threshold + { + let deficit = topic_params.mesh_message_deliveries_threshold + - topic_stats.mesh_message_deliveries; + let p3 = deficit * deficit; + topic_score += p3 * topic_params.mesh_message_deliveries_weight; + if let Some(metrics) = metrics.as_mut() { + metrics.register_score_penalty(Penalty::MessageDeficit); + } + tracing::debug!( + peer=%peer_id, + %topic, + %deficit, + penalty=%topic_score, + "[Penalty] The peer has a mesh deliveries deficit and will be penalized" + ); + } + + // P3b: + // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts. + let p3b = topic_stats.mesh_failure_penalty; + topic_score += p3b * topic_params.mesh_failure_penalty_weight; + + // P4: invalid messages + // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts. + let p4 = + topic_stats.invalid_message_deliveries * topic_stats.invalid_message_deliveries; + topic_score += p4 * topic_params.invalid_message_deliveries_weight; + + // update score, mixing with topic weight + score += topic_score * topic_params.topic_weight; + } + } + + // apply the topic score cap, if any + if self.params.topic_score_cap > 0f64 && score > self.params.topic_score_cap { + score = self.params.topic_score_cap; + } + + // P5: application-specific score + let p5 = peer_stats.application_score; + score += p5 * self.params.app_specific_weight; + + // P6: IP collocation factor + for ip in peer_stats.known_ips.iter() { + if self.params.ip_colocation_factor_whitelist.get(ip).is_some() { + continue; + } + + // P6 has a cliff (ip_colocation_factor_threshold); it's only applied iff + // at least that many peers are connected to us from that source IP + // addr. It is quadratic, and the weight is negative (validated by + // peer_score_params.validate()). 
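+            // For example, with ip_colocation_factor_threshold = 1.0 and four
+            // peers behind one IP, surplus = 3.0 and p6 = 9.0, so the score
+            // contribution is 9.0 * ip_colocation_factor_weight (a negative
+            // value).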
+            if let Some(peers_in_ip) = self.peer_ips.get(ip).map(|peers| peers.len()) {
+                if (peers_in_ip as f64) > self.params.ip_colocation_factor_threshold {
+                    let surplus = (peers_in_ip as f64) - self.params.ip_colocation_factor_threshold;
+                    let p6 = surplus * surplus;
+                    if let Some(metrics) = metrics.as_mut() {
+                        metrics.register_score_penalty(Penalty::IPColocation);
+                    }
+                    tracing::debug!(
+                        peer=%peer_id,
+                        surplus_ip=%ip,
+                        surplus=%surplus,
+                        "[Penalty] The peer gets penalized because of too many peers with the same ip"
+                    );
+                    score += p6 * self.params.ip_colocation_factor_weight;
+                }
+            }
+        }
+
+        // P7: behavioural pattern penalty
+        if peer_stats.behaviour_penalty > self.params.behaviour_penalty_threshold {
+            let excess = peer_stats.behaviour_penalty - self.params.behaviour_penalty_threshold;
+            let p7 = excess * excess;
+            score += p7 * self.params.behaviour_penalty_weight;
+        }
+
+        // Slow peer weighting
+        if peer_stats.slow_peer_penalty > self.params.slow_peer_threshold {
+            let excess = peer_stats.slow_peer_penalty - self.params.slow_peer_threshold;
+            score += excess * self.params.slow_peer_weight;
+        }
+
+        score
+    }
+
+    pub(crate) fn add_penalty(&mut self, peer_id: &PeerId, count: usize) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            tracing::debug!(
+                peer=%peer_id,
+                %count,
+                "[Penalty] Behavioral penalty for peer"
+            );
+            peer_stats.behaviour_penalty += count as f64;
+        }
+    }
+
+    fn remove_ips_for_peer(
+        peer_stats: &PeerStats,
+        peer_ips: &mut HashMap<IpAddr, HashSet<PeerId>>,
+        peer_id: &PeerId,
+    ) {
+        for ip in peer_stats.known_ips.iter() {
+            if let Some(peer_set) = peer_ips.get_mut(ip) {
+                peer_set.remove(peer_id);
+            }
+        }
+    }
+
+    pub(crate) fn refresh_scores(&mut self) {
+        let now = Instant::now();
+        let params_ref = &self.params;
+        let peer_ips_ref = &mut self.peer_ips;
+        self.peer_stats.retain(|peer_id, peer_stats| {
+            if let ConnectionStatus::Disconnected { expire } = peer_stats.status {
+                // has the retention period expired?
+                if now > expire {
+                    // yes, throw it away (but clean up the IP tracking first)
+                    Self::remove_ips_for_peer(peer_stats, peer_ips_ref, peer_id);
+                    // re-address this, use retain or entry
+                    return false;
+                }
+
+                // we don't decay retained scores, as the peer is not active.
+                // this way the peer cannot reset a negative score by simply disconnecting and
+                // reconnecting, unless the retention period has elapsed.
+                // similarly, a well behaved peer does not lose its score by getting disconnected.
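+                // Retention is bounded by `params.retain_score`: a peer that
+                // reconnects before it elapses keeps its previous (possibly
+                // negative) score.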
+                return true;
+            }
+
+            for (topic, topic_stats) in peer_stats.topics.iter_mut() {
+                // the topic parameters
+                if let Some(topic_params) = params_ref.topics.get(topic) {
+                    // decay counters
+                    topic_stats.first_message_deliveries *=
+                        topic_params.first_message_deliveries_decay;
+                    if topic_stats.first_message_deliveries < params_ref.decay_to_zero {
+                        topic_stats.first_message_deliveries = 0.0;
+                    }
+                    topic_stats.mesh_message_deliveries *=
+                        topic_params.mesh_message_deliveries_decay;
+                    if topic_stats.mesh_message_deliveries < params_ref.decay_to_zero {
+                        topic_stats.mesh_message_deliveries = 0.0;
+                    }
+                    topic_stats.mesh_failure_penalty *= topic_params.mesh_failure_penalty_decay;
+                    if topic_stats.mesh_failure_penalty < params_ref.decay_to_zero {
+                        topic_stats.mesh_failure_penalty = 0.0;
+                    }
+                    topic_stats.invalid_message_deliveries *=
+                        topic_params.invalid_message_deliveries_decay;
+                    if topic_stats.invalid_message_deliveries < params_ref.decay_to_zero {
+                        topic_stats.invalid_message_deliveries = 0.0;
+                    }
+                    // update mesh time and activate mesh message delivery parameter if need be
+                    if let MeshStatus::Active {
+                        ref mut mesh_time,
+                        ref mut graft_time,
+                    } = topic_stats.mesh_status
+                    {
+                        *mesh_time = now.duration_since(*graft_time);
+                        if *mesh_time > topic_params.mesh_message_deliveries_activation {
+                            topic_stats.mesh_message_deliveries_active = true;
+                        }
+                    }
+                }
+            }
+
+            // decay P7 counter
+            peer_stats.behaviour_penalty *= params_ref.behaviour_penalty_decay;
+            if peer_stats.behaviour_penalty < params_ref.decay_to_zero {
+                peer_stats.behaviour_penalty = 0.0;
+            }
+
+            // decay slow peer score
+            peer_stats.slow_peer_penalty *= params_ref.slow_peer_decay;
+            if peer_stats.slow_peer_penalty < params_ref.decay_to_zero {
+                peer_stats.slow_peer_penalty = 0.0;
+            }
+
+            true
+        });
+    }
+
+    /// Adds a connected peer to [`PeerScore`], initialising with empty ips (ips get added later
+    /// through `add_ip`).
+    pub(crate) fn add_peer(&mut self, peer_id: PeerId) {
+        let peer_stats = self.peer_stats.entry(peer_id).or_default();
+
+        // mark the peer as connected
+        peer_stats.status = ConnectionStatus::Connected;
+    }
+
+    /// Adds a new ip to a peer; if the peer is not yet known, creates a new peer_stats entry
+    /// for it.
+    pub(crate) fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) {
+        tracing::trace!(peer=%peer_id, %ip, "Add ip for peer");
+        let peer_stats = self.peer_stats.entry(*peer_id).or_default();
+
+        // Mark the peer as connected (currently the default is connected, but we don't want to
+        // rely on the default).
+        peer_stats.status = ConnectionStatus::Connected;
+
+        // Insert the ip
+        peer_stats.known_ips.insert(ip);
+        self.peer_ips.entry(ip).or_default().insert(*peer_id);
+    }
+
+    /// Indicate that a peer has been too slow to consume a message.
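+    ///
+    /// Each call adds 1.0 to the peer's `slow_peer_penalty`, which decays via
+    /// `slow_peer_decay` and only affects the score once it exceeds
+    /// `slow_peer_threshold` (see `metric_score`).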
+    pub(crate) fn failed_message_slow_peer(&mut self, peer_id: &PeerId) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            peer_stats.slow_peer_penalty += 1.0;
+            tracing::debug!(peer=%peer_id, %peer_stats.slow_peer_penalty, "[Penalty] Expired message penalty.");
+        }
+    }
+
+    /// Removes an IP from a peer.
+    pub(crate) fn remove_ip(&mut self, peer_id: &PeerId, ip: &IpAddr) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            peer_stats.known_ips.remove(ip);
+            if let Some(peer_ids) = self.peer_ips.get_mut(ip) {
+                tracing::trace!(peer=%peer_id, %ip, "Remove ip for peer");
+                peer_ids.remove(peer_id);
+            } else {
+                tracing::trace!(
+                    peer=%peer_id,
+                    %ip,
+                    "No entry in peer_ips for ip which should get removed for peer"
+                );
+            }
+        } else {
+            tracing::trace!(
+                peer=%peer_id,
+                %ip,
+                "No peer_stats for peer which should remove the ip"
+            );
+        }
+    }
+
+    /// Removes a peer from the score table. This retains peer statistics if their score is
+    /// non-positive.
+    pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) {
+        // we only retain non-positive scores of peers
+        if self.score(peer_id) > 0f64 {
+            if let hash_map::Entry::Occupied(entry) = self.peer_stats.entry(*peer_id) {
+                Self::remove_ips_for_peer(entry.get(), &mut self.peer_ips, peer_id);
+                entry.remove();
+            }
+            return;
+        }
+
+        // if the peer is retained (including its score) the `first_message_delivery` counters
+        // are reset to 0 and mesh delivery penalties applied.
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            for (topic, topic_stats) in peer_stats.topics.iter_mut() {
+                topic_stats.first_message_deliveries = 0f64;
+
+                if let Some(threshold) = self
+                    .params
+                    .topics
+                    .get(topic)
+                    .map(|param| param.mesh_message_deliveries_threshold)
+                {
+                    if topic_stats.in_mesh()
+                        && topic_stats.mesh_message_deliveries_active
+                        && topic_stats.mesh_message_deliveries < threshold
+                    {
+                        let deficit = threshold - topic_stats.mesh_message_deliveries;
+                        topic_stats.mesh_failure_penalty += deficit * deficit;
+                    }
+                }
+
+                topic_stats.mesh_status = MeshStatus::InActive;
+                topic_stats.mesh_message_deliveries_active = false;
+            }
+
+            peer_stats.status = ConnectionStatus::Disconnected {
+                expire: Instant::now() + self.params.retain_score,
+            };
+        }
+    }
+
+    /// Handles scoring functionality as a peer GRAFTs to a topic.
+    pub(crate) fn graft(&mut self, peer_id: &PeerId, topic: impl Into<TopicHash>) {
+        let topic = topic.into();
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            // if we are scoring the topic, update the mesh status.
+            if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic, &self.params) {
+                topic_stats.mesh_status = MeshStatus::new_active();
+                topic_stats.mesh_message_deliveries_active = false;
+            }
+        }
+    }
+
+    /// Handles scoring functionality as a peer PRUNEs from a topic.
+    pub(crate) fn prune(&mut self, peer_id: &PeerId, topic: TopicHash) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            // if we are scoring the topic, update the mesh status.
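+            // (Sketch of the arithmetic below: with the default delivery threshold of 20 and
+            // zero mesh deliveries at prune time, the sticky penalty accrued is
+            // (20 - 0)^2 = 400, later weighted by `mesh_failure_penalty_weight`.)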
+            if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic.clone(), &self.params)
+            {
+                // sticky mesh delivery rate failure penalty
+                let threshold = self
+                    .params
+                    .topics
+                    .get(&topic)
+                    .expect("Topic must exist in order for there to be topic stats")
+                    .mesh_message_deliveries_threshold;
+                if topic_stats.mesh_message_deliveries_active
+                    && topic_stats.mesh_message_deliveries < threshold
+                {
+                    let deficit = threshold - topic_stats.mesh_message_deliveries;
+                    topic_stats.mesh_failure_penalty += deficit * deficit;
+                }
+                topic_stats.mesh_message_deliveries_active = false;
+                topic_stats.mesh_status = MeshStatus::InActive;
+            }
+        }
+    }
+
+    pub(crate) fn validate_message(
+        &mut self,
+        from: &PeerId,
+        msg_id: &MessageId,
+        topic_hash: &TopicHash,
+    ) {
+        // adds an empty record with the message id
+        self.deliveries.entry(msg_id.clone()).or_default();
+
+        if let Some(callback) = self.message_delivery_time_callback {
+            if self
+                .peer_stats
+                .get(from)
+                .and_then(|s| s.topics.get(topic_hash))
+                .map(|ts| ts.in_mesh())
+                .unwrap_or(false)
+            {
+                callback(from, topic_hash, 0.0);
+            }
+        }
+    }
+
+    pub(crate) fn deliver_message(
+        &mut self,
+        from: &PeerId,
+        msg_id: &MessageId,
+        topic_hash: &TopicHash,
+    ) {
+        self.mark_first_message_delivery(from, topic_hash);
+
+        let record = self.deliveries.entry(msg_id.clone()).or_default();
+
+        // this should be the first delivery trace
+        if record.status != DeliveryStatus::Unknown {
+            tracing::warn!(
+                peer=%from,
+                status=?record.status,
+                first_seen=?record.first_seen.elapsed().as_secs(),
+                "Unexpected delivery trace"
+            );
+            return;
+        }
+
+        // mark the message as valid and reward mesh peers that have already forwarded it to us
+        record.status = DeliveryStatus::Valid(Instant::now());
+        for peer in record.peers.iter().cloned().collect::<Vec<_>>() {
+            // this check is to make sure a peer can't send us a message twice and get a double
+            // count if it is a first delivery
+            if &peer != from {
+                self.mark_duplicate_message_delivery(&peer, topic_hash, None);
+            }
+        }
+    }
+
+    /// Similar to `reject_message` except it does not require the message id or reason for an
+    /// invalid message.
+    pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) {
+        tracing::debug!(
+            peer=%from,
+            "[Penalty] Message from peer rejected because of ValidationError or SelfOrigin"
+        );
+
+        self.mark_invalid_message_delivery(from, topic_hash);
+    }
+
+    /// Rejects a message.
+    pub(crate) fn reject_message(
+        &mut self,
+        from: &PeerId,
+        msg_id: &MessageId,
+        topic_hash: &TopicHash,
+        reason: RejectReason,
+    ) {
+        match reason {
+            // these messages are not tracked, but the peer is penalized as they are invalid
+            RejectReason::ValidationError(_) | RejectReason::SelfOrigin => {
+                self.reject_invalid_message(from, topic_hash);
+                return;
+            }
+            // we ignore those messages, so do nothing.
+            RejectReason::BlackListedPeer | RejectReason::BlackListedSource => {
+                return;
+            }
+            _ => {} // the rest are handled after record creation
+        }
+
+        let peers: Vec<_> = {
+            let record = self.deliveries.entry(msg_id.clone()).or_default();
+
+            // Multiple peers can now reject the same message as we track which peers send us the
+            // message. If we have already updated the status, return.
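+            // (Each rejection bumps the per-topic invalid-deliveries counter via
+            // `mark_invalid_message_delivery` below; P4's value is that counter squared, so
+            // 100 rejections at weight -1.0 and topic weight 1.0 score -10000, as exercised
+            // by `test_score_invalid_message_deliveries` in the tests module.)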
+            if record.status != DeliveryStatus::Unknown {
+                return;
+            }
+
+            if let RejectReason::ValidationIgnored = reason {
+                // we were explicitly instructed by the validator to ignore the message but not
+                // penalize the peer
+                record.status = DeliveryStatus::Ignored;
+                record.peers.clear();
+                return;
+            }
+
+            // mark the message as invalid and penalize peers that have already forwarded it.
+            record.status = DeliveryStatus::Invalid;
+            // release the delivery time tracking map to free some memory early
+            record.peers.drain().collect()
+        };
+
+        self.mark_invalid_message_delivery(from, topic_hash);
+        for peer_id in peers.iter() {
+            self.mark_invalid_message_delivery(peer_id, topic_hash)
+        }
+    }
+
+    pub(crate) fn duplicated_message(
+        &mut self,
+        from: &PeerId,
+        msg_id: &MessageId,
+        topic_hash: &TopicHash,
+    ) {
+        let record = self.deliveries.entry(msg_id.clone()).or_default();
+
+        if record.peers.get(from).is_some() {
+            // we have already seen this duplicate!
+            return;
+        }
+
+        if let Some(callback) = self.message_delivery_time_callback {
+            let time = if let DeliveryStatus::Valid(validated) = record.status {
+                validated.elapsed().as_secs_f64()
+            } else {
+                0.0
+            };
+            if self
+                .peer_stats
+                .get(from)
+                .and_then(|s| s.topics.get(topic_hash))
+                .map(|ts| ts.in_mesh())
+                .unwrap_or(false)
+            {
+                callback(from, topic_hash, time);
+            }
+        }
+
+        match record.status {
+            DeliveryStatus::Unknown => {
+                // the message is being validated; track the peer delivery and wait for
+                // the Deliver/Reject notification.
+                record.peers.insert(*from);
+            }
+            DeliveryStatus::Valid(validated) => {
+                // mark the peer delivery time to only count a duplicate delivery once.
+                record.peers.insert(*from);
+                self.mark_duplicate_message_delivery(from, topic_hash, Some(validated));
+            }
+            DeliveryStatus::Invalid => {
+                // we no longer track delivery time
+                self.mark_invalid_message_delivery(from, topic_hash);
+            }
+            DeliveryStatus::Ignored => {
+                // the message was ignored; do nothing (we don't know if it was valid)
+            }
+        }
+    }
+
+    /// Sets the application-specific score for a peer. Returns true if the peer is connected
+    /// or if the score of the peer has not yet expired, and false otherwise.
+    pub(crate) fn set_application_score(&mut self, peer_id: &PeerId, new_score: f64) -> bool {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            peer_stats.application_score = new_score;
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Sets scoring parameters for a topic.
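+    /// If the topic already had parameters and either delivery cap was lowered, the existing
+    /// per-peer counters are clamped down to the new caps so they cannot exceed them.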
+    pub(crate) fn set_topic_params(&mut self, topic_hash: TopicHash, params: TopicScoreParams) {
+        use hash_map::Entry::*;
+        match self.params.topics.entry(topic_hash.clone()) {
+            Occupied(mut entry) => {
+                let first_message_deliveries_cap = params.first_message_deliveries_cap;
+                let mesh_message_deliveries_cap = params.mesh_message_deliveries_cap;
+                let old_params = entry.insert(params);
+
+                if old_params.first_message_deliveries_cap > first_message_deliveries_cap {
+                    for stats in self.peer_stats.values_mut() {
+                        if let Some(tstats) = stats.topics.get_mut(&topic_hash) {
+                            if tstats.first_message_deliveries > first_message_deliveries_cap {
+                                tstats.first_message_deliveries = first_message_deliveries_cap;
+                            }
+                        }
+                    }
+                }
+
+                if old_params.mesh_message_deliveries_cap > mesh_message_deliveries_cap {
+                    for stats in self.peer_stats.values_mut() {
+                        if let Some(tstats) = stats.topics.get_mut(&topic_hash) {
+                            if tstats.mesh_message_deliveries > mesh_message_deliveries_cap {
+                                tstats.mesh_message_deliveries = mesh_message_deliveries_cap;
+                            }
+                        }
+                    }
+                }
+            }
+            Vacant(entry) => {
+                entry.insert(params);
+            }
+        }
+    }
+
+    /// Returns the scoring parameters for a topic if they exist.
+    pub(crate) fn get_topic_params(&self, topic_hash: &TopicHash) -> Option<&TopicScoreParams> {
+        self.params.topics.get(topic_hash)
+    }
+
+    /// Increments the "invalid message deliveries" counter for all scored topics the message
+    /// is published in.
+    fn mark_invalid_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            if let Some(topic_stats) =
+                peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
+            {
+                tracing::debug!(
+                    peer=%peer_id,
+                    topic=%topic_hash,
+                    "[Penalty] Peer delivered an invalid message in topic and gets penalized \
+                    for it",
+                );
+                topic_stats.invalid_message_deliveries += 1f64;
+            }
+        }
+    }
+
+    /// Increments the "first message deliveries" counter for all scored topics the message is
+    /// published in, as well as the "mesh message deliveries" counter, if the peer is in the
+    /// mesh for the topic.
+    fn mark_first_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            if let Some(topic_stats) =
+                peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
+            {
+                let cap = self
+                    .params
+                    .topics
+                    .get(topic_hash)
+                    .expect("Topic must exist if there are known topic_stats")
+                    .first_message_deliveries_cap;
+                topic_stats.first_message_deliveries =
+                    if topic_stats.first_message_deliveries + 1f64 > cap {
+                        cap
+                    } else {
+                        topic_stats.first_message_deliveries + 1f64
+                    };
+
+                if let MeshStatus::Active { .. } = topic_stats.mesh_status {
+                    let cap = self
+                        .params
+                        .topics
+                        .get(topic_hash)
+                        .expect("Topic must exist if there are known topic_stats")
+                        .mesh_message_deliveries_cap;
+
+                    topic_stats.mesh_message_deliveries =
+                        if topic_stats.mesh_message_deliveries + 1f64 > cap {
+                            cap
+                        } else {
+                            topic_stats.mesh_message_deliveries + 1f64
+                        };
+                }
+            }
+        }
+    }
+
+    /// Increments the "mesh message deliveries" counter for messages we've seen before, as long
+    /// as the message was received within the P3 window.
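+    /// (For example, with the default 10 ms `mesh_message_deliveries_window`, a duplicate of a
+    /// message first validated at time `t` still counts at `t + 5 ms`, but not at `t + 50 ms`.)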
+    fn mark_duplicate_message_delivery(
+        &mut self,
+        peer_id: &PeerId,
+        topic_hash: &TopicHash,
+        validated_time: Option<Instant>,
+    ) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            let now = if validated_time.is_some() {
+                Some(Instant::now())
+            } else {
+                None
+            };
+            if let Some(topic_stats) =
+                peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
+            {
+                if let MeshStatus::Active { .. } = topic_stats.mesh_status {
+                    let topic_params = self
+                        .params
+                        .topics
+                        .get(topic_hash)
+                        .expect("Topic must exist if there are known topic_stats");
+
+                    // check against the mesh delivery window -- if the validated time is passed as 0, then
+                    // the message was received before we finished validation and thus falls within the mesh
+                    // delivery window.
+                    let mut falls_in_mesh_deliver_window = true;
+                    if let Some(validated_time) = validated_time {
+                        if let Some(now) = &now {
+                            // should always be true
+                            let window_time = validated_time
+                                .checked_add(topic_params.mesh_message_deliveries_window)
+                                .unwrap_or(*now);
+                            if now > &window_time {
+                                falls_in_mesh_deliver_window = false;
+                            }
+                        }
+                    }
+
+                    if falls_in_mesh_deliver_window {
+                        let cap = topic_params.mesh_message_deliveries_cap;
+                        topic_stats.mesh_message_deliveries =
+                            if topic_stats.mesh_message_deliveries + 1f64 > cap {
+                                cap
+                            } else {
+                                topic_stats.mesh_message_deliveries + 1f64
+                            };
+                    }
+                }
+            }
+        }
+    }
+
+    pub(crate) fn mesh_message_deliveries(&self, peer: &PeerId, topic: &TopicHash) -> Option<f64> {
+        self.peer_stats
+            .get(peer)
+            .and_then(|s| s.topics.get(topic))
+            .map(|t| t.mesh_message_deliveries)
+    }
+}
+
+/// The reason a Gossipsub message has been rejected.
+#[derive(Clone, Copy)]
+pub(crate) enum RejectReason {
+    /// The message failed the configured validation during decoding.
+    ValidationError(ValidationError),
+    /// The message source is us.
+    SelfOrigin,
+    /// The peer that sent the message was blacklisted.
+    BlackListedPeer,
+    /// The source (from field) of the message was blacklisted.
+    BlackListedSource,
+    /// The validation was ignored.
+    ValidationIgnored,
+    /// The validation failed.
+    ValidationFailed,
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/peer_score/params.rs b/beacon_node/lighthouse_network/src/gossipsub/peer_score/params.rs
new file mode 100644
index 000000000..4ece940e5
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/peer_score/params.rs
@@ -0,0 +1,404 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+use crate::gossipsub::TopicHash;
+use std::collections::{HashMap, HashSet};
+use std::net::IpAddr;
+use std::time::Duration;
+
+/// The default number of seconds for a decay interval.
+const DEFAULT_DECAY_INTERVAL: u64 = 1;
+/// The default rate to decay to 0.
+const DEFAULT_DECAY_TO_ZERO: f64 = 0.1;
+
+/// Computes the decay factor for a parameter, assuming the `decay_interval` is 1s
+/// and that the value decays to zero if it drops below 0.1.
+pub fn score_parameter_decay(decay: Duration) -> f64 {
+    score_parameter_decay_with_base(
+        decay,
+        Duration::from_secs(DEFAULT_DECAY_INTERVAL),
+        DEFAULT_DECAY_TO_ZERO,
+    )
+}
+
+/// Computes the decay factor for a parameter using base as the `decay_interval`.
+pub fn score_parameter_decay_with_base(decay: Duration, base: Duration, decay_to_zero: f64) -> f64 {
+    // the decay is exponential: after n ticks the value is factor^n.
+    // solving factor^n = decay_to_zero for factor gives factor = decay_to_zero^(1/n)
+    let ticks = decay.as_secs_f64() / base.as_secs_f64();
+    decay_to_zero.powf(1f64 / ticks)
+}
+
+#[derive(Debug, Clone)]
+pub struct PeerScoreThresholds {
+    /// The score threshold below which gossip propagation is suppressed;
+    /// should be negative.
+    pub gossip_threshold: f64,
+
+    /// The score threshold below which we shouldn't publish when using flood
+    /// publishing (also applies to fanout peers); should be negative and <= `gossip_threshold`.
+    pub publish_threshold: f64,
+
+    /// The score threshold below which message processing is suppressed altogether,
+    /// implementing an effective graylist according to peer score; should be negative and
+    /// <= `publish_threshold`.
+    pub graylist_threshold: f64,
+
+    /// The score threshold below which px will be ignored; this should be positive
+    /// and limited to scores attainable by bootstrappers and other trusted nodes.
+    pub accept_px_threshold: f64,
+
+    /// The median mesh score threshold before triggering opportunistic
+    /// grafting; this should have a small positive value.
+    pub opportunistic_graft_threshold: f64,
+}
+
+impl Default for PeerScoreThresholds {
+    fn default() -> Self {
+        PeerScoreThresholds {
+            gossip_threshold: -10.0,
+            publish_threshold: -50.0,
+            graylist_threshold: -80.0,
+            accept_px_threshold: 10.0,
+            opportunistic_graft_threshold: 20.0,
+        }
+    }
+}
+
+impl PeerScoreThresholds {
+    pub fn validate(&self) -> Result<(), &'static str> {
+        if self.gossip_threshold > 0f64 {
+            return Err("invalid gossip threshold; it must be <= 0");
+        }
+        if self.publish_threshold > 0f64 || self.publish_threshold > self.gossip_threshold {
+            return Err("Invalid publish threshold; it must be <= 0 and <= gossip threshold");
+        }
+        if self.graylist_threshold > 0f64 || self.graylist_threshold > self.publish_threshold {
+            return Err("Invalid graylist threshold; it must be <= 0 and <= publish threshold");
+        }
+        if self.accept_px_threshold < 0f64 {
+            return Err("Invalid accept px threshold; it must be >= 0");
+        }
+        if self.opportunistic_graft_threshold < 0f64 {
+            return Err("Invalid opportunistic grafting threshold; it must be >= 0");
+        }
+        Ok(())
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct PeerScoreParams {
+    /// Score parameters per topic.
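+    /// Only topics with an entry here are scored; per-topic scores are weighted by their
+    /// `topic_weight` and summed, with the positive total capped by `topic_score_cap`.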
+    pub topics: HashMap<TopicHash, TopicScoreParams>,
+
+    /// Aggregate topic score cap; this limits the total contribution of topics towards a positive
+    /// score. It must be positive (or 0 for no cap).
+    pub topic_score_cap: f64,
+
+    /// P5: Application-specific peer scoring
+    pub app_specific_weight: f64,
+
+    /// P6: IP-colocation factor.
+    /// The parameter has an associated counter which counts the number of peers with the same IP.
+    /// If the number of peers with the same IP exceeds `ip_colocation_factor_threshold`, then the
+    /// value is the square of the difference, ie `(peers_in_same_ip - ip_colocation_factor_threshold)^2`.
+    /// If the number of peers with the same IP is less than the threshold, then the value is 0.
+    /// The weight of the parameter MUST be negative, unless you want to disable it for testing.
+    /// Note: In order to simulate many IPs in a manageable manner when testing, you can set the
+    /// weight to 0, thus disabling the IP colocation penalty.
+    pub ip_colocation_factor_weight: f64,
+    pub ip_colocation_factor_threshold: f64,
+    pub ip_colocation_factor_whitelist: HashSet<IpAddr>,
+
+    /// P7: behavioural pattern penalties.
+    /// This parameter has an associated counter which tracks misbehaviour as detected by the
+    /// router. The router currently applies penalties for the following behaviours:
+    /// - attempting to re-graft before the prune backoff time has elapsed.
+    /// - not following up on IWANT requests for messages advertised with IHAVE.
+    ///
+    /// The value of the parameter is the square of the counter's excess over the threshold,
+    /// and the counter decays with `behaviour_penalty_decay`.
+    /// The weight of the parameter MUST be negative (or zero to disable).
+    pub behaviour_penalty_weight: f64,
+    pub behaviour_penalty_threshold: f64,
+    pub behaviour_penalty_decay: f64,
+
+    /// The decay interval for parameter counters.
+    pub decay_interval: Duration,
+
+    /// Counter value below which it is considered 0.
+    pub decay_to_zero: f64,
+
+    /// Time to remember counters for a disconnected peer.
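+    /// (After this period has elapsed following a disconnect, `refresh_scores` drops the
+    /// peer's entry; until then a retained negative score survives reconnects.)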
+    pub retain_score: Duration,
+
+    /// Slow peer penalty conditions
+    pub slow_peer_weight: f64,
+    pub slow_peer_threshold: f64,
+    pub slow_peer_decay: f64,
+}
+
+impl Default for PeerScoreParams {
+    fn default() -> Self {
+        PeerScoreParams {
+            topics: HashMap::new(),
+            topic_score_cap: 3600.0,
+            app_specific_weight: 10.0,
+            ip_colocation_factor_weight: -5.0,
+            ip_colocation_factor_threshold: 10.0,
+            ip_colocation_factor_whitelist: HashSet::new(),
+            behaviour_penalty_weight: -10.0,
+            behaviour_penalty_threshold: 0.0,
+            behaviour_penalty_decay: 0.2,
+            decay_interval: Duration::from_secs(DEFAULT_DECAY_INTERVAL),
+            decay_to_zero: DEFAULT_DECAY_TO_ZERO,
+            retain_score: Duration::from_secs(3600),
+            slow_peer_weight: -0.2,
+            slow_peer_threshold: 0.0,
+            slow_peer_decay: 0.2,
+        }
+    }
+}
+
+/// Peer score parameter validation
+impl PeerScoreParams {
+    pub fn validate(&self) -> Result<(), String> {
+        for (topic, params) in self.topics.iter() {
+            if let Err(e) = params.validate() {
+                return Err(format!("Invalid score parameters for topic {topic}: {e}"));
+            }
+        }
+
+        // check that the topic score cap is 0 or positive
+        if self.topic_score_cap < 0f64 {
+            return Err("Invalid topic score cap; must be positive (or 0 for no cap)".into());
+        }
+
+        // check the IP colocation factor
+        if self.ip_colocation_factor_weight > 0f64 {
+            return Err(
+                "Invalid ip_colocation_factor_weight; must be negative (or 0 to disable)".into(),
+            );
+        }
+        if self.ip_colocation_factor_weight != 0f64 && self.ip_colocation_factor_threshold < 1f64 {
+            return Err("Invalid ip_colocation_factor_threshold; must be at least 1".into());
+        }
+
+        // check the behaviour penalty
+        if self.behaviour_penalty_weight > 0f64 {
+            return Err(
+                "Invalid behaviour_penalty_weight; must be negative (or 0 to disable)".into(),
+            );
+        }
+        if self.behaviour_penalty_weight != 0f64
+            && (self.behaviour_penalty_decay <= 0f64 || self.behaviour_penalty_decay >= 1f64)
+        {
+            return Err("invalid behaviour_penalty_decay; must be between 0 and 1".into());
+        }
+
+        if self.behaviour_penalty_threshold < 0f64 {
+            return Err("invalid behaviour_penalty_threshold; must be >= 0".into());
+        }
+
+        // check the decay parameters
+        if self.decay_interval < Duration::from_secs(1) {
+            return Err("Invalid decay_interval; must be at least 1s".into());
+        }
+        if self.decay_to_zero <= 0f64 || self.decay_to_zero >= 1f64 {
+            return Err("Invalid decay_to_zero; must be between 0 and 1".into());
+        }
+
+        // no need to check the score retention; a value of 0 means that we don't retain scores
+        Ok(())
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct TopicScoreParams {
+    /// The weight of the topic.
+    pub topic_weight: f64,
+
+    /// P1: time in the mesh
+    /// This is the time the peer has been grafted in the mesh.
+    /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap`.
+    /// The weight of the parameter must be positive (or zero to disable).
+    pub time_in_mesh_weight: f64,
+    pub time_in_mesh_quantum: Duration,
+    pub time_in_mesh_cap: f64,
+
+    /// P2: first message deliveries
+    /// This is the number of message deliveries in the topic.
+    /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, and capped
+    /// by `first_message_deliveries_cap`.
+    /// The weight of the parameter MUST be positive (or zero to disable).
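+    /// (Worked example with the defaults below: 100 first deliveries give a P2 counter of
+    /// 100; one decay interval at decay 0.5 halves it to 50, and the cap of 2000 never binds.)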
+    pub first_message_deliveries_weight: f64,
+    pub first_message_deliveries_decay: f64,
+    pub first_message_deliveries_cap: f64,
+
+    /// P3: mesh message deliveries
+    /// This is the number of message deliveries in the mesh, within the
+    /// `mesh_message_deliveries_window` of message validation; deliveries during validation also
+    /// count and are retroactively applied when validation succeeds.
+    /// This window accounts for the minimum time before a hostile mesh peer trying to game the
+    /// score could replay back a valid message we just sent them.
+    /// It effectively tracks first and near-first deliveries, ie a message seen from a mesh peer
+    /// before we have forwarded it to them.
+    /// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`.
+    /// If the counter exceeds the threshold, its value is 0.
+    /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square of
+    /// the deficit, ie `(mesh_message_deliveries_threshold - counter)^2`.
+    /// The penalty is only activated after `mesh_message_deliveries_activation` time in the mesh.
+    /// The weight of the parameter MUST be negative (or zero to disable).
+    pub mesh_message_deliveries_weight: f64,
+    pub mesh_message_deliveries_decay: f64,
+    pub mesh_message_deliveries_cap: f64,
+    pub mesh_message_deliveries_threshold: f64,
+    pub mesh_message_deliveries_window: Duration,
+    pub mesh_message_deliveries_activation: Duration,
+
+    /// P3b: sticky mesh propagation failures
+    /// This is a sticky penalty that applies when a peer gets pruned from the mesh with an active
+    /// mesh message delivery penalty.
+    /// The weight of the parameter MUST be negative (or zero to disable).
+    pub mesh_failure_penalty_weight: f64,
+    pub mesh_failure_penalty_decay: f64,
+
+    /// P4: invalid messages
+    /// This is the number of invalid messages in the topic.
+    /// The value of the parameter is the square of the counter, decaying with
+    /// `invalid_message_deliveries_decay`.
+    /// The weight of the parameter MUST be negative (or zero to disable).
+    pub invalid_message_deliveries_weight: f64,
+    pub invalid_message_deliveries_decay: f64,
+}
+
+/// NOTE: The topic score parameters are very network-specific.
+/// For any production system, these values should be manually set.
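+///
+/// A minimal sketch of overriding a couple of fields (the values are purely illustrative,
+/// not recommendations):
+/// ```ignore
+/// let params = TopicScoreParams {
+///     topic_weight: 0.25,
+///     time_in_mesh_cap: 100.0,
+///     ..Default::default()
+/// };
+/// assert!(params.validate().is_ok());
+/// ```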
+impl Default for TopicScoreParams { + fn default() -> Self { + TopicScoreParams { + topic_weight: 0.5, + // P1 + time_in_mesh_weight: 1.0, + time_in_mesh_quantum: Duration::from_millis(1), + time_in_mesh_cap: 3600.0, + // P2 + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 0.5, + first_message_deliveries_cap: 2000.0, + // P3 + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_decay: 0.5, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_activation: Duration::from_secs(5), + // P3b + mesh_failure_penalty_weight: -1.0, + mesh_failure_penalty_decay: 0.5, + // P4 + invalid_message_deliveries_weight: -1.0, + invalid_message_deliveries_decay: 0.3, + } + } +} + +impl TopicScoreParams { + pub fn validate(&self) -> Result<(), &'static str> { + // make sure we have a sane topic weight + if self.topic_weight < 0f64 { + return Err("invalid topic weight; must be >= 0"); + } + + if self.time_in_mesh_quantum == Duration::from_secs(0) { + return Err("Invalid time_in_mesh_quantum; must be non zero"); + } + if self.time_in_mesh_weight < 0f64 { + return Err("Invalid time_in_mesh_weight; must be positive (or 0 to disable)"); + } + if self.time_in_mesh_weight != 0f64 && self.time_in_mesh_cap <= 0f64 { + return Err("Invalid time_in_mesh_cap must be positive"); + } + + if self.first_message_deliveries_weight < 0f64 { + return Err( + "Invalid first_message_deliveries_weight; must be positive (or 0 to disable)", + ); + } + if self.first_message_deliveries_weight != 0f64 + && (self.first_message_deliveries_decay <= 0f64 + || self.first_message_deliveries_decay >= 1f64) + { + return Err("Invalid first_message_deliveries_decay; must be between 0 and 1"); + } + if self.first_message_deliveries_weight != 0f64 && self.first_message_deliveries_cap <= 0f64 + { + return Err("Invalid first_message_deliveries_cap must be positive"); + } + + if self.mesh_message_deliveries_weight > 0f64 { + return Err( + "Invalid mesh_message_deliveries_weight; must be negative (or 0 to disable)", + ); + } + if self.mesh_message_deliveries_weight != 0f64 + && (self.mesh_message_deliveries_decay <= 0f64 + || self.mesh_message_deliveries_decay >= 1f64) + { + return Err("Invalid mesh_message_deliveries_decay; must be between 0 and 1"); + } + if self.mesh_message_deliveries_weight != 0f64 && self.mesh_message_deliveries_cap <= 0f64 { + return Err("Invalid mesh_message_deliveries_cap must be positive"); + } + if self.mesh_message_deliveries_weight != 0f64 + && self.mesh_message_deliveries_threshold <= 0f64 + { + return Err("Invalid mesh_message_deliveries_threshold; must be positive"); + } + if self.mesh_message_deliveries_weight != 0f64 + && self.mesh_message_deliveries_activation < Duration::from_secs(1) + { + return Err("Invalid mesh_message_deliveries_activation; must be at least 1s"); + } + + // check P3b + if self.mesh_failure_penalty_weight > 0f64 { + return Err("Invalid mesh_failure_penalty_weight; must be negative (or 0 to disable)"); + } + if self.mesh_failure_penalty_weight != 0f64 + && (self.mesh_failure_penalty_decay <= 0f64 || self.mesh_failure_penalty_decay >= 1f64) + { + return Err("Invalid mesh_failure_penalty_decay; must be between 0 and 1"); + } + + // check P4 + if self.invalid_message_deliveries_weight > 0f64 { + return Err( + "Invalid invalid_message_deliveries_weight; must be negative (or 0 to disable)", + ); + } + if self.invalid_message_deliveries_decay <= 0f64 + || 
self.invalid_message_deliveries_decay >= 1f64 + { + return Err("Invalid invalid_message_deliveries_decay; must be between 0 and 1"); + } + Ok(()) + } +} diff --git a/beacon_node/lighthouse_network/src/gossipsub/peer_score/tests.rs b/beacon_node/lighthouse_network/src/gossipsub/peer_score/tests.rs new file mode 100644 index 000000000..97587ebdb --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/peer_score/tests.rs @@ -0,0 +1,978 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +/// A collection of unit tests mostly ported from the go implementation. +use super::*; + +use crate::gossipsub::types::RawMessage; +use crate::gossipsub::{IdentTopic as Topic, Message}; + +// estimates a value within variance +fn within_variance(value: f64, expected: f64, variance: f64) -> bool { + if expected >= 0.0 { + return value > expected * (1.0 - variance) && value < expected * (1.0 + variance); + } + value > expected * (1.0 + variance) && value < expected * (1.0 - variance) +} + +// generates a random gossipsub message with sequence number i +fn make_test_message(seq: u64) -> (MessageId, RawMessage) { + let raw_message = RawMessage { + source: Some(PeerId::random()), + data: vec![12, 34, 56], + sequence_number: Some(seq), + topic: Topic::new("test").hash(), + signature: None, + key: None, + validated: true, + }; + + let message = Message { + source: raw_message.source, + data: raw_message.data.clone(), + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }; + + let id = default_message_id()(&message); + (id, raw_message) +} + +fn default_message_id() -> fn(&Message) -> MessageId { + |message| { + // default message id is: source + sequence number + // NOTE: If either the peer_id or source is not provided, we set to 0; + let mut source_string = if let Some(peer_id) = message.source.as_ref() { + peer_id.to_base58() + } else { + PeerId::from_bytes(&[0, 1, 0]) + .expect("Valid peer id") + .to_base58() + }; + source_string.push_str(&message.sequence_number.unwrap_or_default().to_string()); + MessageId::from(source_string) + } +} + +#[test] +fn test_score_time_in_mesh() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams { + topic_score_cap: 1000.0, + ..Default::default() + }; + + let topic_params = TopicScoreParams { + topic_weight: 0.5, + time_in_mesh_weight: 1.0, + time_in_mesh_quantum: 
Duration::from_millis(1), + time_in_mesh_cap: 3600.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + + let peer_id = PeerId::random(); + + let mut peer_score = PeerScore::new(params); + // Peer score should start at 0 + peer_score.add_peer(peer_id); + + let score = peer_score.score(&peer_id); + assert!( + score == 0.0, + "expected score to start at zero. Score found: {score}" + ); + + // The time in mesh depends on how long the peer has been grafted + peer_score.graft(&peer_id, topic); + let elapsed = topic_params.time_in_mesh_quantum * 200; + std::thread::sleep(elapsed); + peer_score.refresh_scores(); + + let score = peer_score.score(&peer_id); + let expected = topic_params.topic_weight + * topic_params.time_in_mesh_weight + * (elapsed.as_millis() / topic_params.time_in_mesh_quantum.as_millis()) as f64; + assert!( + score >= expected, + "The score: {score} should be greater than or equal to: {expected}" + ); +} + +#[test] +fn test_score_time_in_mesh_cap() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 0.5, + time_in_mesh_weight: 1.0, + time_in_mesh_quantum: Duration::from_millis(1), + time_in_mesh_cap: 10.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + + let peer_id = PeerId::random(); + + let mut peer_score = PeerScore::new(params); + // Peer score should start at 0 + peer_score.add_peer(peer_id); + + let score = peer_score.score(&peer_id); + assert!( + score == 0.0, + "expected score to start at zero. Score found: {score}" + ); + + // The time in mesh depends on how long the peer has been grafted + peer_score.graft(&peer_id, topic); + let elapsed = topic_params.time_in_mesh_quantum * 40; + std::thread::sleep(elapsed); + peer_score.refresh_scores(); + + let score = peer_score.score(&peer_id); + let expected = topic_params.topic_weight + * topic_params.time_in_mesh_weight + * topic_params.time_in_mesh_cap; + let variance = 0.5; + assert!( + within_variance(score, expected, variance), + "The score: {} should be within {} of {}", + score, + score * variance, + expected + ); +} + +#[test] +fn test_score_first_message_deliveries() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 1.0, + first_message_deliveries_cap: 2000.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + + let peer_id = PeerId::random(); + + let mut peer_score = PeerScore::new(params); + // Peer score should start at 0 + peer_score.add_peer(peer_id); + peer_score.graft(&peer_id, topic); + + // deliver a bunch of messages from the peer + let messages = 100; + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id, &id, &msg.topic); + peer_score.deliver_message(&peer_id, &id, &msg.topic); + } + + peer_score.refresh_scores(); + + let score = peer_score.score(&peer_id); + let expected = + topic_params.topic_weight * topic_params.first_message_deliveries_weight * messages as f64; + assert!(score == expected, "The score: {score} should be {expected}"); +} + +#[test] +fn 
test_score_first_message_deliveries_cap() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 1.0, // test without decay + first_message_deliveries_cap: 50.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + + let peer_id = PeerId::random(); + + let mut peer_score = PeerScore::new(params); + // Peer score should start at 0 + peer_score.add_peer(peer_id); + peer_score.graft(&peer_id, topic); + + // deliver a bunch of messages from the peer + let messages = 100; + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id, &id, &msg.topic); + peer_score.deliver_message(&peer_id, &id, &msg.topic); + } + + peer_score.refresh_scores(); + let score = peer_score.score(&peer_id); + let expected = topic_params.topic_weight + * topic_params.first_message_deliveries_weight + * topic_params.first_message_deliveries_cap; + assert!(score == expected, "The score: {score} should be {expected}"); +} + +#[test] +fn test_score_first_message_deliveries_decay() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 0.9, // decay 10% per decay interval + first_message_deliveries_cap: 2000.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + let peer_id = PeerId::random(); + let mut peer_score = PeerScore::new(params); + peer_score.add_peer(peer_id); + peer_score.graft(&peer_id, topic); + + // deliver a bunch of messages from the peer + let messages = 100; + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id, &id, &msg.topic); + peer_score.deliver_message(&peer_id, &id, &msg.topic); + } + + peer_score.refresh_scores(); + let score = peer_score.score(&peer_id); + let mut expected = topic_params.topic_weight + * topic_params.first_message_deliveries_weight + * topic_params.first_message_deliveries_decay + * messages as f64; + assert!(score == expected, "The score: {score} should be {expected}"); + + // refreshing the scores applies the decay param + let decay_intervals = 10; + for _ in 0..decay_intervals { + peer_score.refresh_scores(); + expected *= topic_params.first_message_deliveries_decay; + } + let score = peer_score.score(&peer_id); + assert!(score == expected, "The score: {score} should be {expected}"); +} + +#[test] +fn test_score_mesh_message_deliveries() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + 
..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + let mut peer_score = PeerScore::new(params); + + // peer A always delivers the message first. + // peer B delivers next (within the delivery window). + // peer C delivers outside the delivery window. + // we expect peers A and B to have a score of zero, since all other parameter weights are zero. + // Peer C should have a negative score. + let peer_id_a = PeerId::random(); + let peer_id_b = PeerId::random(); + let peer_id_c = PeerId::random(); + + let peers = vec![peer_id_a, peer_id_b, peer_id_c]; + + for peer_id in &peers { + peer_score.add_peer(*peer_id); + peer_score.graft(peer_id, topic.clone()); + } + + // assert that nobody has been penalized yet for not delivering messages before activation time + peer_score.refresh_scores(); + for peer_id in &peers { + let score = peer_score.score(peer_id); + assert!( + score >= 0.0, + "expected no mesh delivery penalty before activation time, got score {score}" + ); + } + + // wait for the activation time to kick in + std::thread::sleep(topic_params.mesh_message_deliveries_activation); + + // deliver a bunch of messages from peer A, with duplicates within the window from peer B, + // and duplicates outside the window from peer C. + let messages = 100; + let mut messages_to_send = Vec::new(); + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); + peer_score.deliver_message(&peer_id_a, &id, &msg.topic); + + peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); + messages_to_send.push((id, msg)); + } + + std::thread::sleep(topic_params.mesh_message_deliveries_window + Duration::from_millis(20)); + + for (id, msg) in messages_to_send { + peer_score.duplicated_message(&peer_id_c, &id, &msg.topic); + } + + peer_score.refresh_scores(); + let score_a = peer_score.score(&peer_id_a); + let score_b = peer_score.score(&peer_id_b); + let score_c = peer_score.score(&peer_id_c); + + assert!( + score_a >= 0.0, + "expected non-negative score for Peer A, got score {score_a}" + ); + assert!( + score_b >= 0.0, + "expected non-negative score for Peer B, got score {score_b}" + ); + + // the penalty is the difference between the threshold and the actual mesh deliveries, squared. + // since we didn't deliver anything, this is just the value of the threshold + let penalty = topic_params.mesh_message_deliveries_threshold + * topic_params.mesh_message_deliveries_threshold; + let expected = + topic_params.topic_weight * topic_params.mesh_message_deliveries_weight * penalty; + + assert!(score_c == expected, "Score: {score_c}. 
Expected {expected}"); +} + +#[test] +fn test_score_mesh_message_deliveries_decay() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 0.9, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + mesh_failure_penalty_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + let mut peer_score = PeerScore::new(params); + + let peer_id_a = PeerId::random(); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); + + // deliver a bunch of messages from peer A + let messages = 100; + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); + peer_score.deliver_message(&peer_id_a, &id, &msg.topic); + } + + // we should have a positive score, since we delivered more messages than the threshold + peer_score.refresh_scores(); + + let score_a = peer_score.score(&peer_id_a); + assert!( + score_a >= 0.0, + "expected non-negative score for Peer A, got score {score_a}" + ); + + let mut decayed_delivery_count = (messages as f64) * topic_params.mesh_message_deliveries_decay; + for _ in 0..20 { + peer_score.refresh_scores(); + decayed_delivery_count *= topic_params.mesh_message_deliveries_decay; + } + + let score_a = peer_score.score(&peer_id_a); + // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared. + let deficit = topic_params.mesh_message_deliveries_threshold - decayed_delivery_count; + let penalty = deficit * deficit; + let expected = + topic_params.topic_weight * topic_params.mesh_message_deliveries_weight * penalty; + + assert_eq!(score_a, expected, "Invalid score"); +} + +#[test] +fn test_score_mesh_failure_penalty() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + // the mesh failure penalty is applied when a peer is pruned while their + // mesh deliveries are under the threshold. 
+        // for this test, we set the mesh delivery threshold, but set
+        // mesh_message_deliveries to zero, so the only effect on the score
+        // is from the mesh failure penalty
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        mesh_message_deliveries_activation: Duration::from_secs(0),
+        mesh_message_deliveries_window: Duration::from_millis(10),
+        mesh_message_deliveries_threshold: 20.0,
+        mesh_message_deliveries_cap: 100.0,
+        mesh_message_deliveries_decay: 1.0,
+        first_message_deliveries_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        mesh_failure_penalty_weight: -1.0,
+        mesh_failure_penalty_decay: 1.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = PeerId::random();
+    let peer_id_b = PeerId::random();
+
+    let peers = vec![peer_id_a, peer_id_b];
+
+    for peer_id in &peers {
+        peer_score.add_peer(*peer_id);
+        peer_score.graft(peer_id, topic.clone());
+    }
+
+    // deliver a bunch of messages from peer A
+    let messages = 100;
+    for seq in 0..messages {
+        let (id, msg) = make_test_message(seq);
+
+        peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+        peer_score.deliver_message(&peer_id_a, &id, &msg.topic);
+    }
+
+    // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+    assert!(
+        score_a >= 0.0,
+        "expected non-negative score for Peer A, got score {score_a}"
+    );
+    assert!(
+        score_b >= 0.0,
+        "expected non-negative score for Peer B, got score {score_b}"
+    );
+
+    // prune peer B to apply the penalty
+    peer_score.prune(&peer_id_b, topic.hash());
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+
+    assert_eq!(score_a, 0.0, "expected Peer A to have a score of 0");
+
+    // penalty calculation is the same as for mesh_message_deliveries, but multiplied by
+    // mesh_failure_penalty_weight instead of mesh_message_deliveries_weight
+    let penalty = topic_params.mesh_message_deliveries_threshold
+        * topic_params.mesh_message_deliveries_threshold;
+    let expected = topic_params.topic_weight * topic_params.mesh_failure_penalty_weight * penalty;
+
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_b, expected, "Peer B should have expected score");
+}
+
+#[test]
+fn test_score_invalid_message_deliveries() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        mesh_message_deliveries_activation: Duration::from_secs(1),
+        mesh_message_deliveries_window: Duration::from_millis(10),
+        mesh_message_deliveries_threshold: 20.0,
+        mesh_message_deliveries_cap: 100.0,
+        mesh_message_deliveries_decay: 1.0,
+        first_message_deliveries_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        invalid_message_deliveries_weight: -1.0,
+        invalid_message_deliveries_decay: 1.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = PeerId::random();
+    peer_score.add_peer(peer_id_a);
+    peer_score.graft(&peer_id_a, topic);
+
+    // reject a bunch of messages from peer A
+    let messages = 100;
+    for seq in 0..messages {
+        let (id, msg) = make_test_message(seq);
+        peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
+    }
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+
+    let expected = topic_params.topic_weight
+        * topic_params.invalid_message_deliveries_weight
+        * (messages * messages) as f64;
+
+    assert_eq!(score_a, expected, "Peer has unexpected score");
+}
+
+#[test]
+fn test_score_invalid_message_deliveries_decay() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        mesh_message_deliveries_activation: Duration::from_secs(1),
+        mesh_message_deliveries_window: Duration::from_millis(10),
+        mesh_message_deliveries_threshold: 20.0,
+        mesh_message_deliveries_cap: 100.0,
+        mesh_message_deliveries_decay: 1.0,
+        first_message_deliveries_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        invalid_message_deliveries_weight: -1.0,
+        invalid_message_deliveries_decay: 0.9,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = PeerId::random();
+    peer_score.add_peer(peer_id_a);
+    peer_score.graft(&peer_id_a, topic);
+
+    // reject a bunch of messages from peer A
+    let messages = 100;
+    for seq in 0..messages {
+        let (id, msg) = make_test_message(seq);
+        peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
+    }
+
+    peer_score.refresh_scores();
+
+    let decay = topic_params.invalid_message_deliveries_decay * messages as f64;
+
+    let mut expected =
+        topic_params.topic_weight * topic_params.invalid_message_deliveries_weight * decay * decay;
+
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(score_a, expected, "Peer has unexpected score");
+
+    // refresh scores a few times to apply decay
+    for _ in 0..10 {
+        peer_score.refresh_scores();
+        expected *= topic_params.invalid_message_deliveries_decay
+            * topic_params.invalid_message_deliveries_decay;
+    }
+
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(score_a, expected, "Peer has unexpected score");
+}
+
+#[test]
+fn test_score_reject_message_deliveries() {
+    // This test adds coverage for the dark corners of rejection tracing.
+
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        first_message_deliveries_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        invalid_message_deliveries_weight: -1.0,
+        invalid_message_deliveries_decay: 1.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params);
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = PeerId::random();
+    let peer_id_b = PeerId::random();
+
+    let peers = vec![peer_id_a, peer_id_b];
+
+    for peer_id in &peers {
+        peer_score.add_peer(*peer_id);
+    }
+
+    let (id, msg) = make_test_message(1);
+
+    // these should have no effect on the score
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedPeer);
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedSource);
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, 0.0, "Should have no effect on the score");
+    assert_eq!(score_b, 0.0, "Should have no effect on the score");
+
+    // insert a record in the message deliveries
+    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+
+    // this should have no effect on the score, and subsequent duplicate messages should have no
+    // effect either
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
+    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, 0.0, "Should have no effect on the score");
+    assert_eq!(score_b, 0.0, "Should have no effect on the score");
+
+    // now clear the delivery record
+    peer_score.deliveries.clear();
+
+    // insert a record in the message deliveries
+    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+
+    // this should have no effect on the score, and subsequent duplicate messages should have no
+    // effect either
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
+    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, 0.0, "Should have no effect on the score");
+    assert_eq!(score_b, 0.0, "Should have no effect on the score");
+
+    // now clear the delivery record
+    peer_score.deliveries.clear();
+
+    // insert a new record in the message deliveries
+    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+
+    // and reject the message to make sure duplicates are also penalized
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
+    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, -1.0, "Score should be affected");
+    assert_eq!(score_b, -1.0, "Score should be affected");
+
+    // now clear the delivery record again
+    peer_score.deliveries.clear();
+
+    // insert a new record in the message deliveries
+    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+
+    // and reject the message after a duplicate has arrived
+    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, -4.0, "Score should be affected");
+    assert_eq!(score_b, -4.0, "Score should be affected");
+}
+
+#[test]
+fn test_application_score() {
+    // Create parameters with reasonable default values
+    let app_specific_weight = 0.5;
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams {
+        app_specific_weight,
+        ..Default::default()
+    };
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        first_message_deliveries_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        invalid_message_deliveries_weight: 0.0,
+        invalid_message_deliveries_decay: 1.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params);
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = PeerId::random();
+    peer_score.add_peer(peer_id_a);
+    peer_score.graft(&peer_id_a, topic);
+
+    let messages = 100;
+    for i in -100..messages {
+        let app_score_value = i as f64;
+        peer_score.set_application_score(&peer_id_a, app_score_value);
+        peer_score.refresh_scores();
+        let score_a = peer_score.score(&peer_id_a);
+        let expected = (i as f64) * app_specific_weight;
+        assert_eq!(score_a, expected, "Peer has unexpected score");
+    }
+}
+
+#[test]
+fn test_score_ip_colocation() {
+    // Create parameters with reasonable default values
+    let ip_colocation_factor_weight = -1.0;
+    let ip_colocation_factor_threshold = 1.0;
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams {
+        ip_colocation_factor_weight,
+        ip_colocation_factor_threshold,
+        ..Default::default()
+    };
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        first_message_deliveries_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        invalid_message_deliveries_weight: 0.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params);
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = PeerId::random();
+    let peer_id_b = PeerId::random();
+    let peer_id_c = PeerId::random();
+    let peer_id_d = PeerId::random();
+
+    let peers = vec![peer_id_a, peer_id_b, peer_id_c, peer_id_d];
+    for peer_id in &peers {
+        peer_score.add_peer(*peer_id);
+        peer_score.graft(peer_id, topic.clone());
+    }
+
+    // peer A should have no penalty, but B, C, and D should be penalized for sharing an IP
+    peer_score.add_ip(&peer_id_a, "1.2.3.4".parse().unwrap());
+    peer_score.add_ip(&peer_id_b, "2.3.4.5".parse().unwrap());
+    peer_score.add_ip(&peer_id_c, "2.3.4.5".parse().unwrap());
+    peer_score.add_ip(&peer_id_c, "3.4.5.6".parse().unwrap());
+    peer_score.add_ip(&peer_id_d, "2.3.4.5".parse().unwrap());
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+    let score_c = peer_score.score(&peer_id_c);
+    let score_d = peer_score.score(&peer_id_d);
+
+    assert_eq!(score_a, 0.0, "Peer A should be unaffected");
+
+    let n_shared = 3.0;
+    let ip_surplus = n_shared - ip_colocation_factor_threshold;
+    let penalty = ip_surplus * ip_surplus;
+    let expected = ip_colocation_factor_weight * penalty;
+
+    assert_eq!(score_b, expected, "Peer B should have expected score");
+    assert_eq!(score_c, expected, "Peer C should have expected score");
+    assert_eq!(score_d, expected, "Peer D should have expected score");
+}
+
+#[test]
+fn test_score_behaviour_penalty() {
+    // Create parameters with reasonable default values
+    let behaviour_penalty_weight = -1.0;
+    let behaviour_penalty_decay = 0.99;
+
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams {
+        behaviour_penalty_decay,
+        behaviour_penalty_weight,
+        ..Default::default()
+    };
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        first_message_deliveries_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        invalid_message_deliveries_weight: 0.0,
+        ..Default::default()
+    };
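+
+    // (Worked arithmetic for the assertions below: with threshold 0 and weight -1.0, a
+    // behaviour counter of c contributes -c^2, so two penalties give -4.0; one refresh at
+    // decay 0.99 shrinks the counter to 1.98, giving -(1.98^2) = -3.9204.)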
+
+    params.topics.insert(topic_hash, topic_params);
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = PeerId::random();
+
+    // add a penalty to a non-existent peer.
+    peer_score.add_penalty(&peer_id_a, 1);
+
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(score_a, 0.0, "Peer A should be unaffected");
+
+    // add the peer and test penalties
+    peer_score.add_peer(peer_id_a);
+    assert_eq!(score_a, 0.0, "Peer A should be unaffected");
+
+    peer_score.add_penalty(&peer_id_a, 1);
+
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(score_a, -1.0, "Peer A should have been penalized");
+
+    peer_score.add_penalty(&peer_id_a, 1);
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(score_a, -4.0, "Peer A should have been penalized");
+
+    peer_score.refresh_scores();
+
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(score_a, -3.9204, "Peer A should have been penalized");
+}
+
+#[test]
+fn test_score_retention() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let app_specific_weight = 1.0;
+    let app_score_value = -1000.0;
+    let retain_score = Duration::from_secs(1);
+    let mut params = PeerScoreParams {
+        app_specific_weight,
+        retain_score,
+        ..Default::default()
+    };
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 0.0,
+        mesh_message_deliveries_weight: 0.0,
+        mesh_message_deliveries_activation: Duration::from_secs(0),
+        first_message_deliveries_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params);
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = PeerId::random();
+    peer_score.add_peer(peer_id_a);
+    peer_score.graft(&peer_id_a, topic);
+
+    peer_score.set_application_score(&peer_id_a, app_score_value);
+
+    // score should equal -1000 (app specific score)
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(
+        score_a, app_score_value,
+        "Score should be the application specific score"
+    );
+
+    // disconnect & wait half of RetainScore time. Should still have negative score
+    peer_score.remove_peer(&peer_id_a);
+    std::thread::sleep(retain_score / 2);
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(
+        score_a, app_score_value,
+        "Score should be the application specific score"
+    );
+
+    // wait remaining time (plus a little slop) and the score should reset to zero
+    std::thread::sleep(retain_score / 2 + Duration::from_millis(50));
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(
+        score_a, 0.0,
+        "Score should have reset to zero after the retention period"
+    );
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/protocol.rs b/beacon_node/lighthouse_network/src/gossipsub/protocol.rs
new file mode 100644
index 000000000..fe6c8f787
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/protocol.rs
@@ -0,0 +1,625 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+use super::config::ValidationMode;
+use super::handler::HandlerEvent;
+use super::rpc_proto::proto;
+use super::topic::TopicHash;
+use super::types::{
+    ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc,
+    Subscription, SubscriptionAction,
+};
+use super::ValidationError;
+use asynchronous_codec::{Decoder, Encoder, Framed};
+use byteorder::{BigEndian, ByteOrder};
+use bytes::BytesMut;
+use futures::future;
+use futures::prelude::*;
+use libp2p::core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+use libp2p::identity::{PeerId, PublicKey};
+use libp2p::swarm::StreamProtocol;
+use quick_protobuf::Writer;
+use std::pin::Pin;
+use void::Void;
+
+pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:";
+
+pub(crate) const GOSSIPSUB_1_1_0_PROTOCOL: ProtocolId = ProtocolId {
+    protocol: StreamProtocol::new("/meshsub/1.1.0"),
+    kind: PeerKind::Gossipsubv1_1,
+};
+pub(crate) const GOSSIPSUB_1_0_0_PROTOCOL: ProtocolId = ProtocolId {
+    protocol: StreamProtocol::new("/meshsub/1.0.0"),
+    kind: PeerKind::Gossipsub,
+};
+pub(crate) const FLOODSUB_PROTOCOL: ProtocolId = ProtocolId {
+    protocol: StreamProtocol::new("/floodsub/1.0.0"),
+    kind: PeerKind::Floodsub,
+};
+
+/// Implementation of [`InboundUpgrade`] and [`OutboundUpgrade`] for the Gossipsub protocol.
+#[derive(Debug, Clone)]
+pub struct ProtocolConfig {
+    /// The Gossipsub protocol ids to listen on.
+    pub(crate) protocol_ids: Vec<ProtocolId>,
+    /// The maximum transmit size for a packet.
+    pub(crate) max_transmit_size: usize,
+    /// Determines the level of validation to be done on incoming messages.
+    pub(crate) validation_mode: ValidationMode,
+}
+
+impl Default for ProtocolConfig {
+    fn default() -> Self {
+        Self {
+            max_transmit_size: 65536,
+            validation_mode: ValidationMode::Strict,
+            protocol_ids: vec![GOSSIPSUB_1_1_0_PROTOCOL, GOSSIPSUB_1_0_0_PROTOCOL],
+        }
+    }
+}
+
+/// The protocol ID
+#[derive(Clone, Debug, PartialEq)]
+pub struct ProtocolId {
+    /// The RPC message type/name.
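+    /// e.g. `/meshsub/1.1.0` for gossipsub v1.1 or `/floodsub/1.0.0` for floodsub.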
+    pub protocol: StreamProtocol,
+    /// The type of protocol we support
+    pub kind: PeerKind,
+}
+
+impl AsRef<str> for ProtocolId {
+    fn as_ref(&self) -> &str {
+        self.protocol.as_ref()
+    }
+}
+
+impl UpgradeInfo for ProtocolConfig {
+    type Info = ProtocolId;
+    type InfoIter = Vec<Self::Info>;
+
+    fn protocol_info(&self) -> Self::InfoIter {
+        self.protocol_ids.clone()
+    }
+}
+
+impl<TSocket> InboundUpgrade<TSocket> for ProtocolConfig
+where
+    TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+{
+    type Output = (Framed<TSocket, GossipsubCodec>, PeerKind);
+    type Error = Void;
+    type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;
+
+    fn upgrade_inbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future {
+        Box::pin(future::ok((
+            Framed::new(
+                socket,
+                GossipsubCodec::new(self.max_transmit_size, self.validation_mode),
+            ),
+            protocol_id.kind,
+        )))
+    }
+}
+
+impl<TSocket> OutboundUpgrade<TSocket> for ProtocolConfig
+where
+    TSocket: AsyncWrite + AsyncRead + Unpin + Send + 'static,
+{
+    type Output = (Framed<TSocket, GossipsubCodec>, PeerKind);
+    type Error = Void;
+    type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;
+
+    fn upgrade_outbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future {
+        Box::pin(future::ok((
+            Framed::new(
+                socket,
+                GossipsubCodec::new(self.max_transmit_size, self.validation_mode),
+            ),
+            protocol_id.kind,
+        )))
+    }
+}
+
+/* Gossip codec for the framing */
+
+pub struct GossipsubCodec {
+    /// Determines the level of validation performed on incoming messages.
+    validation_mode: ValidationMode,
+    /// The codec to handle common encoding/decoding of protobuf messages
+    codec: quick_protobuf_codec::Codec<proto::RPC>,
+}
+
+impl GossipsubCodec {
+    pub fn new(max_length: usize, validation_mode: ValidationMode) -> GossipsubCodec {
+        let codec = quick_protobuf_codec::Codec::new(max_length);
+        GossipsubCodec {
+            validation_mode,
+            codec,
+        }
+    }
+
+    /// Verifies a gossipsub message. This returns either a success or failure. All errors
+    /// are logged, which prevents error handling in the codec and handler. We simply drop invalid
+    /// messages and log warnings, rather than propagating errors through the codec.
+    fn verify_signature(message: &proto::Message) -> bool {
+        use quick_protobuf::MessageWrite;
+
+        let Some(from) = message.from.as_ref() else {
+            tracing::debug!("Signature verification failed: No source id given");
+            return false;
+        };
+
+        let Ok(source) = PeerId::from_bytes(from) else {
+            tracing::debug!("Signature verification failed: Invalid Peer Id");
+            return false;
+        };
+
+        let Some(signature) = message.signature.as_ref() else {
+            tracing::debug!("Signature verification failed: No signature provided");
+            return false;
+        };
+
+        // If there is a key value in the protobuf, use that key, otherwise the key must be
+        // obtained from the inlined source peer_id.
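+        // (Small public keys such as Ed25519 are inlined in the `PeerId` itself; the
+        // two-byte multihash prefix is skipped below to recover the protobuf-encoded key.)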
+        let public_key = match message.key.as_deref().map(PublicKey::try_decode_protobuf) {
+            Some(Ok(key)) => key,
+            _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) {
+                Ok(v) => v,
+                Err(_) => {
+                    tracing::warn!("Signature verification failed: No valid public key supplied");
+                    return false;
+                }
+            },
+        };
+
+        // The key must match the peer_id
+        if source != public_key.to_peer_id() {
+            tracing::warn!(
+                "Signature verification failed: Public key doesn't match source peer id"
+            );
+            return false;
+        }
+
+        // Construct the signature bytes
+        let mut message_sig = message.clone();
+        message_sig.signature = None;
+        message_sig.key = None;
+        let mut buf = Vec::with_capacity(message_sig.get_size());
+        let mut writer = Writer::new(&mut buf);
+        message_sig
+            .write_message(&mut writer)
+            .expect("Encoding to succeed");
+        let mut signature_bytes = SIGNING_PREFIX.to_vec();
+        signature_bytes.extend_from_slice(&buf);
+        public_key.verify(&signature_bytes, signature)
+    }
+}
+
+impl Encoder for GossipsubCodec {
+    type Item<'a> = proto::RPC;
+    type Error = quick_protobuf_codec::Error;
+
+    fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> {
+        self.codec.encode(item, dst)
+    }
+}
+
+impl Decoder for GossipsubCodec {
+    type Item = HandlerEvent;
+    type Error = quick_protobuf_codec::Error;
+
+    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
+        let Some(rpc) = self.codec.decode(src)? else {
+            return Ok(None);
+        };
+        // Store valid messages.
+        let mut messages = Vec::with_capacity(rpc.publish.len());
+        // Store any invalid messages.
+        let mut invalid_messages = Vec::new();
+
+        for message in rpc.publish.into_iter() {
+            // Keep track of the type of invalid message.
+            let mut invalid_kind = None;
+            let mut verify_signature = false;
+            let mut verify_sequence_no = false;
+            let mut verify_source = false;
+
+            match self.validation_mode {
+                ValidationMode::Strict => {
+                    // Validate everything
+                    verify_signature = true;
+                    verify_sequence_no = true;
+                    verify_source = true;
+                }
+                ValidationMode::Permissive => {
+                    // If the fields exist, validate them
+                    if message.signature.is_some() {
+                        verify_signature = true;
+                    }
+                    if message.seqno.is_some() {
+                        verify_sequence_no = true;
+                    }
+                    if message.from.is_some() {
+                        verify_source = true;
+                    }
+                }
+                ValidationMode::Anonymous => {
+                    if message.signature.is_some() {
+                        tracing::warn!(
+                            "Signature field was non-empty and anonymous validation mode is set"
+                        );
+                        invalid_kind = Some(ValidationError::SignaturePresent);
+                    } else if message.seqno.is_some() {
+                        tracing::warn!(
+                            "Sequence number was non-empty and anonymous validation mode is set"
+                        );
+                        invalid_kind = Some(ValidationError::SequenceNumberPresent);
+                    } else if message.from.is_some() {
+                        tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set");
+                        invalid_kind = Some(ValidationError::MessageSourcePresent);
+                    }
+                }
+                ValidationMode::None => {}
+            }
+
+            // If the initial validation logic failed, add the message to invalid messages and
+            // continue processing the others.
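+            // Note that invalid messages are still surfaced to the behaviour (with the
+            // unverifiable fields stripped) so that the sending peer can be penalized.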
+            if let Some(validation_error) = invalid_kind.take() {
+                let message = RawMessage {
+                    source: None, // don't bother informing the application
+                    data: message.data.unwrap_or_default(),
+                    sequence_number: None, // don't inform the application
+                    topic: TopicHash::from_raw(message.topic),
+                    signature: None, // don't inform the application
+                    key: message.key,
+                    validated: false,
+                };
+                invalid_messages.push((message, validation_error));
+                // proceed to the next message
+                continue;
+            }
+
+            // verify message signatures if required
+            if verify_signature && !GossipsubCodec::verify_signature(&message) {
+                tracing::warn!("Invalid signature for received message");
+
+                // Build the invalid message (ignoring further validation of sequence number
+                // and source)
+                let message = RawMessage {
+                    source: None, // don't bother informing the application
+                    data: message.data.unwrap_or_default(),
+                    sequence_number: None, // don't inform the application
+                    topic: TopicHash::from_raw(message.topic),
+                    signature: None, // don't inform the application
+                    key: message.key,
+                    validated: false,
+                };
+                invalid_messages.push((message, ValidationError::InvalidSignature));
+                // proceed to the next message
+                continue;
+            }
+
+            // ensure the sequence number is a u64
+            let sequence_number = if verify_sequence_no {
+                if let Some(seq_no) = message.seqno {
+                    if seq_no.is_empty() {
+                        None
+                    } else if seq_no.len() != 8 {
+                        tracing::debug!(
+                            sequence_number=?seq_no,
+                            sequence_length=%seq_no.len(),
+                            "Invalid sequence number length for received message"
+                        );
+                        let message = RawMessage {
+                            source: None, // don't bother informing the application
+                            data: message.data.unwrap_or_default(),
+                            sequence_number: None, // don't inform the application
+                            topic: TopicHash::from_raw(message.topic),
+                            signature: message.signature,
+                            key: message.key,
+                            validated: false,
+                        };
+                        invalid_messages.push((message, ValidationError::InvalidSequenceNumber));
+                        // proceed to the next message
+                        continue;
+                    } else {
+                        // valid sequence number
+                        Some(BigEndian::read_u64(&seq_no))
+                    }
+                } else {
+                    // sequence number was not present
+                    tracing::debug!("Sequence number not present but expected");
+                    let message = RawMessage {
+                        source: None, // don't bother informing the application
+                        data: message.data.unwrap_or_default(),
+                        sequence_number: None, // don't inform the application
+                        topic: TopicHash::from_raw(message.topic),
+                        signature: message.signature,
+                        key: message.key,
+                        validated: false,
+                    };
+                    invalid_messages.push((message, ValidationError::EmptySequenceNumber));
+                    continue;
+                }
+            } else {
+                // Do not verify the sequence number, consider it empty
+                None
+            };
+
+            // Verify the message source if required
+            let source = if verify_source {
+                if let Some(bytes) = message.from {
+                    if !bytes.is_empty() {
+                        match PeerId::from_bytes(&bytes) {
+                            Ok(peer_id) => Some(peer_id), // valid peer id
+                            Err(_) => {
+                                // invalid peer id, add to invalid messages
+                                tracing::debug!("Message source has an invalid PeerId");
+                                let message = RawMessage {
+                                    source: None, // don't bother informing the application
+                                    data: message.data.unwrap_or_default(),
+                                    sequence_number,
+                                    topic: TopicHash::from_raw(message.topic),
+                                    signature: message.signature,
+                                    key: message.key,
+                                    validated: false,
+                                };
+                                invalid_messages.push((message, ValidationError::InvalidPeerId));
+                                continue;
+                            }
+                        }
+                    } else {
+                        None
+                    }
+                } else {
+                    None
+                }
+            } else {
+                None
+            };
+
+            // This message has passed all validation, add it to the validated messages.
+            messages.push(RawMessage {
+                source,
+                data: message.data.unwrap_or_default(),
+                sequence_number,
+                topic: TopicHash::from_raw(message.topic),
+                signature: message.signature,
+                key: message.key,
+                validated: false,
+            });
+        }
+
+        let mut control_msgs = Vec::new();
+
+        if let Some(rpc_control) = rpc.control {
+            // Collect the gossipsub control messages
+            let ihave_msgs: Vec<ControlAction> = rpc_control
+                .ihave
+                .into_iter()
+                .map(|ihave| {
+                    ControlAction::IHave(IHave {
+                        topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()),
+                        message_ids: ihave
+                            .message_ids
+                            .into_iter()
+                            .map(MessageId::from)
+                            .collect::<Vec<MessageId>>(),
+                    })
+                })
+                .collect();
+
+            let iwant_msgs: Vec<ControlAction> = rpc_control
+                .iwant
+                .into_iter()
+                .map(|iwant| {
+                    ControlAction::IWant(IWant {
+                        message_ids: iwant
+                            .message_ids
+                            .into_iter()
+                            .map(MessageId::from)
+                            .collect::<Vec<MessageId>>(),
+                    })
+                })
+                .collect();
+
+            let graft_msgs: Vec<ControlAction> = rpc_control
+                .graft
+                .into_iter()
+                .map(|graft| {
+                    ControlAction::Graft(Graft {
+                        topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()),
+                    })
+                })
+                .collect();
+
+            let mut prune_msgs = Vec::new();
+
+            for prune in rpc_control.prune {
+                // filter out invalid peers
+                let peers = prune
+                    .peers
+                    .into_iter()
+                    .filter_map(|info| {
+                        info.peer_id
+                            .as_ref()
+                            .and_then(|id| PeerId::from_bytes(id).ok())
+                            .map(|peer_id|
+                                //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217
+                                PeerInfo {
+                                    peer_id: Some(peer_id),
+                                })
+                    })
+                    .collect::<Vec<PeerInfo>>();
+
+                let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default());
+                prune_msgs.push(ControlAction::Prune(Prune {
+                    topic_hash,
+                    peers,
+                    backoff: prune.backoff,
+                }));
+            }
+
+            control_msgs.extend(ihave_msgs);
+            control_msgs.extend(iwant_msgs);
+            control_msgs.extend(graft_msgs);
+            control_msgs.extend(prune_msgs);
+        }
+
+        Ok(Some(HandlerEvent::Message {
+            rpc: Rpc {
+                messages,
+                subscriptions: rpc
+                    .subscriptions
+                    .into_iter()
+                    .map(|sub| Subscription {
+                        action: if Some(true) == sub.subscribe {
+                            SubscriptionAction::Subscribe
+                        } else {
+                            SubscriptionAction::Unsubscribe
+                        },
+                        topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()),
+                    })
+                    .collect(),
+                control_msgs,
+            },
+            invalid_messages,
+        }))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::gossipsub::config::Config;
+    use crate::gossipsub::protocol::{BytesMut, GossipsubCodec, HandlerEvent};
+    use crate::gossipsub::*;
+    use crate::gossipsub::{IdentTopic as Topic, Version};
+    use libp2p::identity::Keypair;
+    use quickcheck::*;
+
+    #[derive(Clone, Debug)]
+    struct Message(RawMessage);
+
+    impl Arbitrary for Message {
+        fn arbitrary(g: &mut Gen) -> Self {
+            let keypair = TestKeypair::arbitrary(g);
+
+            // generate an arbitrary GossipsubMessage using the behaviour signing functionality
+            let config = Config::default();
+            let mut gs: Behaviour =
+                Behaviour::new(MessageAuthenticity::Signed(keypair.0), config).unwrap();
+            let mut data_g = quickcheck::Gen::new(10024);
+            let data = (0..u8::arbitrary(&mut data_g))
+                .map(|_| u8::arbitrary(g))
+                .collect::<Vec<_>>();
+            let topic_id = TopicId::arbitrary(g).0;
+            Message(gs.build_raw_message(topic_id, data).unwrap())
+        }
+    }
+
+    #[derive(Clone, Debug)]
+    struct TopicId(TopicHash);
+
+    impl Arbitrary for TopicId {
+        fn arbitrary(g: &mut Gen) -> Self {
+            let mut data_g = quickcheck::Gen::new(1024);
+            let topic_string: String = (0..u8::arbitrary(&mut data_g))
+                .map(|_| char::arbitrary(g))
+                .collect::<String>();
+            TopicId(Topic::new(topic_string).into())
+        }
+    }
+
+    #[derive(Clone)]
+    struct
TestKeypair(Keypair); + + impl Arbitrary for TestKeypair { + #[cfg(feature = "rsa")] + fn arbitrary(g: &mut Gen) -> Self { + let keypair = if bool::arbitrary(g) { + // Small enough to be inlined. + Keypair::generate_ed25519() + } else { + // Too large to be inlined. + let mut rsa_key = hex::decode("308204bd020100300d06092a864886f70d0101010500048204a7308204a30201000282010100ef930f41a71288b643c1cbecbf5f72ab53992249e2b00835bf07390b6745419f3848cbcc5b030faa127bc88cdcda1c1d6f3ff699f0524c15ab9d2c9d8015f5d4bd09881069aad4e9f91b8b0d2964d215cdbbae83ddd31a7622a8228acee07079f6e501aea95508fa26c6122816ef7b00ac526d422bd12aed347c37fff6c1c307f3ba57bb28a7f28609e0bdcc839da4eedca39f5d2fa855ba4b0f9c763e9764937db929a1839054642175312a3de2d3405c9d27bdf6505ef471ce85c5e015eee85bf7874b3d512f715de58d0794fd8afe021c197fbd385bb88a930342fac8da31c27166e2edab00fa55dc1c3814448ba38363077f4e8fe2bdea1c081f85f1aa6f02030100010282010028ff427a1aac1a470e7b4879601a6656193d3857ea79f33db74df61e14730e92bf9ffd78200efb0c40937c3356cbe049cd32e5f15be5c96d5febcaa9bd3484d7fded76a25062d282a3856a1b3b7d2c525cdd8434beae147628e21adf241dd64198d5819f310d033743915ba40ea0b6acdbd0533022ad6daa1ff42de51885f9e8bab2306c6ef1181902d1cd7709006eba1ab0587842b724e0519f295c24f6d848907f772ae9a0953fc931f4af16a07df450fb8bfa94572562437056613647818c238a6ff3f606cffa0533e4b8755da33418dfbc64a85110b1a036623c947400a536bb8df65e5ebe46f2dfd0cfc86e7aeeddd7574c253e8fbf755562b3669525d902818100f9fff30c6677b78dd31ec7a634361438457e80be7a7faf390903067ea8355faa78a1204a82b6e99cb7d9058d23c1ecf6cfe4a900137a00cecc0113fd68c5931602980267ea9a95d182d48ba0a6b4d5dd32fdac685cb2e5d8b42509b2eb59c9579ea6a67ccc7547427e2bd1fb1f23b0ccb4dd6ba7d206c8dd93253d70a451701302818100f5530dfef678d73ce6a401ae47043af10a2e3f224c71ae933035ecd68ccbc4df52d72bc6ca2b17e8faf3e548b483a2506c0369ab80df3b137b54d53fac98f95547c2bc245b416e650ce617e0d29db36066f1335a9ba02ad3e0edf9dc3d58fd835835042663edebce81803972696c789012847cb1f854ab2ac0a1bd3867ac7fb502818029c53010d456105f2bf52a9a8482bca2224a5eac74bf3cc1a4d5d291fafcdffd15a6a6448cce8efdd661f6617ca5fc37c8c885cc3374e109ac6049bcbf72b37eabf44602a2da2d4a1237fd145c863e6d75059976de762d9d258c42b0984e2a2befa01c95217c3ee9c736ff209c355466ff99375194eff943bc402ea1d172a1ed02818027175bf493bbbfb8719c12b47d967bf9eac061c90a5b5711172e9095c38bb8cc493c063abffe4bea110b0a2f22ac9311b3947ba31b7ef6bfecf8209eebd6d86c316a2366bbafda7279b2b47d5bb24b6202254f249205dcad347b574433f6593733b806f84316276c1990a016ce1bbdbe5f650325acc7791aefe515ecc60063bd02818100b6a2077f4adcf15a17092d9c4a346d6022ac48f3861b73cf714f84c440a07419a7ce75a73b9cbff4597c53c128bf81e87b272d70428a272d99f90cd9b9ea1033298e108f919c6477400145a102df3fb5601ffc4588203cf710002517bfa24e6ad32f4d09c6b1a995fa28a3104131bedd9072f3b4fb4a5c2056232643d310453f").unwrap(); + Keypair::rsa_from_pkcs8(&mut rsa_key).unwrap() + }; + TestKeypair(keypair) + } + + #[cfg(not(feature = "rsa"))] + fn arbitrary(_g: &mut Gen) -> Self { + // Small enough to be inlined. + TestKeypair(Keypair::generate_ed25519()) + } + } + + impl std::fmt::Debug for TestKeypair { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TestKeypair") + .field("public", &self.0.public()) + .finish() + } + } + + #[test] + /// Test that RPC messages can be encoded and decoded successfully. 
+    fn encode_decode() {
+        fn prop(message: Message) {
+            let message = message.0;
+
+            let rpc = crate::gossipsub::types::Rpc {
+                messages: vec![message.clone()],
+                subscriptions: vec![],
+                control_msgs: vec![],
+            };
+
+            let mut codec = GossipsubCodec::new(u32::MAX as usize, ValidationMode::Strict);
+            let mut buf = BytesMut::new();
+            codec.encode(rpc.into_protobuf(), &mut buf).unwrap();
+            let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap();
+            // mark as validated as it's a published message
+            match decoded_rpc {
+                HandlerEvent::Message { mut rpc, .. } => {
+                    rpc.messages[0].validated = true;
+
+                    assert_eq!(vec![message], rpc.messages);
+                }
+                _ => panic!("Must decode a message"),
+            }
+        }
+
+        QuickCheck::new().quickcheck(prop as fn(_) -> _)
+    }
+
+    #[test]
+    fn support_floodsub_with_custom_protocol() {
+        let protocol_config = ConfigBuilder::default()
+            .protocol_id("/foosub", Version::V1_1)
+            .support_floodsub()
+            .build()
+            .unwrap()
+            .protocol_config();
+
+        assert_eq!(protocol_config.protocol_ids[0].protocol, "/foosub");
+        assert_eq!(protocol_config.protocol_ids[1].protocol, "/floodsub/1.0.0");
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/rpc_proto.rs b/beacon_node/lighthouse_network/src/gossipsub/rpc_proto.rs
new file mode 100644
index 000000000..ce468b7c8
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/rpc_proto.rs
@@ -0,0 +1,92 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
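+
+// The `proto` module below re-exports the quick-protobuf types generated from the
+// gossipsub RPC schema. The `compat` submodule, exercised by the test further down,
+// mirrors the older wire format in which a single message could carry several
+// topic ids.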
+ +pub(crate) mod proto { + #![allow(unreachable_pub)] + include!("generated/mod.rs"); + pub use self::gossipsub::pb::{mod_RPC::SubOpts, *}; +} + +#[cfg(test)] +mod test { + use crate::gossipsub::rpc_proto::proto::compat; + use crate::gossipsub::IdentTopic as Topic; + use libp2p::identity::PeerId; + use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; + use rand::Rng; + + #[test] + fn test_multi_topic_message_compatibility() { + let topic1 = Topic::new("t1").hash(); + let topic2 = Topic::new("t2").hash(); + + let new_message1 = super::proto::Message { + from: Some(PeerId::random().to_bytes()), + data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), + topic: topic1.clone().into_string(), + signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + }; + let old_message1 = compat::pb::Message { + from: Some(PeerId::random().to_bytes()), + data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), + topic_ids: vec![topic1.clone().into_string()], + signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + }; + let old_message2 = compat::pb::Message { + from: Some(PeerId::random().to_bytes()), + data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), + topic_ids: vec![topic1.clone().into_string(), topic2.clone().into_string()], + signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + }; + + let mut new_message1b = Vec::with_capacity(new_message1.get_size()); + let mut writer = Writer::new(&mut new_message1b); + new_message1.write_message(&mut writer).unwrap(); + + let mut old_message1b = Vec::with_capacity(old_message1.get_size()); + let mut writer = Writer::new(&mut old_message1b); + old_message1.write_message(&mut writer).unwrap(); + + let mut old_message2b = Vec::with_capacity(old_message2.get_size()); + let mut writer = Writer::new(&mut old_message2b); + old_message2.write_message(&mut writer).unwrap(); + + let mut reader = BytesReader::from_bytes(&old_message1b[..]); + let new_message = + super::proto::Message::from_reader(&mut reader, &old_message1b[..]).unwrap(); + assert_eq!(new_message.topic, topic1.clone().into_string()); + + let mut reader = BytesReader::from_bytes(&old_message2b[..]); + let new_message = + super::proto::Message::from_reader(&mut reader, &old_message2b[..]).unwrap(); + assert_eq!(new_message.topic, topic2.into_string()); + + let mut reader = BytesReader::from_bytes(&new_message1b[..]); + let old_message = + compat::pb::Message::from_reader(&mut reader, &new_message1b[..]).unwrap(); + assert_eq!(old_message.topic_ids, vec![topic1.into_string()]); + } +} diff --git a/beacon_node/lighthouse_network/src/gossipsub/subscription_filter.rs b/beacon_node/lighthouse_network/src/gossipsub/subscription_filter.rs new file mode 100644 index 000000000..aa0ec7d3e --- /dev/null +++ b/beacon_node/lighthouse_network/src/gossipsub/subscription_filter.rs @@ -0,0 +1,436 @@ +// Copyright 2020 Sigma Prime Pty Ltd. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+use crate::gossipsub::types::Subscription;
+use crate::gossipsub::TopicHash;
+use std::collections::{BTreeSet, HashMap, HashSet};
+
+pub trait TopicSubscriptionFilter {
+    /// Returns true iff the topic is of interest and we can subscribe to it.
+    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool;
+
+    /// Filters a list of incoming subscriptions and returns a filtered set.
+    /// By default this deduplicates the subscriptions and calls
+    /// [`Self::filter_incoming_subscription_set`] on the filtered set.
+    fn filter_incoming_subscriptions<'a>(
+        &mut self,
+        subscriptions: &'a [Subscription],
+        currently_subscribed_topics: &BTreeSet<TopicHash>,
+    ) -> Result<HashSet<&'a Subscription>, String> {
+        let mut filtered_subscriptions: HashMap<TopicHash, &Subscription> = HashMap::new();
+        for subscription in subscriptions {
+            use std::collections::hash_map::Entry::*;
+            match filtered_subscriptions.entry(subscription.topic_hash.clone()) {
+                Occupied(entry) => {
+                    if entry.get().action != subscription.action {
+                        entry.remove();
+                    }
+                }
+                Vacant(entry) => {
+                    entry.insert(subscription);
+                }
+            }
+        }
+        self.filter_incoming_subscription_set(
+            filtered_subscriptions.into_values().collect(),
+            currently_subscribed_topics,
+        )
+    }
+
+    /// Filters a set of deduplicated subscriptions.
+    /// By default this filters the elements based on [`Self::allow_incoming_subscription`].
+    fn filter_incoming_subscription_set<'a>(
+        &mut self,
+        mut subscriptions: HashSet<&'a Subscription>,
+        _currently_subscribed_topics: &BTreeSet<TopicHash>,
+    ) -> Result<HashSet<&'a Subscription>, String> {
+        subscriptions.retain(|s| {
+            if self.allow_incoming_subscription(s) {
+                true
+            } else {
+                tracing::debug!(subscription=?s, "Filtered incoming subscription");
+                false
+            }
+        });
+        Ok(subscriptions)
+    }
+
+    /// Returns true iff we allow an incoming subscription.
+    /// This is used by the default implementation of filter_incoming_subscription_set to decide
+    /// whether to filter out a subscription or not.
+    /// By default this uses can_subscribe to decide the same for incoming subscriptions as for
+    /// outgoing ones.
+    fn allow_incoming_subscription(&mut self, subscription: &Subscription) -> bool {
+        self.can_subscribe(&subscription.topic_hash)
+    }
+}
+
+// Some useful implementers:
+
+/// Allows all subscriptions
+#[derive(Default, Clone)]
+pub struct AllowAllSubscriptionFilter {}
+
+impl TopicSubscriptionFilter for AllowAllSubscriptionFilter {
+    fn can_subscribe(&mut self, _: &TopicHash) -> bool {
+        true
+    }
+}
+
+/// Allows only whitelisted subscriptions
+#[derive(Default, Clone)]
+pub struct WhitelistSubscriptionFilter(pub HashSet<TopicHash>);
+
+impl TopicSubscriptionFilter for WhitelistSubscriptionFilter {
+    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
+        self.0.contains(topic_hash)
+    }
+}
+
+/// Adds a max count to a given subscription filter
+pub struct MaxCountSubscriptionFilter<T: TopicSubscriptionFilter> {
+    pub filter: T,
+    pub max_subscribed_topics: usize,
+    pub max_subscriptions_per_request: usize,
+}
+
+impl<T: TopicSubscriptionFilter> TopicSubscriptionFilter for MaxCountSubscriptionFilter<T> {
+    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
+        self.filter.can_subscribe(topic_hash)
+    }
+
+    fn filter_incoming_subscriptions<'a>(
+        &mut self,
+        subscriptions: &'a [Subscription],
+        currently_subscribed_topics: &BTreeSet<TopicHash>,
+    ) -> Result<HashSet<&'a Subscription>, String> {
+        if subscriptions.len() > self.max_subscriptions_per_request {
+            return Err("too many subscriptions per request".into());
+        }
+        let result = self
+            .filter
+            .filter_incoming_subscriptions(subscriptions, currently_subscribed_topics)?;
+
+        use crate::gossipsub::types::SubscriptionAction::*;
+
+        let mut unsubscribed = 0;
+        let mut new_subscribed = 0;
+        for s in &result {
+            let currently_contained = currently_subscribed_topics.contains(&s.topic_hash);
+            match s.action {
+                Unsubscribe => {
+                    if currently_contained {
+                        unsubscribed += 1;
+                    }
+                }
+                Subscribe => {
+                    if !currently_contained {
+                        new_subscribed += 1;
+                    }
+                }
+            }
+        }
+
+        if new_subscribed + currently_subscribed_topics.len()
+            > self.max_subscribed_topics + unsubscribed
+        {
+            return Err("too many subscribed topics".into());
+        }
+
+        Ok(result)
+    }
+}
+
+/// Combines two subscription filters
+pub struct CombinedSubscriptionFilters<T, S> {
+    pub filter1: T,
+    pub filter2: S,
+}
+
+impl<T, S> TopicSubscriptionFilter for CombinedSubscriptionFilters<T, S>
+where
+    T: TopicSubscriptionFilter,
+    S: TopicSubscriptionFilter,
+{
+    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
+        self.filter1.can_subscribe(topic_hash) && self.filter2.can_subscribe(topic_hash)
+    }
+
+    fn filter_incoming_subscription_set<'a>(
+        &mut self,
+        subscriptions: HashSet<&'a Subscription>,
+        currently_subscribed_topics: &BTreeSet<TopicHash>,
+    ) -> Result<HashSet<&'a Subscription>, String> {
+        let intermediate = self
+            .filter1
+            .filter_incoming_subscription_set(subscriptions, currently_subscribed_topics)?;
+        self.filter2
+            .filter_incoming_subscription_set(intermediate, currently_subscribed_topics)
+    }
+}
+
+pub struct CallbackSubscriptionFilter<T>(pub T)
+where
+    T: FnMut(&TopicHash) -> bool;
+
+impl<T> TopicSubscriptionFilter for CallbackSubscriptionFilter<T>
+where
+    T: FnMut(&TopicHash) -> bool,
+{
+    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
+        (self.0)(topic_hash)
+    }
+}
+
+/// A subscription filter that filters topics based on a regular expression.
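+///
+/// For example (a hypothetical pattern; any regex over the topic string works):
+///
+/// ```ignore
+/// let filter = RegexSubscriptionFilter(regex::Regex::new("^/eth2/.*").unwrap());
+/// ```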
+pub struct RegexSubscriptionFilter(pub regex::Regex); + +impl TopicSubscriptionFilter for RegexSubscriptionFilter { + fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool { + self.0.is_match(topic_hash.as_str()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::gossipsub::types::SubscriptionAction::*; + use std::iter::FromIterator; + + #[test] + fn test_filter_incoming_allow_all_with_duplicates() { + let mut filter = AllowAllSubscriptionFilter {}; + + let t1 = TopicHash::from_raw("t1"); + let t2 = TopicHash::from_raw("t2"); + + let old = BTreeSet::from_iter(vec![t1.clone()]); + let subscriptions = vec![ + Subscription { + action: Unsubscribe, + topic_hash: t1.clone(), + }, + Subscription { + action: Unsubscribe, + topic_hash: t2.clone(), + }, + Subscription { + action: Subscribe, + topic_hash: t2, + }, + Subscription { + action: Subscribe, + topic_hash: t1.clone(), + }, + Subscription { + action: Unsubscribe, + topic_hash: t1, + }, + ]; + + let result = filter + .filter_incoming_subscriptions(&subscriptions, &old) + .unwrap(); + assert_eq!(result, vec![&subscriptions[4]].into_iter().collect()); + } + + #[test] + fn test_filter_incoming_whitelist() { + let t1 = TopicHash::from_raw("t1"); + let t2 = TopicHash::from_raw("t2"); + + let mut filter = WhitelistSubscriptionFilter(HashSet::from_iter(vec![t1.clone()])); + + let old = Default::default(); + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t1, + }, + Subscription { + action: Subscribe, + topic_hash: t2, + }, + ]; + + let result = filter + .filter_incoming_subscriptions(&subscriptions, &old) + .unwrap(); + assert_eq!(result, vec![&subscriptions[0]].into_iter().collect()); + } + + #[test] + fn test_filter_incoming_too_many_subscriptions_per_request() { + let t1 = TopicHash::from_raw("t1"); + + let mut filter = MaxCountSubscriptionFilter { + filter: AllowAllSubscriptionFilter {}, + max_subscribed_topics: 100, + max_subscriptions_per_request: 2, + }; + + let old = Default::default(); + + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t1.clone(), + }, + Subscription { + action: Unsubscribe, + topic_hash: t1.clone(), + }, + Subscription { + action: Subscribe, + topic_hash: t1, + }, + ]; + + let result = filter.filter_incoming_subscriptions(&subscriptions, &old); + assert_eq!(result, Err("too many subscriptions per request".into())); + } + + #[test] + fn test_filter_incoming_too_many_subscriptions() { + let t: Vec<_> = (0..4) + .map(|i| TopicHash::from_raw(format!("t{i}"))) + .collect(); + + let mut filter = MaxCountSubscriptionFilter { + filter: AllowAllSubscriptionFilter {}, + max_subscribed_topics: 3, + max_subscriptions_per_request: 2, + }; + + let old = t[0..2].iter().cloned().collect(); + + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t[2].clone(), + }, + Subscription { + action: Subscribe, + topic_hash: t[3].clone(), + }, + ]; + + let result = filter.filter_incoming_subscriptions(&subscriptions, &old); + assert_eq!(result, Err("too many subscribed topics".into())); + } + + #[test] + fn test_filter_incoming_max_subscribed_valid() { + let t: Vec<_> = (0..5) + .map(|i| TopicHash::from_raw(format!("t{i}"))) + .collect(); + + let mut filter = MaxCountSubscriptionFilter { + filter: WhitelistSubscriptionFilter(t.iter().take(4).cloned().collect()), + max_subscribed_topics: 2, + max_subscriptions_per_request: 5, + }; + + let old = t[0..2].iter().cloned().collect(); + + let subscriptions = vec![ + Subscription { + action: Subscribe, 
+                topic_hash: t[4].clone(),
+            },
+            Subscription {
+                action: Subscribe,
+                topic_hash: t[2].clone(),
+            },
+            Subscription {
+                action: Subscribe,
+                topic_hash: t[3].clone(),
+            },
+            Subscription {
+                action: Unsubscribe,
+                topic_hash: t[0].clone(),
+            },
+            Subscription {
+                action: Unsubscribe,
+                topic_hash: t[1].clone(),
+            },
+        ];
+
+        let result = filter
+            .filter_incoming_subscriptions(&subscriptions, &old)
+            .unwrap();
+        assert_eq!(result, subscriptions[1..].iter().collect());
+    }
+
+    #[test]
+    fn test_callback_filter() {
+        let t1 = TopicHash::from_raw("t1");
+        let t2 = TopicHash::from_raw("t2");
+
+        let mut filter = CallbackSubscriptionFilter(|h| h.as_str() == "t1");
+
+        let old = Default::default();
+        let subscriptions = vec![
+            Subscription {
+                action: Subscribe,
+                topic_hash: t1,
+            },
+            Subscription {
+                action: Subscribe,
+                topic_hash: t2,
+            },
+        ];
+
+        let result = filter
+            .filter_incoming_subscriptions(&subscriptions, &old)
+            .unwrap();
+        assert_eq!(result, vec![&subscriptions[0]].into_iter().collect());
+    }
+
+    #[test]
+    fn test_regex_subscription_filter() {
+        let t1 = TopicHash::from_raw("tt");
+        let t2 = TopicHash::from_raw("et3t3te");
+        let t3 = TopicHash::from_raw("abcdefghijklmnopqrsuvwxyz");
+
+        let mut filter = RegexSubscriptionFilter(regex::Regex::new("t.*t").unwrap());
+
+        let old = Default::default();
+        let subscriptions = vec![
+            Subscription {
+                action: Subscribe,
+                topic_hash: t1,
+            },
+            Subscription {
+                action: Subscribe,
+                topic_hash: t2,
+            },
+            Subscription {
+                action: Subscribe,
+                topic_hash: t3,
+            },
+        ];
+
+        let result = filter
+            .filter_incoming_subscriptions(&subscriptions, &old)
+            .unwrap();
+        assert_eq!(result, subscriptions[..2].iter().collect());
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/time_cache.rs b/beacon_node/lighthouse_network/src/gossipsub/time_cache.rs
new file mode 100644
index 000000000..89fd4afee
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/time_cache.rs
@@ -0,0 +1,219 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! This implements a time-based LRU cache for checking gossipsub message duplicates.
+
+use fnv::FnvHashMap;
+use instant::Instant;
+use std::collections::hash_map::{
+    self,
+    Entry::{Occupied, Vacant},
+};
+use std::collections::VecDeque;
+use std::time::Duration;
+
+struct ExpiringElement<Element> {
+    /// The element that expires.
+    element: Element,
+    /// The expire time.
+    expires: Instant,
+}
+
+pub(crate) struct TimeCache<Key, Value> {
+    /// Mapping a key to its value together with its latest expire time (can be updated through
+    /// reinserts).
+    map: FnvHashMap<Key, ExpiringElement<Value>>,
+    /// An ordered list of keys by expires time.
+    list: VecDeque<ExpiringElement<Key>>,
+    /// The time elements remain in the cache.
+    ttl: Duration,
+}
+
+pub(crate) struct OccupiedEntry<'a, K, V> {
+    entry: hash_map::OccupiedEntry<'a, K, ExpiringElement<V>>,
+}
+
+impl<'a, K, V> OccupiedEntry<'a, K, V>
+where
+    K: Eq + std::hash::Hash + Clone,
+{
+    pub(crate) fn into_mut(self) -> &'a mut V {
+        &mut self.entry.into_mut().element
+    }
+}
+
+pub(crate) struct VacantEntry<'a, K, V> {
+    expiration: Instant,
+    entry: hash_map::VacantEntry<'a, K, ExpiringElement<V>>,
+    list: &'a mut VecDeque<ExpiringElement<K>>,
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V>
+where
+    K: Eq + std::hash::Hash + Clone,
+{
+    pub(crate) fn insert(self, value: V) -> &'a mut V {
+        self.list.push_back(ExpiringElement {
+            element: self.entry.key().clone(),
+            expires: self.expiration,
+        });
+        &mut self
+            .entry
+            .insert(ExpiringElement {
+                element: value,
+                expires: self.expiration,
+            })
+            .element
+    }
+}
+
+pub(crate) enum Entry<'a, K: 'a, V: 'a> {
+    Occupied(OccupiedEntry<'a, K, V>),
+    Vacant(VacantEntry<'a, K, V>),
+}
+
+impl<'a, K: 'a, V: 'a> Entry<'a, K, V>
+where
+    K: Eq + std::hash::Hash + Clone,
+{
+    pub(crate) fn or_default(self) -> &'a mut V
+    where
+        V: Default,
+    {
+        match self {
+            Entry::Occupied(entry) => entry.into_mut(),
+            Entry::Vacant(entry) => entry.insert(V::default()),
+        }
+    }
+}
+
+impl<Key, Value> TimeCache<Key, Value>
+where
+    Key: Eq + std::hash::Hash + Clone,
+{
+    pub(crate) fn new(ttl: Duration) -> Self {
+        TimeCache {
+            map: FnvHashMap::default(),
+            list: VecDeque::new(),
+            ttl,
+        }
+    }
+
+    fn remove_expired_keys(&mut self, now: Instant) {
+        while let Some(element) = self.list.pop_front() {
+            if element.expires > now {
+                self.list.push_front(element);
+                break;
+            }
+            if let Occupied(entry) = self.map.entry(element.element.clone()) {
+                if entry.get().expires <= now {
+                    entry.remove();
+                }
+            }
+        }
+    }
+
+    pub(crate) fn entry(&mut self, key: Key) -> Entry<Key, Value> {
+        let now = Instant::now();
+        self.remove_expired_keys(now);
+        match self.map.entry(key) {
+            Occupied(entry) => Entry::Occupied(OccupiedEntry { entry }),
+            Vacant(entry) => Entry::Vacant(VacantEntry {
+                expiration: now + self.ttl,
+                entry,
+                list: &mut self.list,
+            }),
+        }
+    }
+
+    /// Empties the entire cache.
+    #[cfg(test)]
+    pub(crate) fn clear(&mut self) {
+        self.map.clear();
+        self.list.clear();
+    }
+
+    pub(crate) fn contains_key(&self, key: &Key) -> bool {
+        self.map.contains_key(key)
+    }
+}
+
+pub(crate) struct DuplicateCache<Key>(TimeCache<Key, ()>);
+
+impl<Key> DuplicateCache<Key>
+where
+    Key: Eq + std::hash::Hash + Clone,
+{
+    pub(crate) fn new(ttl: Duration) -> Self {
+        Self(TimeCache::new(ttl))
+    }
+
+    // Inserts new elements and removes any expired elements.
+    //
+    // If the key was not present this returns `true`. If the value was already present this
+    // returns `false`.
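+    //
+    // This is the deduplication primitive for received message ids: callers insert
+    // each id and only continue processing a message when this returns `true`.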
+    pub(crate) fn insert(&mut self, key: Key) -> bool {
+        if let Entry::Vacant(entry) = self.0.entry(key) {
+            entry.insert(());
+            true
+        } else {
+            false
+        }
+    }
+
+    pub(crate) fn contains(&self, key: &Key) -> bool {
+        self.0.contains_key(key)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn cache_added_entries_exist() {
+        let mut cache = DuplicateCache::new(Duration::from_secs(10));
+
+        cache.insert("t");
+        cache.insert("e");
+
+        // Should report that 't' and 'e' already exist
+        assert!(!cache.insert("t"));
+        assert!(!cache.insert("e"));
+    }
+
+    #[test]
+    fn cache_entries_expire() {
+        let mut cache = DuplicateCache::new(Duration::from_millis(100));
+
+        cache.insert("t");
+        assert!(!cache.insert("t"));
+        cache.insert("e");
+        //assert!(!cache.insert("t"));
+        assert!(!cache.insert("e"));
+        // sleep until cache expiry
+        std::thread::sleep(Duration::from_millis(101));
+        // add another element to clear previous cache
+        cache.insert("s");
+
+        // should be removed from the cache
+        assert!(cache.insert("t"));
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/topic.rs b/beacon_node/lighthouse_network/src/gossipsub/topic.rs
new file mode 100644
index 000000000..068d2e8b2
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/topic.rs
@@ -0,0 +1,123 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+use crate::gossipsub::rpc_proto::proto;
+use base64::prelude::*;
+use prometheus_client::encoding::EncodeLabelSet;
+use quick_protobuf::Writer;
+use sha2::{Digest, Sha256};
+use std::fmt;
+
+/// A generic trait that can be extended for various hashing types for a topic.
+pub trait Hasher {
+    /// The function that takes a topic string and creates a topic hash.
+    fn hash(topic_string: String) -> TopicHash;
+}
+
+/// A type for representing topics that use the identity hash.
+#[derive(Debug, Clone)]
+pub struct IdentityHash {}
+impl Hasher for IdentityHash {
+    /// Creates a [`TopicHash`] as a raw string.
+    fn hash(topic_string: String) -> TopicHash {
+        TopicHash { hash: topic_string }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct Sha256Hash {}
+impl Hasher for Sha256Hash {
+    /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the
+    /// hash.
+    fn hash(topic_string: String) -> TopicHash {
+        use quick_protobuf::MessageWrite;
+
+        let topic_descriptor = proto::TopicDescriptor {
+            name: Some(topic_string),
+            auth: None,
+            enc: None,
+        };
+        let mut bytes = Vec::with_capacity(topic_descriptor.get_size());
+        let mut writer = Writer::new(&mut bytes);
+        topic_descriptor
+            .write_message(&mut writer)
+            .expect("Encoding to succeed");
+        let hash = BASE64_STANDARD.encode(Sha256::digest(&bytes));
+        TopicHash { hash }
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, EncodeLabelSet)]
+pub struct TopicHash {
+    /// The topic hash. Stored as a string to align with the protobuf API.
+    hash: String,
+}
+
+impl TopicHash {
+    pub fn from_raw(hash: impl Into<String>) -> TopicHash {
+        TopicHash { hash: hash.into() }
+    }
+
+    pub fn into_string(self) -> String {
+        self.hash
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.hash
+    }
+}
+
+/// A gossipsub topic.
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Topic<H: Hasher> {
+    topic: String,
+    phantom_data: std::marker::PhantomData<H>,
+}
+
+impl<H: Hasher> From<Topic<H>> for TopicHash {
+    fn from(topic: Topic<H>) -> TopicHash {
+        topic.hash()
+    }
+}
+
+impl<H: Hasher> Topic<H> {
+    pub fn new(topic: impl Into<String>) -> Self {
+        Topic {
+            topic: topic.into(),
+            phantom_data: std::marker::PhantomData,
+        }
+    }
+
+    pub fn hash(&self) -> TopicHash {
+        H::hash(self.topic.clone())
+    }
+}
+
+impl<H: Hasher> fmt::Display for Topic<H> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.topic)
+    }
+}
+
+impl fmt::Display for TopicHash {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.hash)
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/transform.rs b/beacon_node/lighthouse_network/src/gossipsub/transform.rs
new file mode 100644
index 000000000..8eacdbb39
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/transform.rs
@@ -0,0 +1,72 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! This trait allows for extended user-level decoding that can apply to message-data before a
+//! message-id is calculated.
+//!
+//! This is primarily designed to allow applications to implement their own custom compression
+//! algorithms that can be topic-specific. Once the raw data is transformed the message-id is then
+//! calculated, allowing for applications to employ message-id functions post compression.
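+//!
+//! As an illustration only (assuming a hypothetical dependency on the `snap` crate,
+//! which is not part of this module), a compression transform might look like:
+//!
+//! ```ignore
+//! use crate::gossipsub::{DataTransform, Message, RawMessage, TopicHash};
+//!
+//! struct SnappyTransform;
+//!
+//! impl DataTransform for SnappyTransform {
+//!     fn inbound_transform(&self, raw: RawMessage) -> Result<Message, std::io::Error> {
+//!         // Decompress the wire data before the message-id is calculated.
+//!         let data = snap::raw::Decoder::new()
+//!             .decompress_vec(&raw.data)
+//!             .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
+//!         Ok(Message {
+//!             source: raw.source,
+//!             data,
+//!             sequence_number: raw.sequence_number,
+//!             topic: raw.topic,
+//!         })
+//!     }
+//!
+//!     fn outbound_transform(
+//!         &self,
+//!         _topic: &TopicHash,
+//!         data: Vec<u8>,
+//!     ) -> Result<Vec<u8>, std::io::Error> {
+//!         // Compress application data before it goes on the wire; this must be the
+//!         // exact inverse of `inbound_transform`.
+//!         snap::raw::Encoder::new()
+//!             .compress_vec(&data)
+//!             .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
+//!     }
+//! }
+//! ```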
+
+use crate::gossipsub::{Message, RawMessage, TopicHash};
+
+/// A general trait of transforming a [`RawMessage`] into a [`Message`]. The
+/// [`RawMessage`] is obtained from the wire and the [`Message`] is used to
+/// calculate the [`crate::gossipsub::MessageId`] of the message and is what is sent to the application.
+///
+/// The inbound/outbound transforms must be inverses. Applying the inbound transform and then the
+/// outbound transform MUST leave the underlying data un-modified.
+///
+/// By default, this is the identity transform for all fields in [`Message`].
+pub trait DataTransform {
+    /// Takes a [`RawMessage`] received and converts it to a [`Message`].
+    fn inbound_transform(&self, raw_message: RawMessage) -> Result<Message, std::io::Error>;
+
+    /// Takes the data to be published (a topic and associated data) and transforms the data. The
+    /// transformed data will then be used to create a [`crate::gossipsub::RawMessage`] to be sent to peers.
+    fn outbound_transform(
+        &self,
+        topic: &TopicHash,
+        data: Vec<u8>,
+    ) -> Result<Vec<u8>, std::io::Error>;
+}
+
+/// The default transform: the raw data is propagated as-is to the application layer gossipsub.
+#[derive(Default, Clone)]
+pub struct IdentityTransform;
+
+impl DataTransform for IdentityTransform {
+    fn inbound_transform(&self, raw_message: RawMessage) -> Result<Message, std::io::Error> {
+        Ok(Message {
+            source: raw_message.source,
+            data: raw_message.data,
+            sequence_number: raw_message.sequence_number,
+            topic: raw_message.topic,
+        })
+    }
+
+    fn outbound_transform(
+        &self,
+        _topic: &TopicHash,
+        data: Vec<u8>,
+    ) -> Result<Vec<u8>, std::io::Error> {
+        Ok(data)
+    }
+}
diff --git a/beacon_node/lighthouse_network/src/gossipsub/types.rs b/beacon_node/lighthouse_network/src/gossipsub/types.rs
new file mode 100644
index 000000000..f77185c7c
--- /dev/null
+++ b/beacon_node/lighthouse_network/src/gossipsub/types.rs
@@ -0,0 +1,818 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! A collection of types used by the Gossipsub system.
+use crate::gossipsub::metrics::Metrics;
+use crate::gossipsub::TopicHash;
+use async_channel::{Receiver, Sender};
+use futures::stream::Peekable;
+use futures::{Future, Stream, StreamExt};
+use futures_timer::Delay;
+use instant::Duration;
+use libp2p::identity::PeerId;
+use libp2p::swarm::ConnectionId;
+use prometheus_client::encoding::EncodeLabelValue;
+use quick_protobuf::MessageWrite;
+use std::collections::BTreeSet;
+use std::fmt::Debug;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use std::{fmt, pin::Pin};
+
+use crate::gossipsub::rpc_proto::proto;
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+/// Counts of messages that failed to be sent to a peer.
+#[derive(Clone, Debug, Default)]
+pub struct FailedMessages {
+    /// The number of publish messages that failed to be published in a heartbeat.
+    pub publish: usize,
+    /// The number of forward messages that failed to be published in a heartbeat.
+    pub forward: usize,
+    /// The number of messages that failed to be sent to the priority queue as it was full.
+    pub priority: usize,
+    /// The number of messages that failed to be sent to the non-priority queue as it was full.
+    pub non_priority: usize,
+}
+
+impl FailedMessages {
+    /// The total number of messages that expired due to a timeout.
+    pub fn total_timeout(&self) -> usize {
+        self.publish + self.forward
+    }
+
+    /// The total number of messages that failed due to the queue being full.
+    pub fn total_queue_full(&self) -> usize {
+        self.priority + self.non_priority
+    }
+
+    /// The total number of failed messages in a heartbeat.
+    pub fn total(&self) -> usize {
+        self.total_timeout() + self.total_queue_full()
+    }
+}
+
+#[derive(Debug)]
+/// Validation kinds from the application for received messages.
+pub enum MessageAcceptance {
+    /// The message is considered valid, and it should be delivered and forwarded to the network.
+    Accept,
+    /// The message is considered invalid, and it should be rejected and trigger the P₄ penalty.
+    Reject,
+    /// The message is neither delivered nor forwarded to the network, but the router does not
+    /// trigger the P₄ penalty.
+    Ignore,
+}
+
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct MessageId(pub Vec<u8>);
+
+impl MessageId {
+    pub fn new(value: &[u8]) -> Self {
+        Self(value.to_vec())
+    }
+}
+
+impl<T: Into<Vec<u8>>> From<T> for MessageId {
+    fn from(value: T) -> Self {
+        Self(value.into())
+    }
+}
+
+impl std::fmt::Display for MessageId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", hex_fmt::HexFmt(&self.0))
+    }
+}
+
+impl std::fmt::Debug for MessageId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "MessageId({})", hex_fmt::HexFmt(&self.0))
+    }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct PeerConnections {
+    /// The kind of protocol the peer supports.
+    pub(crate) kind: PeerKind,
+    /// Its current connections.
+    pub(crate) connections: Vec<ConnectionId>,
+    /// The rpc sender to the peer.
+    pub(crate) sender: RpcSender,
+    /// Subscribed topics.
+    pub(crate) topics: BTreeSet<TopicHash>,
+}
+
+/// Describes the types of peers that can exist in the gossipsub context.
+#[derive(Debug, Clone, PartialEq, Hash, EncodeLabelValue, Eq)]
+pub enum PeerKind {
+    /// A gossipsub 1.1 peer.
+    Gossipsubv1_1,
+    /// A gossipsub 1.0 peer.
+    Gossipsub,
+    /// A floodsub peer.
+    Floodsub,
+    /// The peer doesn't support any of the protocols.
+#[derive(Debug, Clone)]
+pub(crate) struct PeerConnections {
+    /// The kind of protocol the peer supports.
+    pub(crate) kind: PeerKind,
+    /// Its current connections.
+    pub(crate) connections: Vec<ConnectionId>,
+    /// The rpc sender to the peer.
+    pub(crate) sender: RpcSender,
+    /// Subscribed topics.
+    pub(crate) topics: BTreeSet<TopicHash>,
+}
+
+/// Describes the types of peers that can exist in the gossipsub context.
+#[derive(Debug, Clone, PartialEq, Hash, EncodeLabelValue, Eq)]
+pub enum PeerKind {
+    /// A gossipsub 1.1 peer.
+    Gossipsubv1_1,
+    /// A gossipsub 1.0 peer.
+    Gossipsub,
+    /// A floodsub peer.
+    Floodsub,
+    /// The peer doesn't support any of the protocols.
+    NotSupported,
+}
+
+/// A message received by the gossipsub system and stored locally in caches.
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct RawMessage {
+    /// Id of the peer that published this message.
+    pub source: Option<PeerId>,
+
+    /// Content of the message. Its meaning is out of scope of this library.
+    pub data: Vec<u8>,
+
+    /// A random sequence number.
+    pub sequence_number: Option<u64>,
+
+    /// The topic this message belongs to.
+    pub topic: TopicHash,
+
+    /// The signature of the message if it's signed.
+    pub signature: Option<Vec<u8>>,
+
+    /// The public key of the message if it is signed and the source [`PeerId`] cannot be inlined.
+    pub key: Option<Vec<u8>>,
+
+    /// Flag indicating if this message has been validated by the application or not.
+    pub validated: bool,
+}
+
+impl RawMessage {
+    /// Calculates the encoded length of this message (used for calculating metrics).
+    pub fn raw_protobuf_len(&self) -> usize {
+        let message = proto::Message {
+            from: self.source.map(|m| m.to_bytes()),
+            data: Some(self.data.clone()),
+            seqno: self.sequence_number.map(|s| s.to_be_bytes().to_vec()),
+            topic: TopicHash::into_string(self.topic.clone()),
+            signature: self.signature.clone(),
+            key: self.key.clone(),
+        };
+        message.get_size()
+    }
+}
+
+impl From<RawMessage> for proto::Message {
+    fn from(raw: RawMessage) -> Self {
+        proto::Message {
+            from: raw.source.map(|m| m.to_bytes()),
+            data: Some(raw.data),
+            seqno: raw.sequence_number.map(|s| s.to_be_bytes().to_vec()),
+            topic: TopicHash::into_string(raw.topic),
+            signature: raw.signature,
+            key: raw.key,
+        }
+    }
+}
+
+/// The message sent to the user after a [`RawMessage`] has been transformed by a
+/// [`crate::gossipsub::DataTransform`].
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct Message {
+    /// Id of the peer that published this message.
+    pub source: Option<PeerId>,
+
+    /// Content of the message.
+    pub data: Vec<u8>,
+
+    /// A random sequence number.
+    pub sequence_number: Option<u64>,
+
+    /// The topic this message belongs to.
+    pub topic: TopicHash,
+}
+
+impl fmt::Debug for Message {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Message")
+            .field(
+                "data",
+                &format_args!("{:<20}", &hex_fmt::HexFmt(&self.data)),
+            )
+            .field("source", &self.source)
+            .field("sequence_number", &self.sequence_number)
+            .field("topic", &self.topic)
+            .finish()
+    }
+}
+/// A subscription received by the gossipsub system.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct Subscription {
+    /// Action to perform.
+    pub action: SubscriptionAction,
+    /// The topic from which to subscribe or unsubscribe.
+    pub topic_hash: TopicHash,
+}
+
+/// Action that a subscription wants to perform.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub enum SubscriptionAction {
+    /// The remote wants to subscribe to the given topic.
+    Subscribe,
+    /// The remote wants to unsubscribe from the given topic.
+    Unsubscribe,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct PeerInfo {
+    pub(crate) peer_id: Option<PeerId>,
+    //TODO add this when RFC: Signed Address Records got added to the spec (see pull request
+    // https://github.com/libp2p/specs/pull/217)
+    //pub signed_peer_record: ?,
+}
+
+/// A control message received by the gossipsub system.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub enum ControlAction {
+    /// Node broadcasts known messages per topic - IHave control message.
+    IHave(IHave),
+    /// The node requests specific message ids (peer_id + sequence_number) - IWant control message.
+    IWant(IWant),
+    /// The node has been added to the mesh - Graft control message.
+    Graft(Graft),
+    /// The node has been removed from the mesh - Prune control message.
+    Prune(Prune),
+}
+
+/// Node broadcasts known messages per topic - IHave control message.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct IHave {
+    /// The topic of the messages.
+    pub(crate) topic_hash: TopicHash,
+    /// A list of known message ids (peer_id + sequence_number) as a string.
+    pub(crate) message_ids: Vec<MessageId>,
+}
+
+/// The node requests specific message ids (peer_id + sequence_number) - IWant control message.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct IWant {
+    /// A list of known message ids (peer_id + sequence_number) as a string.
+    pub(crate) message_ids: Vec<MessageId>,
+}
+
+/// The node has been added to the mesh - Graft control message.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct Graft {
+    /// The mesh topic the peer should be added to.
+    pub(crate) topic_hash: TopicHash,
+}
+
+/// The node has been removed from the mesh - Prune control message.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct Prune {
+    /// The mesh topic the peer should be removed from.
+    pub(crate) topic_hash: TopicHash,
+    /// A list of peers to be proposed to the removed peer as peer exchange.
+    pub(crate) peers: Vec<PeerInfo>,
+    /// The backoff time in seconds before we allow reconnecting.
+    pub(crate) backoff: Option<u64>,
+}
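The control structs above are plain data carried by the behaviour. For instance, a PRUNE offering peer-exchange candidates with a backoff might be built as follows (values illustrative; the fields are `pub(crate)`, so this only compiles inside the gossipsub module):

```rust
let prune = Prune {
    topic_hash: TopicHash::from_raw("example-topic"),
    // Peer-exchange candidates proposed to the pruned peer.
    peers: vec![PeerInfo {
        peer_id: Some(PeerId::random()),
    }],
    // Ask the peer to back off for one minute before re-grafting.
    backoff: Some(60),
};
```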
+/// A Gossipsub RPC message sent.
+#[derive(Debug)]
+pub enum RpcOut {
+    /// Publish a Gossipsub message on the network. The [`Delay`] tags the time we attempted to
+    /// send it.
+    Publish { message: RawMessage, timeout: Delay },
+    /// Forward a Gossipsub message to the network. The [`Delay`] tags the time we attempted to
+    /// send it.
+    Forward { message: RawMessage, timeout: Delay },
+    /// Subscribe to a topic.
+    Subscribe(TopicHash),
+    /// Unsubscribe from a topic.
+    Unsubscribe(TopicHash),
+    /// Send a GRAFT control message.
+    Graft(Graft),
+    /// Send a PRUNE control message.
+    Prune(Prune),
+    /// Send an IHave control message.
+    IHave(IHave),
+    /// Send an IWant control message.
+    IWant(IWant),
+}
+
+impl RpcOut {
+    /// Converts the Gossipsub RPC into its protobuf format.
+    // A convenience function to avoid explicitly specifying types.
+    pub fn into_protobuf(self) -> proto::RPC {
+        self.into()
+    }
+}
+
+impl From<RpcOut> for proto::RPC {
+    /// Converts the RPC into protobuf format.
+    fn from(rpc: RpcOut) -> Self {
+        match rpc {
+            RpcOut::Publish {
+                message,
+                timeout: _,
+            } => proto::RPC {
+                subscriptions: Vec::new(),
+                publish: vec![message.into()],
+                control: None,
+            },
+            RpcOut::Forward {
+                message,
+                timeout: _,
+            } => proto::RPC {
+                publish: vec![message.into()],
+                subscriptions: Vec::new(),
+                control: None,
+            },
+            RpcOut::Subscribe(topic) => proto::RPC {
+                publish: Vec::new(),
+                subscriptions: vec![proto::SubOpts {
+                    subscribe: Some(true),
+                    topic_id: Some(topic.into_string()),
+                }],
+                control: None,
+            },
+            RpcOut::Unsubscribe(topic) => proto::RPC {
+                publish: Vec::new(),
+                subscriptions: vec![proto::SubOpts {
+                    subscribe: Some(false),
+                    topic_id: Some(topic.into_string()),
+                }],
+                control: None,
+            },
+            RpcOut::IHave(IHave {
+                topic_hash,
+                message_ids,
+            }) => proto::RPC {
+                publish: Vec::new(),
+                subscriptions: Vec::new(),
+                control: Some(proto::ControlMessage {
+                    ihave: vec![proto::ControlIHave {
+                        topic_id: Some(topic_hash.into_string()),
+                        message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
+                    }],
+                    iwant: vec![],
+                    graft: vec![],
+                    prune: vec![],
+                }),
+            },
+            RpcOut::IWant(IWant { message_ids }) => proto::RPC {
+                publish: Vec::new(),
+                subscriptions: Vec::new(),
+                control: Some(proto::ControlMessage {
+                    ihave: vec![],
+                    iwant: vec![proto::ControlIWant {
+                        message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
+                    }],
+                    graft: vec![],
+                    prune: vec![],
+                }),
+            },
+            RpcOut::Graft(Graft { topic_hash }) => proto::RPC {
+                publish: Vec::new(),
+                subscriptions: vec![],
+                control: Some(proto::ControlMessage {
+                    ihave: vec![],
+                    iwant: vec![],
+                    graft: vec![proto::ControlGraft {
+                        topic_id: Some(topic_hash.into_string()),
+                    }],
+                    prune: vec![],
+                }),
+            },
+            RpcOut::Prune(Prune {
+                topic_hash,
+                peers,
+                backoff,
+            }) => {
+                proto::RPC {
+                    publish: Vec::new(),
+                    subscriptions: vec![],
+                    control: Some(proto::ControlMessage {
+                        ihave: vec![],
+                        iwant: vec![],
+                        graft: vec![],
+                        prune: vec![proto::ControlPrune {
+                            topic_id: Some(topic_hash.into_string()),
+                            peers: peers
+                                .into_iter()
+                                .map(|info| proto::PeerInfo {
+                                    peer_id: info.peer_id.map(|id| id.to_bytes()),
+                                    // TODO, see https://github.com/libp2p/specs/pull/217
+                                    signed_peer_record: None,
+                                })
+                                .collect(),
+                            backoff,
+                        }],
+                    }),
+                }
+            }
+        }
+    }
+}
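Each `RpcOut` variant maps onto exactly one slot of the protobuf `RPC`, so the encoding is easy to check in isolation. A unit-test-style sketch of the GRAFT case (crate-internal; the test name is hypothetical):

```rust
#[test]
fn graft_encodes_to_a_single_control_graft() {
    let graft = RpcOut::Graft(Graft {
        topic_hash: TopicHash::from_raw("example-topic"),
    });
    let rpc = graft.into_protobuf();
    // A GRAFT carries no publishes or subscriptions, only one control entry.
    let control = rpc.control.expect("GRAFT must produce a control message");
    assert_eq!(control.graft.len(), 1);
    assert!(rpc.publish.is_empty() && rpc.subscriptions.is_empty());
}
```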
+/// An RPC received/sent.
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct Rpc {
+    /// List of messages that were part of this RPC query.
+    pub messages: Vec<RawMessage>,
+    /// List of subscriptions.
+    pub subscriptions: Vec<Subscription>,
+    /// List of Gossipsub control messages.
+    pub control_msgs: Vec<ControlAction>,
+}
+
+impl Rpc {
+    /// Converts the Gossipsub RPC into its protobuf format.
+    // A convenience function to avoid explicitly specifying types.
+    pub fn into_protobuf(self) -> proto::RPC {
+        self.into()
+    }
+}
+
+impl From<Rpc> for proto::RPC {
+    /// Converts the RPC into protobuf format.
+    fn from(rpc: Rpc) -> Self {
+        // Messages
+        let mut publish = Vec::new();
+
+        for message in rpc.messages.into_iter() {
+            let message = proto::Message {
+                from: message.source.map(|m| m.to_bytes()),
+                data: Some(message.data),
+                seqno: message.sequence_number.map(|s| s.to_be_bytes().to_vec()),
+                topic: TopicHash::into_string(message.topic),
+                signature: message.signature,
+                key: message.key,
+            };
+
+            publish.push(message);
+        }
+
+        // subscriptions
+        let subscriptions = rpc
+            .subscriptions
+            .into_iter()
+            .map(|sub| proto::SubOpts {
+                subscribe: Some(sub.action == SubscriptionAction::Subscribe),
+                topic_id: Some(sub.topic_hash.into_string()),
+            })
+            .collect::<Vec<_>>();
+
+        // control messages
+        let mut control = proto::ControlMessage {
+            ihave: Vec::new(),
+            iwant: Vec::new(),
+            graft: Vec::new(),
+            prune: Vec::new(),
+        };
+
+        let empty_control_msg = rpc.control_msgs.is_empty();
+
+        for action in rpc.control_msgs {
+            match action {
+                // collect all ihave messages
+                ControlAction::IHave(IHave {
+                    topic_hash,
+                    message_ids,
+                }) => {
+                    let rpc_ihave = proto::ControlIHave {
+                        topic_id: Some(topic_hash.into_string()),
+                        message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
+                    };
+                    control.ihave.push(rpc_ihave);
+                }
+                ControlAction::IWant(IWant { message_ids }) => {
+                    let rpc_iwant = proto::ControlIWant {
+                        message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
+                    };
+                    control.iwant.push(rpc_iwant);
+                }
+                ControlAction::Graft(Graft { topic_hash }) => {
+                    let rpc_graft = proto::ControlGraft {
+                        topic_id: Some(topic_hash.into_string()),
+                    };
+                    control.graft.push(rpc_graft);
+                }
+                ControlAction::Prune(Prune {
+                    topic_hash,
+                    peers,
+                    backoff,
+                }) => {
+                    let rpc_prune = proto::ControlPrune {
+                        topic_id: Some(topic_hash.into_string()),
+                        peers: peers
+                            .into_iter()
+                            .map(|info| proto::PeerInfo {
+                                peer_id: info.peer_id.map(|id| id.to_bytes()),
+                                // TODO, see https://github.com/libp2p/specs/pull/217
+                                signed_peer_record: None,
+                            })
+                            .collect(),
+                        backoff,
+                    };
+                    control.prune.push(rpc_prune);
+                }
+            }
+        }
+
+        proto::RPC {
+            subscriptions,
+            publish,
+            control: if empty_control_msg {
+                None
+            } else {
+                Some(control)
+            },
+        }
+    }
+}
+
+impl fmt::Debug for Rpc {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut b = f.debug_struct("GossipsubRpc");
+        if !self.messages.is_empty() {
+            b.field("messages", &self.messages);
+        }
+        if !self.subscriptions.is_empty() {
+            b.field("subscriptions", &self.subscriptions);
+        }
+        if !self.control_msgs.is_empty() {
+            b.field("control_msgs", &self.control_msgs);
+        }
+        b.finish()
+    }
+}
+
+impl PeerKind {
+    pub fn as_static_ref(&self) -> &'static str {
+        match self {
+            Self::NotSupported => "Not Supported",
+            Self::Floodsub => "Floodsub",
+            Self::Gossipsub => "Gossipsub v1.0",
+            Self::Gossipsubv1_1 => "Gossipsub v1.1",
+        }
+    }
+}
+
+impl AsRef<str> for PeerKind {
+    fn as_ref(&self) -> &str {
+        self.as_static_ref()
+    }
+}
+
+impl fmt::Display for PeerKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_ref())
+    }
+}
+
+/// `RpcOut` sender that is priority aware.
+#[derive(Debug, Clone)]
+pub(crate) struct RpcSender {
+    cap: usize,
+    len: Arc<AtomicUsize>,
+    pub(crate) priority_sender: Sender<RpcOut>,
+    pub(crate) non_priority_sender: Sender<RpcOut>,
+    priority_receiver: Receiver<RpcOut>,
+    non_priority_receiver: Receiver<RpcOut>,
+}
+
+impl RpcSender {
+    /// Create an `RpcSender`.
+    pub(crate) fn new(cap: usize) -> RpcSender {
+        let (priority_sender, priority_receiver) = async_channel::unbounded();
+        let (non_priority_sender, non_priority_receiver) = async_channel::bounded(cap / 2);
+        let len = Arc::new(AtomicUsize::new(0));
+        RpcSender {
+            cap: cap / 2,
+            len,
+            priority_sender,
+            non_priority_sender,
+            priority_receiver,
+            non_priority_receiver,
+        }
+    }
+
+    /// Create a new receiver attached to this sender.
+    pub(crate) fn new_receiver(&self) -> RpcReceiver {
+        RpcReceiver {
+            priority_len: self.len.clone(),
+            priority: self.priority_receiver.clone().peekable(),
+            non_priority: self.non_priority_receiver.clone().peekable(),
+        }
+    }
+
+    /// Send a `RpcOut::Graft` message to the `RpcReceiver`;
+    /// this is high priority.
+    pub(crate) fn graft(&mut self, graft: Graft) {
+        self.priority_sender
+            .try_send(RpcOut::Graft(graft))
+            .expect("Channel is unbounded and should always be open");
+    }
+
+    /// Send a `RpcOut::Prune` message to the `RpcReceiver`;
+    /// this is high priority.
+    pub(crate) fn prune(&mut self, prune: Prune) {
+        self.priority_sender
+            .try_send(RpcOut::Prune(prune))
+            .expect("Channel is unbounded and should always be open");
+    }
+
+    /// Send a `RpcOut::IHave` message to the `RpcReceiver`;
+    /// this is low priority. If the queue is full an Err is returned.
+    #[allow(clippy::result_large_err)]
+    pub(crate) fn ihave(&mut self, ihave: IHave) -> Result<(), RpcOut> {
+        self.non_priority_sender
+            .try_send(RpcOut::IHave(ihave))
+            .map_err(|err| err.into_inner())
+    }
+
+    /// Send a `RpcOut::IWant` message to the `RpcReceiver`;
+    /// this is low priority. If the queue is full an Err is returned.
+    #[allow(clippy::result_large_err)]
+    pub(crate) fn iwant(&mut self, iwant: IWant) -> Result<(), RpcOut> {
+        self.non_priority_sender
+            .try_send(RpcOut::IWant(iwant))
+            .map_err(|err| err.into_inner())
+    }
+
+    /// Send a `RpcOut::Subscribe` message to the `RpcReceiver`;
+    /// this is high priority.
+    pub(crate) fn subscribe(&mut self, topic: TopicHash) {
+        self.priority_sender
+            .try_send(RpcOut::Subscribe(topic))
+            .expect("Channel is unbounded and should always be open");
+    }
+
+    /// Send a `RpcOut::Unsubscribe` message to the `RpcReceiver`;
+    /// this is high priority.
+    pub(crate) fn unsubscribe(&mut self, topic: TopicHash) {
+        self.priority_sender
+            .try_send(RpcOut::Unsubscribe(topic))
+            .expect("Channel is unbounded and should always be open");
+    }
+
+    /// Send a `RpcOut::Publish` message to the `RpcReceiver`;
+    /// this is high priority. If the number of queued publishes has reached the cap,
+    /// an `Err` is returned.
+    pub(crate) fn publish(
+        &mut self,
+        message: RawMessage,
+        timeout: Duration,
+        metrics: Option<&mut Metrics>,
+    ) -> Result<(), ()> {
+        if self.len.load(Ordering::Relaxed) >= self.cap {
+            return Err(());
+        }
+        self.priority_sender
+            .try_send(RpcOut::Publish {
+                message: message.clone(),
+                timeout: Delay::new(timeout),
+            })
+            .expect("Channel is unbounded and should always be open");
+        self.len.fetch_add(1, Ordering::Relaxed);
+
+        if let Some(m) = metrics {
+            m.msg_sent(&message.topic, message.raw_protobuf_len());
+        }
+
+        Ok(())
+    }
+
+    /// Send a `RpcOut::Forward` message to the `RpcReceiver`;
+    /// this is low priority. If the queue is full the message is discarded.
+    pub(crate) fn forward(
+        &mut self,
+        message: RawMessage,
+        timeout: Duration,
+        metrics: Option<&mut Metrics>,
+    ) -> Result<(), ()> {
+        self.non_priority_sender
+            .try_send(RpcOut::Forward {
+                message: message.clone(),
+                timeout: Delay::new(timeout),
+            })
+            .map_err(|_| ())?;
+
+        if let Some(m) = metrics {
+            m.msg_sent(&message.topic, message.raw_protobuf_len());
+        }
+
+        Ok(())
+    }
+
+    /// Returns the current size of the priority queue.
+    pub(crate) fn priority_len(&self) -> usize {
+        self.len.load(Ordering::Relaxed)
+    }
+
+    /// Returns the current size of the non-priority queue.
+    pub(crate) fn non_priority_len(&self) -> usize {
+        self.non_priority_sender.len()
+    }
+}
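Taken together: high-priority traffic (GRAFT, PRUNE, SUBSCRIBE, UNSUBSCRIBE, and capacity-checked publishes) rides an unbounded channel, while IHAVE, IWANT, and forwards ride a bounded one and are handed back on overflow. A crate-internal usage sketch (capacity value illustrative):

```rust
// A sender with room for 8 publishes and 8 low-priority items (the cap is halved internally).
let mut sender = RpcSender::new(16);
let _receiver = sender.new_receiver();

// Control traffic goes over the unbounded priority channel and cannot be dropped.
sender.subscribe(TopicHash::from_raw("example-topic"));

// Gossip metadata is best-effort: a full non-priority queue hands the message back.
if let Err(RpcOut::IHave(ihave)) = sender.ihave(IHave {
    topic_hash: TopicHash::from_raw("example-topic"),
    message_ids: vec![MessageId::new(b"abc")],
}) {
    // Queue full: the caller may drop the IHAVE or retry on the next heartbeat.
    let _ = ihave;
}
```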
+
+/// `RpcOut` receiver that is priority aware.
+#[derive(Debug)]
+pub struct RpcReceiver {
+    /// The number of queued `Publish` messages in the priority queue, shared with the sender.
+    pub(crate) priority_len: Arc<AtomicUsize>,
+    /// The priority queue receiver.
+    pub(crate) priority: Peekable<Receiver<RpcOut>>,
+    /// The non-priority queue receiver.
+    pub(crate) non_priority: Peekable<Receiver<RpcOut>>,
+}
+
+impl RpcReceiver {
+    // Peek the next message in the queues and return it if its timeout has elapsed.
+    // Returns `None` if there aren't any more messages on the stream or none is stale.
+    pub(crate) fn poll_stale(&mut self, cx: &mut Context<'_>) -> Poll<Option<RpcOut>> {
+        // Peek the priority queue.
+        let priority = match Pin::new(&mut self.priority).poll_peek_mut(cx) {
+            Poll::Ready(Some(RpcOut::Publish {
+                message: _,
+                ref mut timeout,
+            })) => {
+                if Pin::new(timeout).poll(cx).is_ready() {
+                    // Return the stale message.
+                    let dropped = futures::ready!(self.priority.poll_next_unpin(cx))
+                        .expect("There should be a message");
+                    return Poll::Ready(Some(dropped));
+                }
+                Poll::Ready(None)
+            }
+            poll => poll,
+        };
+
+        let non_priority = match Pin::new(&mut self.non_priority).poll_peek_mut(cx) {
+            Poll::Ready(Some(RpcOut::Forward {
+                message: _,
+                ref mut timeout,
+            })) => {
+                if Pin::new(timeout).poll(cx).is_ready() {
+                    // Return the stale message.
+                    let dropped = futures::ready!(self.non_priority.poll_next_unpin(cx))
+                        .expect("There should be a message");
+                    return Poll::Ready(Some(dropped));
+                }
+                Poll::Ready(None)
+            }
+            poll => poll,
+        };
+
+        match (priority, non_priority) {
+            (Poll::Ready(None), Poll::Ready(None)) => Poll::Ready(None),
+            _ => Poll::Pending,
+        }
+    }
+
+    /// Poll the queues and return true if both are empty.
+    pub(crate) fn poll_is_empty(&mut self, cx: &mut Context<'_>) -> bool {
+        matches!(
+            (
+                Pin::new(&mut self.priority).poll_peek(cx),
+                Pin::new(&mut self.non_priority).poll_peek(cx),
+            ),
+            (Poll::Ready(None), Poll::Ready(None))
+        )
+    }
+}
+
+impl Stream for RpcReceiver {
+    type Item = RpcOut;
+
+    fn poll_next(
+        mut self: std::pin::Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Option<Self::Item>> {
+        // The priority queue is polled first.
+        if let Poll::Ready(rpc) = Pin::new(&mut self.priority).poll_next(cx) {
+            if let Some(RpcOut::Publish { .. }) = rpc {
+                self.priority_len.fetch_sub(1, Ordering::Relaxed);
+            }
+            return Poll::Ready(rpc);
+        }
+        // Then the non-priority queue is polled.
+ Pin::new(&mut self.non_priority).poll_next(cx) + } +} diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index ea1ab07e3..8cf0d95f2 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -10,6 +10,7 @@ pub mod service; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; +pub mod gossipsub; pub mod listen_addr; pub mod metrics; pub mod peer_manager; @@ -114,8 +115,8 @@ pub use prometheus_client; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; +pub use gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p; -pub use libp2p::gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{multiaddr, Multiaddr}; pub use metrics::scrape_discovery_metrics; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index ae02b689d..fc441f253 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,9 +1,10 @@ pub use lighthouse_metrics::*; lazy_static! { - pub static ref NAT_OPEN: Result = try_create_int_counter( + pub static ref NAT_OPEN: Result = try_create_int_gauge_vec( "nat_open", - "An estimate indicating if the local node is exposed to the internet." + "An estimate indicating if the local node is reachable from external nodes", + &["protocol"] ); pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( "libp2p_address_update_total", @@ -14,6 +15,9 @@ lazy_static! { "Count of libp2p peers currently connected" ); + pub static ref PEERS_CONNECTED_MULTI: Result = + try_create_int_gauge_vec("libp2p_peers_multi", "Count of libp2p peers currently connected", &["direction", "transport"]); + pub static ref TCP_PEERS_CONNECTED: Result = try_create_int_gauge( "libp2p_tcp_peers", "Count of libp2p peers currently connected via TCP" @@ -32,13 +36,10 @@ lazy_static! { "libp2p_peer_disconnect_event_total", "Count of libp2p peer disconnect events" ); - pub static ref DISCOVERY_SENT_BYTES: Result = try_create_int_gauge( - "discovery_sent_bytes", - "The number of bytes sent in discovery" - ); - pub static ref DISCOVERY_RECV_BYTES: Result = try_create_int_gauge( - "discovery_recv_bytes", - "The number of bytes received in discovery" + pub static ref DISCOVERY_BYTES: Result = try_create_int_gauge_vec( + "discovery_bytes", + "The number of bytes sent and received in discovery", + &["direction"] ); pub static ref DISCOVERY_QUEUE: Result = try_create_int_gauge( "discovery_queue_size", @@ -135,17 +136,6 @@ lazy_static! { &["type"] ); - /* - * Inbound/Outbound peers - */ - /// The number of peers that dialed us. - pub static ref NETWORK_INBOUND_PEERS: Result = - try_create_int_gauge("network_inbound_peers","The number of peers that are currently connected that have dialed us."); - - /// The number of peers that we dialed us. - pub static ref NETWORK_OUTBOUND_PEERS: Result = - try_create_int_gauge("network_outbound_peers","The number of peers that are currently connected that we dialed."); - /* * Peer Reporting */ @@ -156,31 +146,11 @@ lazy_static! { ); } -/// Checks if we consider the NAT open. -/// -/// Conditions for an open NAT: -/// 1. We have 1 or more SOCKET_UPDATED messages. 
This occurs when discovery has a majority of -/// users reporting an external port and our ENR gets updated. -/// 2. We have 0 SOCKET_UPDATED messages (can be true if the port was correct on boot), then we -/// rely on whether we have any inbound messages. If we have no socket update messages, but -/// manage to get at least one inbound peer, we are exposed correctly. -pub fn check_nat() { - // NAT is already deemed open. - if NAT_OPEN.as_ref().map(|v| v.get()).unwrap_or(0) != 0 { - return; - } - if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) != 0 - || NETWORK_INBOUND_PEERS.as_ref().map(|v| v.get()).unwrap_or(0) != 0_i64 - { - inc_counter(&NAT_OPEN); - } -} - pub fn scrape_discovery_metrics() { let metrics = discv5::metrics::Metrics::from(discv5::Discv5::::raw_metrics()); set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second); set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64); - set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64); - set_gauge(&DISCOVERY_RECV_BYTES, metrics.bytes_recv as i64); + set_gauge_vec(&DISCOVERY_BYTES, &["inbound"], metrics.bytes_recv as i64); + set_gauge_vec(&DISCOVERY_BYTES, &["outbound"], metrics.bytes_sent as i64); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/config.rs b/beacon_node/lighthouse_network/src/peer_manager/config.rs index 6c5523de4..d2fc7a8ab 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/config.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/config.rs @@ -18,6 +18,8 @@ pub struct Config { pub discovery_enabled: bool, /// Whether metrics are enabled. pub metrics_enabled: bool, + /// Whether quic is enabled. + pub quic_enabled: bool, /// Target number of peers to connect to. pub target_peer_count: usize, @@ -37,6 +39,7 @@ impl Default for Config { Config { discovery_enabled: true, metrics_enabled: false, + quic_enabled: true, target_peer_count: DEFAULT_TARGET_PEERS, status_interval: DEFAULT_STATUS_INTERVAL, ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4316c0d07..92f876ee0 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -10,7 +10,7 @@ use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; use lru_cache::LRUTimeCache; -use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; +use peerdb::{BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; use smallvec::SmallVec; @@ -18,7 +18,6 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; -use strum::IntoEnumIterator; use types::{EthSpec, SyncSubnetId}; pub use libp2p::core::Multiaddr; @@ -104,6 +103,8 @@ pub struct PeerManager { discovery_enabled: bool, /// Keeps track if the current instance is reporting metrics or not. metrics_enabled: bool, + /// Keeps track of whether the QUIC protocol is enabled or not. + quic_enabled: bool, /// The logger associated with the `PeerManager`. 
log: slog::Logger, } @@ -149,6 +150,7 @@ impl PeerManager { status_interval, ping_interval_inbound, ping_interval_outbound, + quic_enabled, } = cfg; // Set up the peer manager heartbeat interval @@ -167,6 +169,7 @@ impl PeerManager { heartbeat, discovery_enabled, metrics_enabled, + quic_enabled, log: log.clone(), }) } @@ -715,46 +718,6 @@ impl PeerManager { } } - // This function updates metrics for all connected peers. - fn update_connected_peer_metrics(&self) { - // Do nothing if we don't have metrics enabled. - if !self.metrics_enabled { - return; - } - - let mut connected_peer_count = 0; - let mut inbound_connected_peers = 0; - let mut outbound_connected_peers = 0; - let mut clients_per_peer = HashMap::new(); - - for (_peer, peer_info) in self.network_globals.peers.read().connected_peers() { - connected_peer_count += 1; - if let PeerConnectionStatus::Connected { n_in, .. } = peer_info.connection_status() { - if *n_in > 0 { - inbound_connected_peers += 1; - } else { - outbound_connected_peers += 1; - } - } - *clients_per_peer - .entry(peer_info.client().kind.to_string()) - .or_default() += 1; - } - - metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peer_count); - metrics::set_gauge(&metrics::NETWORK_INBOUND_PEERS, inbound_connected_peers); - metrics::set_gauge(&metrics::NETWORK_OUTBOUND_PEERS, outbound_connected_peers); - - for client_kind in ClientKind::iter() { - let value = clients_per_peer.get(&client_kind.to_string()).unwrap_or(&0); - metrics::set_gauge_vec( - &metrics::PEERS_PER_CLIENT, - &[client_kind.as_ref()], - *value as i64, - ); - } - } - /* Internal functions */ /// Sets a peer as connected as long as their reputation allows it @@ -917,8 +880,7 @@ impl PeerManager { let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); let wanted_peers = if peer_count < self.target_peers.saturating_sub(dialing_peers) { // We need more peers in general. - // Note: The maximum discovery query is bounded by `Discovery`. - self.target_peers.saturating_sub(dialing_peers) - peer_count + self.max_peers().saturating_sub(dialing_peers) - peer_count } else if outbound_only_peer_count < self.min_outbound_only_peers() && peer_count < self.max_outbound_dialing_peers() { diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index da205d169..5dda78a01 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -96,10 +96,16 @@ impl NetworkBehaviour for PeerManager { if let Some(enr) = self.peers_to_dial.pop() { let peer_id = enr.peer_id(); self.inject_peer_connection(&peer_id, ConnectingType::Dialing, Some(enr.clone())); - let quic_multiaddrs = enr.multiaddr_quic(); - if !quic_multiaddrs.is_empty() { - debug!(self.log, "Dialing QUIC supported peer"; "peer_id"=> %peer_id, "quic_multiaddrs" => ?quic_multiaddrs); - } + + let quic_multiaddrs = if self.quic_enabled { + let quic_multiaddrs = enr.multiaddr_quic(); + if !quic_multiaddrs.is_empty() { + debug!(self.log, "Dialing QUIC supported peer"; "peer_id"=> %peer_id, "quic_multiaddrs" => ?quic_multiaddrs); + } + quic_multiaddrs + } else { + Vec::new() + }; // Prioritize Quic connections over Tcp ones. 
                let multiaddrs = quic_multiaddrs
@@ -148,8 +154,8 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
                 self.on_dial_failure(peer_id);
             }
             FromSwarm::ExternalAddrConfirmed(_) => {
-                // TODO: we likely want to check this against our assumed external tcp
-                // address
+                // An external address has been confirmed, which means we are able to do NAT traversal.
+                metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p"], 1);
             }
             _ => {
                 // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release
@@ -237,14 +243,15 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             self.events.push(PeerManagerEvent::MetaData(peer_id));
         }
-        // Check NAT if metrics are enabled
-        if self.network_globals.local_enr.read().udp4().is_some() {
-            metrics::check_nat();
-        }
-
         // increment prometheus metrics
         if self.metrics_enabled {
             let remote_addr = endpoint.get_remote_address();
+            let direction = if endpoint.is_dialer() {
+                "outbound"
+            } else {
+                "inbound"
+            };
+
             match remote_addr.iter().find(|proto| {
                 matches!(
                     proto,
@@ -252,10 +259,10 @@
                 )
             }) {
                 Some(multiaddr::Protocol::QuicV1) => {
-                    metrics::inc_gauge(&metrics::QUIC_PEERS_CONNECTED);
+                    metrics::inc_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "quic"]);
                 }
                 Some(multiaddr::Protocol::Tcp(_)) => {
-                    metrics::inc_gauge(&metrics::TCP_PEERS_CONNECTED);
+                    metrics::inc_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "tcp"]);
                 }
                 Some(_) => unreachable!(),
                 None => {
@@ -263,7 +270,7 @@
                 }
             };
-            self.update_connected_peer_metrics();
+            metrics::inc_gauge(&metrics::PEERS_CONNECTED);
             metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
         }
@@ -333,6 +340,12 @@
         let remote_addr = endpoint.get_remote_address();
         // Update the prometheus metrics
         if self.metrics_enabled {
+            let direction = if endpoint.is_dialer() {
+                "outbound"
+            } else {
+                "inbound"
+            };
+
             match remote_addr.iter().find(|proto| {
                 matches!(
                     proto,
@@ -340,15 +353,16 @@
                 )
            }) {
                Some(multiaddr::Protocol::QuicV1) => {
-                    metrics::dec_gauge(&metrics::QUIC_PEERS_CONNECTED);
+                    metrics::dec_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "quic"]);
                }
                Some(multiaddr::Protocol::Tcp(_)) => {
-                    metrics::dec_gauge(&metrics::TCP_PEERS_CONNECTED);
+                    metrics::dec_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "tcp"]);
                }
                // If it's an unknown protocol we already logged when connection was established.
                _ => {}
            };
-            self.update_connected_peer_metrics();
+            // Legacy standard metrics.
+ metrics::dec_gauge(&metrics::PEERS_CONNECTED); metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index a6bf3ffec..ebb355fef 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -168,7 +168,7 @@ impl PeerDB { fn score_state_banned_or_disconnected(&self, peer_id: &PeerId) -> bool { if let Some(peer) = self.peers.get(peer_id) { match peer.score_state() { - ScoreState::Banned | ScoreState::Disconnected => true, + ScoreState::Banned | ScoreState::ForcedDisconnect => true, _ => self.ip_is_banned(peer).is_some(), } } else { @@ -756,8 +756,8 @@ impl PeerDB { // Update the connection state match direction { - ConnectionDirection::Incoming => info.connect_ingoing(Some(seen_address)), - ConnectionDirection::Outgoing => info.connect_outgoing(Some(seen_address)), + ConnectionDirection::Incoming => info.connect_ingoing(seen_address), + ConnectionDirection::Outgoing => info.connect_outgoing(seen_address), } } @@ -1062,12 +1062,12 @@ impl PeerDB { log: &slog::Logger, ) -> ScoreTransitionResult { match (info.score_state(), previous_state) { - (ScoreState::Banned, ScoreState::Healthy | ScoreState::Disconnected) => { + (ScoreState::Banned, ScoreState::Healthy | ScoreState::ForcedDisconnect) => { debug!(log, "Peer has been banned"; "peer_id" => %peer_id, "score" => %info.score()); ScoreTransitionResult::Banned } - (ScoreState::Disconnected, ScoreState::Banned | ScoreState::Healthy) => { - debug!(log, "Peer transitioned to disconnect state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state); + (ScoreState::ForcedDisconnect, ScoreState::Banned | ScoreState::Healthy) => { + debug!(log, "Peer transitioned to forced disconnect score state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); // disconnect the peer if it's currently connected or dialing if info.is_connected_or_dialing() { ScoreTransitionResult::Disconnected @@ -1079,18 +1079,20 @@ impl PeerDB { ScoreTransitionResult::NoAction } } - (ScoreState::Healthy, ScoreState::Disconnected) => { - debug!(log, "Peer transitioned to healthy state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state); + (ScoreState::Healthy, ScoreState::ForcedDisconnect) => { + debug!(log, "Peer transitioned to healthy score state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); ScoreTransitionResult::NoAction } (ScoreState::Healthy, ScoreState::Banned) => { - debug!(log, "Peer transitioned to healthy state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state); + debug!(log, "Peer transitioned to healthy score state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); // unban the peer if it was previously banned. ScoreTransitionResult::Unbanned } // Explicitly ignore states that haven't transitioned. 
(ScoreState::Healthy, ScoreState::Healthy) => ScoreTransitionResult::NoAction, - (ScoreState::Disconnected, ScoreState::Disconnected) => ScoreTransitionResult::NoAction, + (ScoreState::ForcedDisconnect, ScoreState::ForcedDisconnect) => { + ScoreTransitionResult::NoAction + } (ScoreState::Banned, ScoreState::Banned) => ScoreTransitionResult::NoAction, } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index 1178dbcb9..9450584d6 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -33,6 +33,8 @@ pub enum ClientKind { Prysm, /// A lodestar node. Lodestar, + /// A Caplin node. + Caplin, /// An unknown client. Unknown, } @@ -88,6 +90,7 @@ impl std::fmt::Display for Client { self.version, self.os_version ), ClientKind::Lodestar => write!(f, "Lodestar: version: {}", self.version), + ClientKind::Caplin => write!(f, "Caplin"), ClientKind::Unknown => { if let Some(agent_string) = &self.agent_string { write!(f, "Unknown: {}", agent_string) @@ -109,11 +112,11 @@ impl std::fmt::Display for ClientKind { // kind and it's associated version and the OS kind. fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String) { let mut agent_split = agent_version.split('/'); + let mut version = String::from("unknown"); + let mut os_version = String::from("unknown"); match agent_split.next() { Some("Lighthouse") => { let kind = ClientKind::Lighthouse; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if let Some(agent_version) = agent_split.next() { version = agent_version.into(); if let Some(agent_os_version) = agent_split.next() { @@ -124,8 +127,6 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("teku") => { let kind = ClientKind::Teku; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if agent_split.next().is_some() { if let Some(agent_version) = agent_split.next() { version = agent_version.into(); @@ -138,13 +139,10 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("github.com") => { let kind = ClientKind::Prysm; - let unknown = String::from("unknown"); - (kind, unknown.clone(), unknown) + (kind, version, os_version) } Some("Prysm") => { let kind = ClientKind::Prysm; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if agent_split.next().is_some() { if let Some(agent_version) = agent_split.next() { version = agent_version.into(); @@ -157,8 +155,6 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("nimbus") => { let kind = ClientKind::Nimbus; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if agent_split.next().is_some() { if let Some(agent_version) = agent_split.next() { version = agent_version.into(); @@ -171,8 +167,6 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("nim-libp2p") => { let kind = ClientKind::Nimbus; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if let Some(agent_version) = agent_split.next() { version = agent_version.into(); if let Some(agent_os_version) = agent_split.next() { @@ -183,8 +177,6 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("js-libp2p") | Some("lodestar") => { let kind = 
ClientKind::Lodestar; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if let Some(agent_version) = agent_split.next() { version = agent_version.into(); if let Some(agent_os_version) = agent_split.next() { @@ -193,6 +185,14 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } (kind, version, os_version) } + Some("erigon") => { + let client_kind = if let Some("caplin") = agent_split.next() { + ClientKind::Caplin + } else { + ClientKind::Unknown + }; + (client_kind, version, os_version) + } _ => { let unknown = String::from("unknown"); (ClientKind::Unknown, unknown.clone(), unknown) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 44c54511d..c5e13c515 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -307,13 +307,13 @@ impl PeerInfo { /// Checks if the peer is outbound-only pub fn is_outbound_only(&self) -> bool { - matches!(self.connection_status, Connected {n_in, n_out} if n_in == 0 && n_out > 0) + matches!(self.connection_status, Connected {n_in, n_out, ..} if n_in == 0 && n_out > 0) } /// Returns the number of connections with this peer. pub fn connections(&self) -> (u8, u8) { match self.connection_status { - Connected { n_in, n_out } => (n_in, n_out), + Connected { n_in, n_out, .. } => (n_in, n_out), _ => (0, 0), } } @@ -421,7 +421,9 @@ impl PeerInfo { /// Modifies the status to Connected and increases the number of ingoing /// connections by one - pub(super) fn connect_ingoing(&mut self, seen_multiaddr: Option) { + pub(super) fn connect_ingoing(&mut self, multiaddr: Multiaddr) { + self.seen_multiaddrs.insert(multiaddr.clone()); + match &mut self.connection_status { Connected { n_in, .. } => *n_in += 1, Disconnected { .. } @@ -429,19 +431,20 @@ impl PeerInfo { | Dialing { .. } | Disconnecting { .. } | Unknown => { - self.connection_status = Connected { n_in: 1, n_out: 0 }; + self.connection_status = Connected { + n_in: 1, + n_out: 0, + multiaddr, + }; self.connection_direction = Some(ConnectionDirection::Incoming); } } - - if let Some(multiaddr) = seen_multiaddr { - self.seen_multiaddrs.insert(multiaddr); - } } /// Modifies the status to Connected and increases the number of outgoing /// connections by one - pub(super) fn connect_outgoing(&mut self, seen_multiaddr: Option) { + pub(super) fn connect_outgoing(&mut self, multiaddr: Multiaddr) { + self.seen_multiaddrs.insert(multiaddr.clone()); match &mut self.connection_status { Connected { n_out, .. } => *n_out += 1, Disconnected { .. } @@ -449,13 +452,14 @@ impl PeerInfo { | Dialing { .. } | Disconnecting { .. } | Unknown => { - self.connection_status = Connected { n_in: 0, n_out: 1 }; + self.connection_status = Connected { + n_in: 0, + n_out: 1, + multiaddr, + }; self.connection_direction = Some(ConnectionDirection::Outgoing); } } - if let Some(multiaddr) = seen_multiaddr { - self.seen_multiaddrs.insert(multiaddr); - } } #[cfg(test)] @@ -487,6 +491,8 @@ pub enum ConnectionDirection { pub enum PeerConnectionStatus { /// The peer is connected. Connected { + /// The multiaddr that we are connected via. + multiaddr: Multiaddr, /// number of ingoing connections. n_in: u8, /// number of outgoing connections. 
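Since the `Connected` variant now carries the connection's `Multiaddr`, callers can recover the transport a peer was reached over without consulting the swarm. A hedged sketch (the helper is hypothetical, not part of this diff):

```rust
use libp2p::multiaddr::Protocol;

// Report whether a connected peer was reached over QUIC, by inspecting the
// multiaddr stored on the `Connected` variant.
fn is_quic_peer(status: &PeerConnectionStatus) -> bool {
    match status {
        PeerConnectionStatus::Connected { multiaddr, .. } => {
            multiaddr.iter().any(|p| matches!(p, Protocol::QuicV1))
        }
        _ => false,
    }
}
```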
@@ -522,7 +528,12 @@ impl Serialize for PeerConnectionStatus { fn serialize(&self, serializer: S) -> Result { let mut s = serializer.serialize_struct("connection_status", 6)?; match self { - Connected { n_in, n_out } => { + Connected { + n_in, + n_out, + multiaddr, + } => { + s.serialize_field("multiaddr", multiaddr)?; s.serialize_field("status", "connected")?; s.serialize_field("connections_in", n_in)?; s.serialize_field("connections_out", n_out)?; diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index 877d72581..ba9bd3147 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -104,7 +104,7 @@ pub(crate) enum ScoreState { /// We are content with the peers performance. We permit connections and messages. Healthy, /// The peer should be disconnected. We allow re-connections if the peer is persistent. - Disconnected, + ForcedDisconnect, /// The peer is banned. We disallow new connections until it's score has decayed into a /// tolerable threshold. Banned, @@ -115,7 +115,7 @@ impl std::fmt::Display for ScoreState { match self { ScoreState::Healthy => write!(f, "Healthy"), ScoreState::Banned => write!(f, "Banned"), - ScoreState::Disconnected => write!(f, "Disconnected"), + ScoreState::ForcedDisconnect => write!(f, "Disconnected"), } } } @@ -313,7 +313,7 @@ impl Score { pub(crate) fn state(&self) -> ScoreState { match self.score() { x if x <= MIN_SCORE_BEFORE_BAN => ScoreState::Banned, - x if x <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::Disconnected, + x if x <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::ForcedDisconnect, _ => ScoreState::Healthy, } } @@ -407,7 +407,7 @@ mod tests { assert!(score.score() < 0.0); assert_eq!(score.state(), ScoreState::Healthy); score.test_add(-1.0001); - assert_eq!(score.state(), ScoreState::Disconnected); + assert_eq!(score.state(), ScoreState::ForcedDisconnect); } #[test] diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 03f4761ff..f4971c18d 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -24,7 +24,7 @@ use std::{ task::{Context, Poll}, time::{Duration, Instant}, }; -use tokio::time::{sleep_until, Instant as TInstant, Sleep}; +use tokio::time::{sleep, Sleep}; use tokio_util::time::{delay_queue, DelayQueue}; use types::{EthSpec, ForkContext}; @@ -32,7 +32,7 @@ use types::{EthSpec, ForkContext}; const IO_ERROR_RETRIES: u8 = 3; /// Maximum time given to the handler to perform shutdown operations. -const SHUTDOWN_TIMEOUT_SECS: u8 = 15; +const SHUTDOWN_TIMEOUT_SECS: u64 = 15; /// Maximum number of simultaneous inbound substreams we keep for this peer. const MAX_INBOUND_SUBSTREAMS: usize = 32; @@ -266,9 +266,9 @@ where self.dial_queue.push((id, OutboundRequest::Goodbye(reason))); } - self.state = HandlerState::ShuttingDown(Box::pin(sleep_until( - TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64), - ))); + self.state = HandlerState::ShuttingDown(Box::pin(sleep(Duration::from_secs( + SHUTDOWN_TIMEOUT_SECS, + )))); } } @@ -349,23 +349,7 @@ where } fn connection_keep_alive(&self) -> bool { - // Check that we don't have outbound items pending for dialing, nor dialing, nor - // established. Also check that there are no established inbound substreams. - // Errors and events need to be reported back, so check those too. 
- match self.state { - HandlerState::ShuttingDown(_) => { - !self.dial_queue.is_empty() - || !self.outbound_substreams.is_empty() - || !self.inbound_substreams.is_empty() - || !self.events_out.is_empty() - || !self.dial_negotiated != 0 - } - HandlerState::Deactivated => { - // Regardless of events, the timeout has expired. Force the disconnect. - false - } - _ => true, - } + !matches!(self.state, HandlerState::Deactivated) } fn poll( @@ -395,7 +379,7 @@ where match delay.as_mut().poll(cx) { Poll::Ready(_) => { self.state = HandlerState::Deactivated; - debug!(self.log, "Handler deactivated"); + debug!(self.log, "Shutdown timeout elapsed, Handler deactivated"); return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( HandlerEvent::Close(RPCError::Disconnected), )); @@ -844,6 +828,8 @@ where && self.events_out.is_empty() && self.dial_negotiated == 0 { + debug!(self.log, "Goodbye sent, Handler deactivated"); + self.state = HandlerState::Deactivated; return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( HandlerEvent::Close(RPCError::Disconnected), )); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 04ec6bac4..cd3579ad6 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -384,7 +384,7 @@ pub enum RPCResponse { /// A response to a get BLOBS_BY_RANGE request BlobsByRange(Arc>), - /// A response to a get LIGHTCLIENT_BOOTSTRAP request. + /// A response to a get LIGHT_CLIENT_BOOTSTRAP request. LightClientBootstrap(LightClientBootstrap), /// A response to a get BLOBS_BY_ROOT request. @@ -426,7 +426,7 @@ pub enum RPCCodedResponse { StreamTermination(ResponseTermination), } -/// Request a light_client_bootstrap for lightclients peers. +/// Request a light_client_bootstrap for light_clients peers. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct LightClientBootstrapRequest { pub root: Hash256, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 3606438fb..335d4de1a 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -223,7 +223,7 @@ where fn handle_established_inbound_connection( &mut self, - _connection_id: ConnectionId, + connection_id: ConnectionId, peer_id: PeerId, _local_addr: &libp2p::Multiaddr, _remote_addr: &libp2p::Multiaddr, @@ -238,9 +238,9 @@ where }, (), ); - // NOTE: this is needed because PeerIds have interior mutability. - let peer_repr = peer_id.to_string(); - let log = self.log.new(slog::o!("peer_id" => peer_repr)); + let log = self + .log + .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( protocol, self.fork_context.clone(), @@ -253,7 +253,7 @@ where fn handle_established_outbound_connection( &mut self, - _connection_id: ConnectionId, + connection_id: ConnectionId, peer_id: PeerId, _addr: &libp2p::Multiaddr, _role_override: libp2p::core::Endpoint, @@ -269,9 +269,10 @@ where (), ); - // NOTE: this is needed because PeerIds have interior mutability. 
- let peer_repr = peer_id.to_string(); - let log = self.log.new(slog::o!("peer_id" => peer_repr)); + let log = self + .log + .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); + let handler = RPCHandler::new( protocol, self.fork_context.clone(), @@ -309,11 +310,16 @@ where Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols let protocol = req.versioned_protocol().protocol(); - if matches!(protocol, Protocol::BlocksByRange) - || matches!(protocol, Protocol::BlobsByRange) - { - debug!(self.log, "By range request will never be processed"; "request" => %req, "protocol" => %protocol); + if matches!( + protocol, + Protocol::BlocksByRange + | Protocol::BlobsByRange + | Protocol::BlocksByRoot + | Protocol::BlobsByRoot + ) { + debug!(self.log, "Request too large to process"; "request" => %req, "protocol" => %protocol); } else { + // Other protocols shouldn't be sending large messages, we should flag the peer kind crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); } // send an error code to the peer. diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 8dd750429..5a04d6c2d 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -3,9 +3,10 @@ use crate::peer_manager::PeerManager; use crate::rpc::{ReqId, RPC}; use crate::types::SnappyTransform; -use libp2p::gossipsub; +use crate::gossipsub; use libp2p::identify; use libp2p::swarm::NetworkBehaviour; +use libp2p::upnp::tokio::Behaviour as Upnp; use types::EthSpec; use super::api_types::RequestId; @@ -22,8 +23,8 @@ where { /// Keep track of active and pending connections to enforce hard limits. pub connection_limits: libp2p::connection_limits::Behaviour, - /// The routing pub-sub mechanism for eth2. - pub gossipsub: Gossipsub, + /// The peer manager that keeps track of peer's reputation and status. + pub peer_manager: PeerManager, /// The Eth2 RPC specified in the wire-0 protocol. pub eth2_rpc: RPC, TSpec>, /// Discv5 Discovery protocol. @@ -32,6 +33,8 @@ where // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. pub identify: identify::Behaviour, - /// The peer manager that keeps track of peer's reputation and status. - pub peer_manager: PeerManager, + /// Libp2p UPnP port mapping. + pub upnp: Upnp, + /// The routing pub-sub mechanism for eth2. 
+ pub gossipsub: Gossipsub, } diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index 47c2c9e56..a8299d707 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -1,9 +1,9 @@ -use crate::types::{GossipEncoding, GossipKind, GossipTopic}; -use crate::{error, TopicHash}; -use libp2p::gossipsub::{ +use crate::gossipsub::{ Config as GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, }; +use crate::types::{GossipEncoding, GossipKind, GossipTopic}; +use crate::{error, TopicHash}; use std::cmp::max; use std::collections::HashMap; use std::marker::PhantomData; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 2b20c76cf..aed9d54ba 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -4,6 +4,10 @@ use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, }; +use crate::gossipsub::{ + self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, + TopicScoreParams, +}; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, @@ -14,9 +18,9 @@ use crate::rpc::*; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; use crate::types::{ - fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, - SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, - CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, + attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding, + GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, + BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; use crate::EnrExt; use crate::Eth2Enr; @@ -24,14 +28,9 @@ use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; -use libp2p::gossipsub::{ - self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, - TopicScoreParams, -}; -use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; +use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; use libp2p::swarm::{Swarm, SwarmEvent}; -use libp2p::PeerId; -use libp2p::{identify, SwarmBuilder}; +use libp2p::{identify, PeerId, SwarmBuilder}; use slog::{crit, debug, info, o, trace, warn}; use std::path::PathBuf; use std::pin::Pin; @@ -51,7 +50,7 @@ mod gossip_cache; pub mod gossipsub_scoring_parameters; pub mod utils; /// The number of peers we target per subnet for discovery queries. -pub const TARGET_SUBNET_PEERS: usize = 6; +pub const TARGET_SUBNET_PEERS: usize = 3; const MAX_IDENTIFY_ADDRESSES: usize = 10; @@ -127,8 +126,6 @@ pub struct Network { gossip_cache: GossipCache, /// This node's PeerId. 
pub local_peer_id: PeerId, - /// Flag to disable warning logs for duplicate gossip messages and log at DEBUG level instead. - pub disable_duplicate_warn_logs: bool, /// Logger for behaviour actions. log: slog::Logger, } @@ -147,6 +144,14 @@ impl Network { // initialise the node's ID let local_keypair = utils::load_private_key(&config, &log); + // Trusted peers will also be marked as explicit in GossipSub. + // Cfr. https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#explicit-peering-agreements + let trusted_peers: Vec = config + .trusted_peers + .iter() + .map(|x| PeerId::from(x.clone())) + .collect(); + // set up a collection of variables accessible outside of the network crate let network_globals = { // Create an ENR or load from disk if appropriate @@ -161,11 +166,7 @@ impl Network { let globals = NetworkGlobals::new( enr, meta_data, - config - .trusted_peers - .iter() - .map(|x| PeerId::from(x.clone())) - .collect(), + trusted_peers, config.disable_peer_scoring, &log, ); @@ -278,6 +279,27 @@ impl Network { .with_peer_score(params, thresholds) .expect("Valid score params and thresholds"); + // Mark trusted peers as explicit. + for explicit_peer in config.trusted_peers.iter() { + gossipsub.add_explicit_peer(&PeerId::from(explicit_peer.clone())); + } + + // If we are using metrics, then register which topics we want to make sure to keep + // track of + if ctx.libp2p_registry.is_some() { + let topics_to_keep_metrics_for = attestation_sync_committee_topics::() + .map(|gossip_kind| { + Topic::from(GossipTopic::new( + gossip_kind, + GossipEncoding::default(), + enr_fork_id.fork_digest, + )) + .into() + }) + .collect::>(); + gossipsub.register_topics_for_metrics(topics_to_keep_metrics_for); + } + (gossipsub, update_gossipsub_scores) }; @@ -328,6 +350,7 @@ impl Network { let peer_manager = { let peer_manager_cfg = PeerManagerCfg { discovery_enabled: !config.disable_discovery, + quic_enabled: !config.disable_quic_support, metrics_enabled: config.metrics_enabled, target_peer_count: config.target_peers, ..Default::default() @@ -364,6 +387,7 @@ impl Network { identify, peer_manager, connection_limits, + upnp: Default::default(), } }; @@ -426,7 +450,6 @@ impl Network { update_gossipsub_scores, gossip_cache, local_peer_id, - disable_duplicate_warn_logs: config.disable_duplicate_warn_logs, log, }; @@ -642,6 +665,20 @@ impl Network { let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest); self.subscribe(topic); } + + // Register the new topics for metrics + let topics_to_keep_metrics_for = attestation_sync_committee_topics::() + .map(|gossip_kind| { + Topic::from(GossipTopic::new( + gossip_kind, + GossipEncoding::default(), + new_fork_digest, + )) + .into() + }) + .collect::>(); + self.gossipsub_mut() + .register_topics_for_metrics(topics_to_keep_metrics_for); } /// Unsubscribe from all topics that doesn't have the given fork_digest @@ -745,21 +782,23 @@ impl Network { .gossipsub_mut() .publish(Topic::from(topic.clone()), message_data.clone()) { - if self.disable_duplicate_warn_logs && matches!(e, PublishError::Duplicate) { - debug!( - self.log, - "Could not publish message"; - "error" => ?e, - "kind" => %topic.kind(), - ); - } else { - warn!( - self.log, - "Could not publish message"; - "error" => ?e, - "kind" => %topic.kind(), - ); - }; + match e { + PublishError::Duplicate => { + debug!( + self.log, + "Attempted to publish duplicate message"; + "kind" => %topic.kind(), + ); + } + ref e => { + warn!( + self.log, + "Could not publish message"; + 
"error" => ?e, + "kind" => %topic.kind(), + ); + } + } // add to metrics match topic.kind() { @@ -1226,22 +1265,39 @@ impl Network { .publish(Topic::from(topic.clone()), data) { Ok(_) => { - warn!(self.log, "Gossip message published on retry"; "topic" => topic_str); - if let Some(v) = metrics::get_int_counter( + debug!( + self.log, + "Gossip message published on retry"; + "topic" => topic_str + ); + metrics::inc_counter_vec( &metrics::GOSSIP_LATE_PUBLISH_PER_TOPIC_KIND, &[topic_str], - ) { - v.inc() - }; + ); } - Err(e) => { - warn!(self.log, "Gossip message publish failed on retry"; "topic" => topic_str, "error" => %e); - if let Some(v) = metrics::get_int_counter( + Err(PublishError::Duplicate) => { + debug!( + self.log, + "Gossip message publish ignored on retry"; + "reason" => "duplicate", + "topic" => topic_str + ); + metrics::inc_counter_vec( &metrics::GOSSIP_FAILED_LATE_PUBLISH_PER_TOPIC_KIND, &[topic_str], - ) { - v.inc() - }; + ); + } + Err(e) => { + warn!( + self.log, + "Gossip message publish failed on retry"; + "topic" => topic_str, + "error" => %e + ); + metrics::inc_counter_vec( + &metrics::GOSSIP_FAILED_LATE_PUBLISH_PER_TOPIC_KIND, + &[topic_str], + ); } } } @@ -1584,6 +1640,47 @@ impl Network { } } + fn inject_upnp_event(&mut self, event: libp2p::upnp::Event) { + match event { + libp2p::upnp::Event::NewExternalAddr(addr) => { + info!(self.log, "UPnP route established"; "addr" => %addr); + let mut iter = addr.iter(); + // Skip Ip address. + iter.next(); + match iter.next() { + Some(multiaddr::Protocol::Udp(udp_port)) => match iter.next() { + Some(multiaddr::Protocol::QuicV1) => { + if let Err(e) = self.discovery_mut().update_enr_quic_port(udp_port) { + warn!(self.log, "Failed to update ENR"; "error" => e); + } + } + _ => { + trace!(self.log, "UPnP address mapped multiaddr from unknown transport"; "addr" => %addr) + } + }, + Some(multiaddr::Protocol::Tcp(tcp_port)) => { + if let Err(e) = self.discovery_mut().update_enr_tcp_port(tcp_port) { + warn!(self.log, "Failed to update ENR"; "error" => e); + } + } + _ => { + trace!(self.log, "UPnP address mapped multiaddr from unknown transport"; "addr" => %addr); + } + } + } + libp2p::upnp::Event::ExpiredExternalAddr(_) => {} + libp2p::upnp::Event::GatewayNotFound => { + info!(self.log, "UPnP not available"); + } + libp2p::upnp::Event::NonRoutableGateway => { + info!( + self.log, + "UPnP is available but gateway is not exposed to public network" + ); + } + } + } + /* Networking polling */ /// Poll the p2p networking stack. @@ -1606,6 +1703,10 @@ impl Network { } BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + BehaviourEvent::Upnp(e) => { + self.inject_upnp_event(e); + None + } BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. 
} => None, diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 34dec1ca6..489c5ae52 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -1,3 +1,4 @@ +use crate::gossipsub; use crate::multiaddr::Protocol; use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; use crate::types::{ @@ -6,9 +7,7 @@ use crate::types::{ use crate::{GossipTopic, NetworkConfig}; use futures::future::Either; use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; -use libp2p::gossipsub; use libp2p::identity::{secp256k1, Keypair}; -use libp2p::quic; use libp2p::{core, noise, yamux, PeerId, Transport}; use prometheus_client::registry::Registry; use slog::{debug, warn}; @@ -63,8 +62,8 @@ pub fn build_transport( let transport = if quic_support { // Enables Quic // The default quic configuration suits us for now. - let quic_config = quic::Config::new(&local_private_key); - let quic = quic::tokio::Transport::new(quic_config); + let quic_config = libp2p::quic::Config::new(&local_private_key); + let quic = libp2p::quic::tokio::Transport::new(quic_config); let transport = tcp .or_transport(quic) .map(|either_output, _| match either_output { diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index af9e9ef45..8cf52f47d 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -17,7 +17,7 @@ pub use pubsub::{PubsubMessage, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; pub use topics::{ - core_topics_to_subscribe, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, - GossipTopic, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, - LIGHT_CLIENT_GOSSIP_TOPICS, + attestation_sync_committee_topics, core_topics_to_subscribe, fork_core_topics, + subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, ALTAIR_CORE_TOPICS, + BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 60fe37482..9bbc7b265 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -1,8 +1,8 @@ //! Handles the encoding and decoding of pubsub messages. 
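The recurring import change in these hunks — `libp2p::gossipsub` becoming `crate::gossipsub` (or `lighthouse_network::gossipsub` from outside the crate, as in the test files below) — indicates the crate now hosts its own gossipsub module. A minimal compilable sketch of that pattern, with the module body stubbed out (the real crate presumably vendors the full libp2p implementation):

```rust
// Sketch: declare a crate-local module standing in for the vendored
// implementation, and keep call sites on the crate-local path.
mod gossipsub {
    pub struct Config;

    impl Config {
        pub fn new() -> Self {
            Config
        }
    }
}

// `crate::gossipsub` instead of `libp2p::gossipsub`: swapping the upstream
// implementation becomes an import change rather than a crate-wide refactor.
use crate::gossipsub as gs;

fn main() {
    let _config = gs::Config::new();
}
```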
+use crate::gossipsub; use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; -use libp2p::gossipsub; use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index b77490517..b9194022c 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,7 +1,7 @@ -use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; +use crate::gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use strum::AsRefStr; -use types::{ChainSpec, EthSpec, ForkName, SubnetId, SyncSubnetId}; +use types::{ChainSpec, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; use crate::Subnet; @@ -62,6 +62,17 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> V } } +/// Returns all the attestation and sync committee topics, for a given fork. +pub fn attestation_sync_committee_topics() -> impl Iterator { + (0..TSpec::SubnetBitfieldLength::to_usize()) + .map(|subnet_id| GossipKind::Attestation(SubnetId::new(subnet_id as u64))) + .chain( + (0..TSpec::SyncCommitteeSubnetCount::to_usize()).map(|sync_committee_id| { + GossipKind::SyncCommitteeMessage(SyncSubnetId::new(sync_committee_id as u64)) + }), + ) +} + /// Returns all the topics that we need to subscribe to for a given fork /// including topics from older forks and new topics for the current fork. pub fn core_topics_to_subscribe( diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 9585dcf5a..3351ac23c 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -1,5 +1,5 @@ #![cfg(test)] -use libp2p::gossipsub; +use lighthouse_network::gossipsub; use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::Enr; use lighthouse_network::EnrExt; @@ -42,7 +42,12 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } -pub struct Libp2pInstance(LibP2PService, exit_future::Signal); +pub struct Libp2pInstance( + LibP2PService, + #[allow(dead_code)] + // This field is kept for lifetime purposes and may not be used directly, hence the `#[allow(dead_code)]` attribute. + async_channel::Sender<()>, +); impl std::ops::Deref for Libp2pInstance { type Target = LibP2PService; @@ -105,7 +110,7 @@ pub async fn build_libp2p_instance( let config = build_config(boot_nodes); // launch libp2p service - let (signal, exit) = exit_future::signal(); + let (signal, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); let libp2p_context = lighthouse_network::Context { diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 643c1231a..5a21b462d 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1056,7 +1056,7 @@ fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { fn tcp_test_goodbye_rpc() { // set up the logging.
The level and enabled logging or not let log_level = Level::Debug; - let enable_logging = true; + let enable_logging = false; goodbye_test(log_level, enable_logging, Protocol::Tcp); } @@ -1066,6 +1066,6 @@ fn tcp_test_goodbye_rpc() { fn quic_test_goodbye_rpc() { // set up the logging. The level and enabled logging or not let log_level = Level::Debug; - let enable_logging = true; + let enable_logging = false; goodbye_test(log_level, enable_logging, Protocol::Quic); } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index bbd2af217..228066b31 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -8,12 +8,13 @@ edition = { workspace = true } sloggers = { workspace = true } genesis = { workspace = true } matches = "0.1.8" -exit-future = { workspace = true } slog-term = { workspace = true } slog-async = { workspace = true } eth2 = { workspace = true } [dependencies] +async-channel = { workspace = true } +anyhow = { workspace = true } beacon_chain = { workspace = true } store = { workspace = true } lighthouse_network = { workspace = true } @@ -35,11 +36,10 @@ lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } logging = { workspace = true } task_executor = { workspace = true } -igd = "0.12.1" +igd-next = "0.14" itertools = { workspace = true } num_cpus = { workspace = true } lru_cache = { workspace = true } -if-addrs = "0.6.4" lru = { workspace = true } strum = { workspace = true } tokio-util = { workspace = true } @@ -56,4 +56,4 @@ environment = { workspace = true } # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill disable-backfill = [] fork_from_env = ["beacon_chain/fork_from_env"] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/beacon_node/network/src/nat.rs b/beacon_node/network/src/nat.rs index d011ac42e..e63ff5503 100644 --- a/beacon_node/network/src/nat.rs +++ b/beacon_node/network/src/nat.rs @@ -3,225 +3,58 @@ //! Currently supported strategies: //! - UPnP -use crate::{NetworkConfig, NetworkMessage}; -use if_addrs::get_if_addrs; -use slog::{debug, info}; -use std::net::{IpAddr, SocketAddr, SocketAddrV4}; -use tokio::sync::mpsc; -use types::EthSpec; +use anyhow::{bail, Context, Error}; +use igd_next::{aio::tokio as igd, PortMappingProtocol}; +use slog::debug; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::time::Duration; +use tokio::time::sleep; -/// Configuration required to construct the UPnP port mappings. -pub struct UPnPConfig { - /// The local TCP port. - tcp_port: u16, - /// The local UDP discovery port. - disc_port: u16, - /// The local UDP quic port. - quic_port: u16, - /// Whether discovery is enabled or not. - disable_discovery: bool, - /// Whether quic is enabled or not. - disable_quic_support: bool, -} +/// The duration in seconds of a port mapping on the gateway. +const MAPPING_DURATION: u32 = 3600; -/// Contains mappings that managed to be established. -#[derive(Default, Debug)] -pub struct EstablishedUPnPMappings { - /// A TCP port mapping for libp2p. - pub tcp_port: Option, - /// A UDP port for the QUIC libp2p transport. - pub udp_quic_port: Option, - /// A UDP port for discv5. - pub udp_disc_port: Option, -} +/// Renew the Mapping every half of `MAPPING_DURATION` to avoid the port being unmapped. +const MAPPING_TIMEOUT: u64 = MAPPING_DURATION as u64 / 2; -impl EstablishedUPnPMappings { - /// Returns true if at least one value is set. 
- pub fn is_some(&self) -> bool { - self.tcp_port.is_some() || self.udp_quic_port.is_some() || self.udp_disc_port.is_some() - } - - // Iterator over the UDP ports - pub fn udp_ports(&self) -> impl Iterator { - self.udp_quic_port.iter().chain(self.udp_disc_port.iter()) - } -} - -impl UPnPConfig { - pub fn from_config(config: &NetworkConfig) -> Option { - config.listen_addrs().v4().map(|v4_addr| UPnPConfig { - tcp_port: v4_addr.tcp_port, - disc_port: v4_addr.disc_port, - quic_port: v4_addr.quic_port, - disable_discovery: config.disable_discovery, - disable_quic_support: config.disable_quic_support, - }) - } -} - -/// Attempts to construct external port mappings with UPnP. -pub fn construct_upnp_mappings( - config: UPnPConfig, - network_send: mpsc::UnboundedSender>, +/// Attempts to map Discovery external port mappings with UPnP. +pub async fn construct_upnp_mappings( + addr: Ipv4Addr, + port: u16, log: slog::Logger, -) { - info!(log, "UPnP Attempting to initialise routes"); - match igd::search_gateway(Default::default()) { - Err(e) => info!(log, "UPnP not available"; "error" => %e), - Ok(gateway) => { - // Need to find the local listening address matched with the router subnet - let interfaces = match get_if_addrs() { - Ok(v) => v, - Err(e) => { - info!(log, "UPnP failed to get local interfaces"; "error" => %e); - return; - } - }; - let local_ip = interfaces.iter().find_map(|interface| { - // Just use the first IP of the first interface that is not a loopback and not an - // ipv6 address. - if !interface.is_loopback() { - interface.ip().is_ipv4().then(|| interface.ip()) - } else { - None - } - }); +) -> Result<(), Error> { + let gateway = igd::search_gateway(Default::default()) + .await + .context("Gateway does not support UPnP")?; - let local_ip = match local_ip { - None => { - info!(log, "UPnP failed to find local IP address"); - return; - } - Some(v) => v, - }; + let external_address = gateway + .get_external_ip() + .await + .context("Could not access gateway's external ip")?; - debug!(log, "UPnP Local IP Discovered"; "ip" => ?local_ip); - - let mut mappings = EstablishedUPnPMappings::default(); - - match local_ip { - IpAddr::V4(address) => { - let libp2p_socket = SocketAddrV4::new(address, config.tcp_port); - let external_ip = gateway.get_external_ip(); - // We add specific port mappings rather than getting the router to arbitrary assign - // one. - // I've found this to be more reliable. If multiple users are behind a single - // router, they should ideally try to set different port numbers. 
- mappings.tcp_port = add_port_mapping( - &gateway, - igd::PortMappingProtocol::TCP, - libp2p_socket, - "tcp", - &log, - ).map(|_| { - let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new((*ip).into(), config.tcp_port)).map_err(|_| ()); - info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port)); - config.tcp_port - }).ok(); - - let set_udp_mapping = |udp_port| { - let udp_socket = SocketAddrV4::new(address, udp_port); - add_port_mapping( - &gateway, - igd::PortMappingProtocol::UDP, - udp_socket, - "udp", - &log, - ).map(|_| { - info!(log, "UPnP UDP route established"; "external_socket" => format!("{}:{}", external_ip.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), udp_port)); - }) - }; - - // Set the discovery UDP port mapping - if !config.disable_discovery && set_udp_mapping(config.disc_port).is_ok() { - mappings.udp_disc_port = Some(config.disc_port); - } - - // Set the quic UDP port mapping - if !config.disable_quic_support && set_udp_mapping(config.quic_port).is_ok() { - mappings.udp_quic_port = Some(config.quic_port); - } - - // report any updates to the network service. - if mappings.is_some() { - network_send.send(NetworkMessage::UPnPMappingEstablished{ mappings }) - .unwrap_or_else(|e| debug!(log, "Could not send message to the network service"; "error" => %e)); - } - } - _ => debug!(log, "UPnP no routes constructed. IPv6 not supported"), - } - } + let is_private = match external_address { + IpAddr::V4(ipv4) => ipv4.is_private(), + IpAddr::V6(ipv6) => ipv6.is_loopback() || ipv6.is_unspecified(), }; -} -/// Sets up a port mapping for a protocol returning the mapped port if successful. -fn add_port_mapping( - gateway: &igd::Gateway, - protocol: igd::PortMappingProtocol, - socket: SocketAddrV4, - protocol_string: &'static str, - log: &slog::Logger, -) -> Result<(), ()> { - // We add specific port mappings rather than getting the router to arbitrary assign - // one. - // I've found this to be more reliable. If multiple users are behind a single - // router, they should ideally try to set different port numbers. - let mapping_string = &format!("lighthouse-{}", protocol_string); - for _ in 0..2 { - match gateway.add_port(protocol, socket.port(), socket, 0, mapping_string) { - Err(e) => { - match e { - igd::AddPortError::PortInUse => { - // Try and remove and re-create - debug!(log, "UPnP port in use, attempting to remap"; "protocol" => protocol_string, "port" => socket.port()); - match gateway.remove_port(protocol, socket.port()) { - Ok(()) => { - debug!(log, "UPnP Removed port mapping"; "protocol" => protocol_string, "port" => socket.port()) - } - Err(e) => { - debug!(log, "UPnP Port remove failure"; "protocol" => protocol_string, "port" => socket.port(), "error" => %e); - return Err(()); - } - } - } - e => { - info!(log, "UPnP TCP route not set"; "error" => %e); - return Err(()); - } - } - } - Ok(_) => { - return Ok(()); - } - } + if is_private { + bail!( + "Gateway's external address is a private address: {}", + external_address + ); } - Err(()) -} -/// Removes the specified TCP and UDP port mappings. 
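The new `is_private` guard above bails out when the gateway reports a private "external" address, which indicates a double-NAT setup where any mapping would not actually be reachable from the public internet. A small runnable illustration of that check, using only the standard library:

```rust
use std::net::Ipv4Addr;

fn main() {
    // RFC 1918 ranges are what `is_private` matches; a gateway reporting one
    // of these as its external address is itself behind another NAT.
    assert!(Ipv4Addr::new(192, 168, 1, 1).is_private());

    // 203.0.113.0/24 (TEST-NET-3) stands in for a genuinely public address.
    assert!(!Ipv4Addr::new(203, 0, 113, 7).is_private());
}
```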
-pub fn remove_mappings(mappings: &EstablishedUPnPMappings, log: &slog::Logger) { - if mappings.is_some() { - debug!(log, "Removing UPnP port mappings"); - match igd::search_gateway(Default::default()) { - Ok(gateway) => { - if let Some(tcp_port) = mappings.tcp_port { - match gateway.remove_port(igd::PortMappingProtocol::TCP, tcp_port) { - Ok(()) => debug!(log, "UPnP Removed TCP port mapping"; "port" => tcp_port), - Err(e) => { - debug!(log, "UPnP Failed to remove TCP port mapping"; "port" => tcp_port, "error" => %e) - } - } - } - for udp_port in mappings.udp_ports() { - match gateway.remove_port(igd::PortMappingProtocol::UDP, *udp_port) { - Ok(()) => debug!(log, "UPnP Removed UDP port mapping"; "port" => udp_port), - Err(e) => { - debug!(log, "UPnP Failed to remove UDP port mapping"; "port" => udp_port, "error" => %e) - } - } - } - } - Err(e) => debug!(log, "UPnP failed to remove mappings"; "error" => %e), - } + loop { + gateway + .add_port( + PortMappingProtocol::UDP, + port, + SocketAddr::new(IpAddr::V4(addr), port), + MAPPING_DURATION, + "Lighthouse Discovery port", + ) + .await + .with_context(|| format!("Could not UPnP map port: {} on the gateway", port))?; + debug!(log, "Discovery UPnP port mapped"; "port" => %port); + sleep(Duration::from_secs(MAPPING_TIMEOUT)).await; } } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 9d9b196e9..07fc06bc3 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1657,7 +1657,7 @@ impl NetworkBeaconProcessor { self.gossip_penalize_peer( peer_id, - PeerAction::LowToleranceError, + PeerAction::HighToleranceError, "light_client_gossip_error", ); } @@ -1675,15 +1675,7 @@ impl NetworkBeaconProcessor { "light_client_gossip_error", ); } - LightClientFinalityUpdateError::FinalityUpdateAlreadySeen => debug!( - self.log, - "Light client finality update already seen"; - "peer" => %peer_id, - "error" => ?e, - ), - LightClientFinalityUpdateError::BeaconChainError(_) - | LightClientFinalityUpdateError::LightClientUpdateError(_) - | LightClientFinalityUpdateError::SigSlotStartIsNone + LightClientFinalityUpdateError::SigSlotStartIsNone | LightClientFinalityUpdateError::FailedConstructingUpdate => debug!( self.log, "Light client error constructing finality update"; @@ -1801,19 +1793,7 @@ impl NetworkBeaconProcessor { "light_client_gossip_error", ); } - LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => { - metrics::register_optimistic_update_error(&e); - - debug!( - self.log, - "Light client optimistic update already seen"; - "peer" => %peer_id, - "error" => ?e, - ) - } - LightClientOptimisticUpdateError::BeaconChainError(_) - | LightClientOptimisticUpdateError::LightClientUpdateError(_) - | LightClientOptimisticUpdateError::SigSlotStartIsNone + LightClientOptimisticUpdateError::SigSlotStartIsNone | LightClientOptimisticUpdateError::FailedConstructingUpdate => { metrics::register_optimistic_update_error(&e); diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 67fc2fabb..e7d3a7ce2 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -589,7 +589,7 @@ impl NetworkBeaconProcessor { } /// Create a new work event to process `LightClientBootstrap`s from the RPC network. 
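Stepping back to the `nat.rs` rewrite above: the new `construct_upnp_mappings` never returns on success — it re-adds the mapping every `MAPPING_TIMEOUT` seconds so the lease never lapses. A runnable sketch of that cadence, with the igd-next call stubbed out (`renew_once` is hypothetical) and the sleep scaled down so the example terminates:

```rust
use std::time::Duration;
use tokio::time::sleep;

const MAPPING_DURATION: u32 = 3600; // lease length requested from the gateway
const MAPPING_TIMEOUT: u64 = MAPPING_DURATION as u64 / 2; // renew at half-life

// Stand-in for one `gateway.add_port(..)` call from the hunk above.
async fn renew_once(port: u16) -> Result<(), String> {
    println!("renewed UDP mapping for port {port}");
    Ok(())
}

#[tokio::main]
async fn main() {
    let renew_every = Duration::from_secs(MAPPING_TIMEOUT);
    println!("real code renews every {renew_every:?}");
    // The real loop runs for the node's lifetime; bounded and sped up here.
    for _ in 0..2 {
        renew_once(9000).await.expect("gateway rejected mapping");
        sleep(renew_every / 100_000).await;
    }
}
```

Renewing at half the lease duration leaves a full half-lease of slack if one renewal attempt is slow or fails transiently.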
- pub fn send_lightclient_bootstrap_request( + pub fn send_light_client_bootstrap_request( self: &Arc, peer_id: PeerId, request_id: PeerRequestId, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index a731dea7c..66c98ff3b 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -140,7 +140,7 @@ impl NetworkBeaconProcessor { let requested_blocks = request.block_roots().len(); let mut block_stream = match self .chain - .get_blocks_checking_early_attester_cache(request.block_roots().to_vec(), &executor) + .get_blocks_checking_caches(request.block_roots().to_vec(), &executor) { Ok(block_stream) => block_stream, Err(e) => return error!(self.log, "Error getting block stream"; "error" => ?e), diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 608d10d66..8894d5d9f 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -9,10 +9,8 @@ use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::MaybeAvailableBlock; use beacon_chain::{ - observed_block_producers::Error as ObserveError, - validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, - AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, - ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, + validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainError, + BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use beacon_processor::{ work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage}, @@ -20,10 +18,8 @@ use beacon_processor::{ }; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; -use slot_clock::SlotClock; use std::sync::Arc; use std::time::Duration; -use std::time::{SystemTime, UNIX_EPOCH}; use store::KzgCommitment; use tokio::sync::mpsc; use types::beacon_block_body::format_kzg_commitments; @@ -142,72 +138,6 @@ impl NetworkBeaconProcessor { return; }; - // Returns `true` if the time now is after the 4s attestation deadline. - let block_is_late = SystemTime::now() - .duration_since(UNIX_EPOCH) - // If we can't read the system time clock then indicate that the - // block is late (and therefore should *not* be requeued). This - // avoids infinite loops. - .map_or(true, |now| { - get_block_delay_ms(now, block.message(), &self.chain.slot_clock) - > self.chain.slot_clock.unagg_attestation_production_delay() - }); - - // Checks if a block from this proposer is already known. - let block_equivocates = || { - match self.chain.observed_slashable.read().is_slashable( - block.slot(), - block.message().proposer_index(), - block.canonical_root(), - ) { - Ok(is_slashable) => is_slashable, - //Both of these blocks will be rejected, so reject them now rather - // than re-queuing them. - Err(ObserveError::FinalizedBlock { .. }) - | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, - } - }; - - // If we've already seen a block from this proposer *and* the block - // arrived before the attestation deadline, requeue it to ensure it is - // imported late enough that it won't receive a proposer boost. 
- // - // Don't requeue blocks if they're already known to fork choice, just - // push them through to block processing so they can be handled through - // the normal channels. - if !block_is_late && block_equivocates() { - debug!( - self.log, - "Delaying processing of duplicate RPC block"; - "block_root" => ?block_root, - "proposer" => block.message().proposer_index(), - "slot" => block.slot() - ); - - // Send message to work reprocess queue to retry the block - let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( - block_root, - block, - seen_timestamp, - process_type, - ); - let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { - beacon_block_root: block_root, - process_fn, - ignore_fn, - }); - - if reprocess_tx.try_send(reprocess_msg).is_err() { - error!( - self.log, - "Failed to inform block import"; - "source" => "rpc", - "block_root" => %block_root - ); - } - return; - } - let slot = block.slot(); let parent_root = block.message().parent_root(); let commitments_formatted = block.as_block().commitments_formatted(); diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index f56a3b744..a774c0e16 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -218,7 +218,7 @@ impl Router { ), Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_lightclient_bootstrap_request(peer_id, request_id, request), + .send_light_client_bootstrap_request(peer_id, request_id, request), ), } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 01a7e1f98..18284bd23 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,5 +1,5 @@ use super::sync::manager::RequestId as SyncId; -use crate::nat::EstablishedUPnPMappings; +use crate::nat; use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; @@ -27,6 +27,7 @@ use lighthouse_network::{ MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; +use std::collections::BTreeSet; use std::{collections::HashSet, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; use strum::IntoStaticStr; @@ -94,11 +95,6 @@ pub enum NetworkMessage { /// The result of the validation validation_result: MessageAcceptance, }, - /// Called if UPnP managed to establish an external port mapping. - UPnPMappingEstablished { - /// The mappings that were established. - mappings: EstablishedUPnPMappings, - }, /// Reports a peer to the peer manager for performing an action. ReportPeer { peer_id: PeerId, @@ -124,7 +120,7 @@ pub enum NetworkMessage { pub enum ValidatorSubscriptionMessage { /// Subscribes a list of validators to specific slots for attestation duties. AttestationSubscribe { - subscriptions: Vec, + subscriptions: BTreeSet, }, SyncCommitteeSubscribe { subscriptions: Vec, @@ -188,9 +184,6 @@ pub struct NetworkService { store: Arc>, /// A collection of global variables, accessible outside of the network service. network_globals: Arc>, - /// Stores potentially created UPnP mappings to be removed on shutdown. (TCP port and UDP - /// ports). - upnp_mappings: EstablishedUPnPMappings, /// A delay that expires when a new fork takes place. next_fork_update: Pin>>, /// A delay that expires when we need to subscribe to a new fork's topics. 
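Switching `AttestationSubscribe` from `Vec` to `BTreeSet` deduplicates subscriptions at the API boundary: with `validator_index` removed from `ValidatorSubscription` (see the subnet service hunks below), two validators with identical duties now produce identical entries. A runnable sketch with a simplified, hypothetical struct:

```rust
use std::collections::BTreeSet;

// Simplified stand-in for `ValidatorSubscription` after `validator_index`
// was dropped; deriving `Ord` is what lets it live in a `BTreeSet`.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Subscription {
    attestation_committee_index: u64,
    slot: u64,
    committee_count_at_slot: u64,
    is_aggregator: bool,
}

fn main() {
    let duty = Subscription {
        attestation_committee_index: 1,
        slot: 100,
        committee_count_at_slot: 64,
        is_aggregator: false,
    };
    // Two validators with the same duty collapse into a single subscription.
    let subs: BTreeSet<_> = [duty.clone(), duty].into_iter().collect();
    assert_eq!(subs.len(), 1);
}
```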
@@ -237,22 +230,24 @@ impl NetworkService { "Backfill is disabled. DO NOT RUN IN PRODUCTION" ); - // try and construct UPnP port mappings if required. - if let Some(upnp_config) = crate::nat::UPnPConfig::from_config(config) { - let upnp_log = network_log.new(o!("service" => "UPnP")); - let upnp_network_send = network_senders.network_send(); - if config.upnp_enabled { - executor.spawn_blocking( - move || { - crate::nat::construct_upnp_mappings( - upnp_config, - upnp_network_send, - upnp_log, - ) - }, - "UPnP", - ); - } + if let (true, false, Some(v4)) = ( + config.upnp_enabled, + config.disable_discovery, + config.listen_addrs().v4(), + ) { + let nw = network_log.clone(); + let v4 = v4.clone(); + executor.spawn( + async move { + info!(nw, "UPnP Attempting to initialise routes"); + if let Err(e) = + nat::construct_upnp_mappings(v4.addr, v4.disc_port, nw.clone()).await + { + info!(nw, "Could not UPnP map Discovery port"; "error" => %e); + } + }, + "UPnP", + ); } // get a reference to the beacon chain store @@ -358,7 +353,6 @@ impl NetworkService { router_send, store, network_globals: network_globals.clone(), - upnp_mappings: EstablishedUPnPMappings::default(), next_fork_update, next_fork_subscriptions, next_unsubscribe, @@ -636,21 +630,6 @@ impl NetworkService { } => { self.libp2p.send_error_response(peer_id, id, error, reason); } - NetworkMessage::UPnPMappingEstablished { mappings } => { - self.upnp_mappings = mappings; - // If there is an external TCP port update, modify our local ENR. - if let Some(tcp_port) = self.upnp_mappings.tcp_port { - if let Err(e) = self.libp2p.discovery_mut().update_enr_tcp_port(tcp_port) { - warn!(self.log, "Failed to update ENR"; "error" => e); - } - } - // If there is an external QUIC port update, modify our local ENR. - if let Some(quic_port) = self.upnp_mappings.udp_quic_port { - if let Err(e) = self.libp2p.discovery_mut().update_enr_quic_port(quic_port) { - warn!(self.log, "Failed to update ENR"; "error" => e); - } - } - } NetworkMessage::ValidationResult { propagation_source, message_id, @@ -805,7 +784,7 @@ impl NetworkService { ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions } => { if let Err(e) = self .attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) { warn!(self.log, "Attestation validator subscription failed"; "error" => e); } @@ -1009,10 +988,6 @@ impl Drop for NetworkService { "Saved DHT state"; ), } - - // attempt to remove port mappings - crate::nat::remove_mappings(&self.upnp_mappings, &self.log); - info!(self.log, "Network service shutdown"); } } diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 35a7f1eab..39e5e1292 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -20,7 +20,7 @@ mod tests { fn get_topic_params( &self, topic: GossipTopic, - ) -> Option<&lighthouse_network::libp2p::gossipsub::TopicScoreParams> { + ) -> Option<&lighthouse_network::gossipsub::TopicScoreParams> { self.libp2p.get_topic_params(topic) } } @@ -62,7 +62,7 @@ mod tests { let runtime = Arc::new(Runtime::new().unwrap()); - let (signal, exit) = exit_future::signal(); + let (signal, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), @@ -139,7 +139,7 @@ mod tests { // Build network service. 
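The `exit_future::signal()` → `async_channel::bounded(1)` swap (here and in the test helpers above) relies on channel closure as the shutdown signal: once every `Sender` is dropped, `recv()` returns an error and the task can shut down. A runnable sketch of the pattern, assuming only the `tokio` and `async-channel` crates already added in `Cargo.toml`:

```rust
#[tokio::main]
async fn main() {
    let (signal, exit) = async_channel::bounded::<()>(1);

    let task = tokio::spawn(async move {
        // `recv` resolves to Err(RecvError) once all senders are dropped.
        let _ = exit.recv().await;
        println!("exit signal observed, shutting down");
    });

    // Holding `signal` keeps the task alive; dropping it triggers shutdown,
    // which is why `Libp2pInstance` stores the sender as a field.
    drop(signal);
    task.await.unwrap();
}
```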
let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { - let (_, exit) = exit_future::signal(); + let (_, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 1cae6299e..ab9ffb95a 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -196,7 +196,7 @@ impl AttestationService { /// safely dropped. pub fn validator_subscriptions( &mut self, - subscriptions: Vec, + subscriptions: impl Iterator, ) -> Result<(), String> { // If the node is in a proposer-only state, we ignore all subnet subscriptions. if self.proposer_only { @@ -227,7 +227,6 @@ impl AttestationService { warn!(self.log, "Failed to compute subnet id for validator subscription"; "error" => ?e, - "validator_index" => subscription.validator_index ); continue; } @@ -257,13 +256,11 @@ impl AttestationService { warn!(self.log, "Subscription to subnet error"; "error" => e, - "validator_index" => subscription.validator_index, ); } else { trace!(self.log, "Subscribed to subnet for aggregator duties"; "exact_subnet" => ?exact_subnet, - "validator_index" => subscription.validator_index ); } } diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 769775a62..74f3f59df 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -180,14 +180,12 @@ mod attestation_service { use super::*; fn get_subscription( - validator_index: u64, attestation_committee_index: CommitteeIndex, slot: Slot, committee_count_at_slot: u64, is_aggregator: bool, ) -> ValidatorSubscription { ValidatorSubscription { - validator_index, attestation_committee_index, slot, committee_count_at_slot, @@ -204,7 +202,6 @@ mod attestation_service { (0..validator_count) .map(|validator_index| { get_subscription( - validator_index, validator_index, slot, committee_count_at_slot, @@ -217,7 +214,6 @@ mod attestation_service { #[tokio::test] async fn subscribe_current_slot_wait_for_unsubscribe() { // subscription config - let validator_index = 1; let committee_index = 1; // Keep a low subscription slot so that there are no additional subnet discovery events. 
let subscription_slot = 0; @@ -233,7 +229,6 @@ mod attestation_service { .expect("Could not get current slot"); let subscriptions = vec![get_subscription( - validator_index, committee_index, current_slot + Slot::new(subscription_slot), committee_count, @@ -242,7 +237,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) .unwrap(); // not enough time for peer discovery, just subscribe, unsubscribe @@ -253,7 +248,7 @@ mod attestation_service { &attestation_service.beacon_chain.spec, ) .unwrap(); - let expected = vec![ + let expected = [ SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id)), SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id)), ]; @@ -293,7 +288,6 @@ mod attestation_service { #[tokio::test] async fn test_same_subnet_unsubscription() { // subscription config - let validator_index = 1; let committee_count = 1; let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; @@ -313,7 +307,6 @@ mod attestation_service { .expect("Could not get current slot"); let sub1 = get_subscription( - validator_index, com1, current_slot + Slot::new(subscription_slot1), committee_count, @@ -321,7 +314,6 @@ mod attestation_service { ); let sub2 = get_subscription( - validator_index, com2, current_slot + Slot::new(subscription_slot2), committee_count, @@ -350,7 +342,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(vec![sub1, sub2]) + .validator_subscriptions(vec![sub1, sub2].into_iter()) .unwrap(); // Unsubscription event should happen at slot 2 (since subnet id's are the same, unsubscription event should be at higher slot + 1) @@ -431,7 +423,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) .unwrap(); let events = get_events(&mut attestation_service, Some(131), 10).await; @@ -501,7 +493,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) .unwrap(); let events = get_events(&mut attestation_service, None, 3).await; @@ -538,7 +530,6 @@ mod attestation_service { #[tokio::test] async fn test_subscribe_same_subnet_several_slots_apart() { // subscription config - let validator_index = 1; let committee_count = 1; let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; @@ -558,7 +549,6 @@ mod attestation_service { .expect("Could not get current slot"); let sub1 = get_subscription( - validator_index, com1, current_slot + Slot::new(subscription_slot1), committee_count, @@ -566,7 +556,6 @@ mod attestation_service { ); let sub2 = get_subscription( - validator_index, com2, current_slot + Slot::new(subscription_slot2), committee_count, @@ -595,7 +584,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(vec![sub1, sub2]) + .validator_subscriptions(vec![sub1, sub2].into_iter()) .unwrap(); // Unsubscription event should happen at the end of the slot. 
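`validator_subscriptions` now takes `impl Iterator<Item = ValidatorSubscription>` instead of `Vec`, which is why every call site above and below gains an `.into_iter()`. A minimal sketch of why the relaxed bound helps (function and item type simplified):

```rust
use std::collections::BTreeSet;

// Accepting any iterator means Vec-backed tests and the BTreeSet-backed
// network service can both call this without building an intermediate Vec.
fn validator_subscriptions(subscriptions: impl Iterator<Item = u64>) -> usize {
    subscriptions.count()
}

fn main() {
    assert_eq!(validator_subscriptions(vec![1, 2, 2].into_iter()), 3);
    // The BTreeSet has already deduplicated its contents.
    assert_eq!(validator_subscriptions(BTreeSet::from([1, 2, 2]).into_iter()), 2);
}
```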
@@ -668,7 +657,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) .unwrap(); // There should only be the same subscriptions as there are in the specification, diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 0d7e7c16c..4e24aca07 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -332,7 +332,16 @@ impl BackFillSync { } // If we have run out of peers in which to retry this batch, the backfill state // transitions to a paused state. - self.retry_batch_download(network, id)?; + // We still need to reset the state for all the affected batches, so we should not + // short circuit early + if self.retry_batch_download(network, id).is_err() { + debug!( + self.log, + "Batch could not be retried"; + "batch_id" => id, + "error" => "no synced peers" + ); + } } else { debug!(self.log, "Batch not found while removing peer"; "peer" => %peer_id, "batch" => id) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 002bb344a..1a8e0194f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -622,6 +622,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Specifies how many states from the freezer database should cache in memory [default: 1]") .takes_value(true) ) + .arg( + Arg::with_name("state-cache-size") + .long("state-cache-size") + .value_name("STATE_CACHE_SIZE") + .help("Specifies the size of the snapshot cache [default: 3]") + .takes_value(true) + ) /* * Execution Layer Integration */ @@ -939,6 +946,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .requires("checkpoint-state") ) + .arg( + Arg::with_name("checkpoint-blobs") + .long("checkpoint-blobs") + .help("Set the checkpoint blobs to start syncing from. Must be aligned and match \ + --checkpoint-block. Using --checkpoint-sync-url instead is recommended.") + .value_name("BLOBS_SSZ") + .takes_value(true) + .requires("checkpoint-block") + ) .arg( Arg::with_name("checkpoint-sync-url") .long("checkpoint-sync-url") @@ -1271,11 +1287,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("disable-duplicate-warn-logs") .long("disable-duplicate-warn-logs") - .help("Disable warning logs for duplicate gossip messages. The WARN level log is \ - useful for detecting a duplicate validator key running elsewhere. However, this may \ - result in excessive warning logs if the validator is broadcasting messages to \ - multiple beacon nodes via the validator client --broadcast flag. In this case, \ - disabling these warn logs may be useful.") + .help("This flag is deprecated and has no effect.") .takes_value(false) ) .group(ArgGroup::with_name("enable_http").args(&["http", "gui", "staking"]).multiple(true)) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c940049c5..ba8430ace 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -163,9 +163,16 @@ pub fn get_config( cli_args.is_present("light-client-server"); } + if cli_args.is_present("light-client-server") { + client_config.chain.enable_light_client_server = true; + } + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { client_config.chain.shuffling_cache_size = cache_size; } + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? 
{ + client_config.chain.snapshot_cache_size = cache_size; + } /* * Prometheus metrics HTTP server @@ -508,9 +515,10 @@ pub fn get_config( client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. - if let (Some(initial_state_path), Some(initial_block_path)) = ( + if let (Some(initial_state_path), Some(initial_block_path), opt_initial_blobs_path) = ( cli_args.value_of("checkpoint-state"), cli_args.value_of("checkpoint-block"), + cli_args.value_of("checkpoint-blobs"), ) { let read = |path: &str| { use std::fs::File; @@ -526,10 +534,12 @@ pub fn get_config( let anchor_state_bytes = read(initial_state_path)?; let anchor_block_bytes = read(initial_block_path)?; + let anchor_blobs_bytes = opt_initial_blobs_path.map(read).transpose()?; ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, anchor_block_bytes, + anchor_blobs_bytes, } } else if let Some(remote_bn_url) = cli_args.value_of("checkpoint-sync-url") { let url = SensitiveUrl::parse(remote_bn_url) @@ -1118,8 +1128,6 @@ pub fn set_network_config( config.target_peers = target_peers_str .parse::() .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; - } else { - config.target_peers = 80; // default value } if let Some(value) = cli_args.value_of("network-load") { @@ -1421,9 +1429,6 @@ pub fn set_network_config( Some(config_str.parse()?) } }; - - config.disable_duplicate_warn_logs = cli_args.is_present("disable-duplicate-warn-logs"); - Ok(()) } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 63cd8e67d..4bdb0deca 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1344,7 +1344,7 @@ impl, Cold: ItemStore> HotColdDB high_restore_point .get_block_root(slot) .or_else(|_| high_restore_point.get_oldest_block_root()) - .map(|x| *x) + .copied() .map_err(HotColdDBError::RestorePointBlockHashError) } @@ -2376,6 +2376,9 @@ impl, Cold: ItemStore> HotColdDB self.cold_db.do_atomically(cold_ops)?; } + // In order to reclaim space, we need to compact the freezer DB as well. + self.cold_db.compact()?; + Ok(()) } } diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 62619dd2c..d799bdedd 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -154,25 +154,15 @@ impl KeyValueStore for LevelDB { self.transaction_mutex.lock() } - /// Compact all values in the states and states flag columns. - fn compact(&self) -> Result<(), Error> { - let endpoints = |column: DBColumn| { - ( - BytesKey::from_vec(get_key_for_col(column.as_str(), Hash256::zero().as_bytes())), - BytesKey::from_vec(get_key_for_col( - column.as_str(), - Hash256::repeat_byte(0xff).as_bytes(), - )), - ) - }; - - for (start_key, end_key) in [ - endpoints(DBColumn::BeaconStateTemporary), - endpoints(DBColumn::BeaconState), - endpoints(DBColumn::BeaconStateSummary), - ] { - self.db.compact(&start_key, &end_key); - } + fn compact_column(&self, column: DBColumn) -> Result<(), Error> { + // Use key-size-agnostic keys [] and 0xff..ff with a minimum of 32 bytes to account for + // columns that may change size between sub-databases or schema versions. 
+ let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), &[])); + let end_key = BytesKey::from_vec(get_key_for_col( + column.as_str(), + &vec![0xff; std::cmp::max(column.key_size(), 32)], + )); + self.db.compact(&start_key, &end_key); Ok(()) } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index eacd28d2d..e86689b0c 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -80,8 +80,22 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { /// this method. In future we may implement a safer mandatory locking scheme. fn begin_rw_transaction(&self) -> MutexGuard<()>; - /// Compact the database, freeing space used by deleted items. - fn compact(&self) -> Result<(), Error>; + /// Compact a single column in the database, freeing space used by deleted items. + fn compact_column(&self, column: DBColumn) -> Result<(), Error>; + + /// Compact a default set of columns that are likely to free substantial space. + fn compact(&self) -> Result<(), Error> { + // Compact state and block related columns as they are likely to have the most churn, + // i.e. entries being created and deleted. + for column in [ + DBColumn::BeaconState, + DBColumn::BeaconStateSummary, + DBColumn::BeaconBlock, + ] { + self.compact_column(column)?; + } + Ok(()) + } /// Iterate through all keys and values in a particular column. fn iter_column(&self, column: DBColumn) -> ColumnIter { diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index c2e494dce..302d2c2ad 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -108,7 +108,7 @@ impl KeyValueStore for MemoryStore { self.transaction_mutex.lock() } - fn compact(&self) -> Result<(), Error> { + fn compact_column(&self, _column: DBColumn) -> Result<(), Error> { Ok(()) } } diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 20c5d7443..867a8f79d 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -23,15 +23,13 @@ states to slow down dramatically. A lower _slots per restore point_ value (SPRP) frequent restore points, while a higher SPRP corresponds to less frequent. The table below shows some example values. -| Use Case | SPRP | Yearly Disk Usage* | Load Historical State | +| Use Case | SPRP | Yearly Disk Usage*| Load Historical State | |----------------------------|------|-------------------|-----------------------| -| Research | 32 | 3.4 TB | 155 ms | -| Block explorer/analysis | 128 | 851 GB | 620 ms | -| Enthusiast (prev. default) | 2048 | 53.6 GB | 10.2 s | -| Hobbyist | 4096 | 26.8 GB | 20.5 s | -| Validator only (default) | 8192 | 12.7 GB | 41 s | +| Research | 32 | more than 10 TB | 155 ms | +| Enthusiast (prev. default) | 2048 | hundreds of GB | 10.2 s | +| Validator only (default) | 8192 | tens of GB | 41 s | -*Last update: May 2023. +*Last update: Dec 2023. As we can see, it's a high-stakes trade-off! The relationships to disk usage and historical state load time are both linear – doubling SPRP halves disk usage and doubles load time. The minimum SPRP @@ -41,12 +39,12 @@ The default value is 8192 for databases synced from scratch using Lighthouse v2. 2048 for prior versions. Please see the section on [Defaults](#defaults) below. The values shown in the table are approximate, calculated using a simple heuristic: each -`BeaconState` consumes around 18MB of disk space, and each block replayed takes around 5ms. 
The +`BeaconState` consumes around 145MB of disk space, and each block replayed takes around 5ms. The **Yearly Disk Usage** column shows the approximate size of the freezer DB _alone_ (hot DB not included), calculated proportionally using the total freezer database disk usage. The **Load Historical State** time is the worst-case load time for a state in the last slot before a restore point. -As an example, we use an SPRP of 4096 to calculate the total size of the freezer database until May 2023. It has been about 900 days since the genesis, the total disk usage by the freezer database is therefore: 900/365*26.8 GB = 66 GB. +To run a full archival node with fast access to beacon states and an SPRP of 32, the disk usage will be more than 10 TB per year, which is impractical for many users. As such, users may consider running the [tree-states](https://github.com/sigp/lighthouse/releases/tag/v4.5.444-exp) release, which uses less than 150 GB for a full archival node. The caveat is that it is currently experimental and in alpha release (as of Dec 2023), thus not recommended for running mainnet validators. Nevertheless, it is suitable for analysis purposes, and if you encounter any issues in tree-states, we would appreciate any feedback. We plan to have a stable release of tree-states in 1H 2024. ### Defaults diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index b1f05450c..5fabf57d5 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -40,7 +40,7 @@ drastically and use the (recommended) default. ### NAT Traversal (Port Forwarding) -Lighthouse, by default, uses port 9000 for both TCP and UDP. Lighthouse will +Lighthouse, by default, uses port 9000 for both TCP and UDP. Since v4.5.0, Lighthouse will also attempt to make QUIC connections via UDP port 9001 by default. Lighthouse will still function if it is behind a NAT without any port mappings. Although Lighthouse still functions, we recommend that some mechanism is used to ensure that your Lighthouse node is publicly accessible. This will typically improve @@ -50,8 +50,8 @@ peers for your node and overall improve the Ethereum consensus network. Lighthouse currently supports UPnP. If UPnP is enabled on your router, Lighthouse will automatically establish the port mappings for you (the beacon node will inform you of established routes in this case). If UPnP is not -enabled, we recommend you to manually set up port mappings to both of Lighthouse's -TCP and UDP ports (9000 by default). +enabled, we recommend that you manually set up port mappings to Lighthouse's +TCP and UDP ports (9000 TCP/UDP, and 9001 UDP by default). > Note: Lighthouse needs to advertise its publicly accessible ports in > order to inform its peers that it is contactable and how to connect to it. @@ -66,7 +66,7 @@ TCP and UDP ports (9000 by default). The steps to do port forwarding depends on the router, but the general steps are given below: 1. Determine the default gateway IP: -- On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. +- On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP.
- On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. - On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. @@ -74,16 +74,22 @@ The steps to do port forwarding depends on the router, but the general steps are 2. Login to the router management page. The login credentials are usually available in the manual or the router, or it can be found on a sticker underneath the router. You can also try the login credentials for some common router brands listed [here](https://www.noip.com/support/knowledgebase/general-port-forwarding-guide/). -3. Navigate to the port forward settings in your router. The exact step depends on the router, but typically it will fall under the "Advanced" section, under the name "port forwarding" or "virtual server". +3. Navigate to the port forward settings in your router. The exact step depends on the router, but typically it will fall under the "Advanced" section, under the name "port forwarding" or "virtual server". 4. Configure a port forwarding rule as below: - Protocol: select `TCP/UDP` or `BOTH` - External port: `9000` - Internal port: `9000` -- IP address: Usually there is a dropdown list for you to select the device. Choose the device that is running Lighthouse +- IP address: Usually there is a dropdown list for you to select the device. Choose the device that is running Lighthouse. -5. To check that you have successfully open the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. +Since v4.5.0, port 9001/UDP is also used for QUIC support. +- Protocol: select `UDP` +- External port: `9001` +- Internal port: `9001` +- IP address: Choose the device that is running Lighthouse. + +5. To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly set up port forwarding for the UDP ports (`9000` and `9001` by default). ### ENR Configuration @@ -125,6 +131,9 @@ IPv4 only: TCP and UDP. - `--listen-address :: --port 9909 --discovery-port 9999` will listen over IPv6 using port `9909` for TCP and port `9999` for UDP. +- By default, QUIC listens for UDP connections using a port number that is one greater than the specified port. + If the specified port is 9909, QUIC will use port 9910 for IPv6 UDP connections. + This can be configured with `--quic-port`. To listen over both IPv4 and IPv6: - Set two listening addresses using the `--listen-address` flag twice ensuring @@ -133,18 +142,38 @@ To listen over both IPv4 and IPv6: that this behaviour differs from the Ipv6 only case described above. - If necessary, set the `--port6` flag to configure the port used for TCP and UDP over IPv6. This flag has no effect when listening over IPv6 only. -- If necessary, set the `--discovery-port6` flag to configure the IPv6 UDP
This will default to the value given to `--port6` if not set. This flag has no effect when listening over IPv6 only. +- If necessary, set the `--quic-port6` flag to configure the port used by QUIC for + UDP over IPv6. This will default to the value given to `--port6` + 1. This flag + has no effect when listening over IPv6 only. ##### Configuration Examples -- `--listen-address :: --listen-address 0.0.0.0 --port 9909` will listen - over IPv4 using port `9909` for TCP and UDP. It will also listen over IPv6 but - using the default value for `--port6` for UDP and TCP (`9090`). -- `--listen-address :: --listen-address --port 9909 --discovery-port6 9999` - will have the same configuration as before except for the IPv6 UDP socket, - which will use port `9999`. +> When using `--listen-address :: --listen-address 0.0.0.0 --port 9909`, listening will be set up as follows: +> +> **IPv4**: +> +> It listens on port `9909` for both TCP and UDP. +> QUIC will use the next sequential port `9910` for UDP. +> +> **IPv6**: +> +> It listens on the default value of --port6 (`9090`) for both UDP and TCP. +> QUIC will use port `9091` for UDP, which is the default `--port6` value (`9090`) + 1. + +> When using `--listen-address :: --listen-address --port 9909 --discovery-port6 9999`, listening will be set up as follows: +> +> **IPv4**: +> +> It listens on port `9909` for both TCP and UDP. +> QUIC will use the next sequential port `9910` for UDP. +> +> **IPv6**: +> +> It listens on the default value of `--port6` (`9090`) for TCP, and port `9999` for UDP. +> QUIC will use port `9091` for UDP, which is the default `--port6` value (`9090`) + 1. #### Configuring Lighthouse to advertise IPv6 reachable addresses Lighthouse supports IPv6 to connect to other nodes both over IPv6 exclusively, diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 519ce5705..3e57edd8d 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -100,7 +100,7 @@ The `jq` tool is used to format the JSON data properly. If it returns `jq: comma Shows the status of validator at index `1` at the `head` state. ```bash -curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" +curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" | jq ``` ```json diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 32c967c9e..ce7145098 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -463,20 +463,6 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: a } ``` -### `/lighthouse/beacon/states/{state_id}/ssz` - -Obtains a `BeaconState` in SSZ bytes. Useful for obtaining a genesis state. - -The `state_id` parameter is identical to that used in the [Standard Beacon Node API -`beacon/state` -routes](https://ethereum.github.io/beacon-APIs/#/Beacon/getStateRoot). - -```bash -curl -X GET "http://localhost:5052/lighthouse/beacon/states/0/ssz" | jq -``` - -*Example omitted for brevity, the body simply contains SSZ bytes.* - ### `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. 
Returns a list @@ -515,7 +501,7 @@ curl "http://localhost:5052/lighthouse/database/info" | jq ```json { - "schema_version": 16, + "schema_version": 18, "config": { "slots_per_restore_point": 8192, "slots_per_restore_point_set_explicitly": false, @@ -523,18 +509,26 @@ curl "http://localhost:5052/lighthouse/database/info" | jq "historic_state_cache_size": 1, "compact_on_init": false, "compact_on_prune": true, - "prune_payloads": true + "prune_payloads": true, + "prune_blobs": true, + "epochs_per_blob_prune": 1, + "blob_prune_margin_epochs": 0 }, "split": { - "slot": "5485952", - "state_root": "0xcfe5d41e6ab5a9dab0de00d89d97ae55ecaeed3b08e4acda836e69b2bef698b4" + "slot": "7454656", + "state_root": "0xbecfb1c8ee209854c611ebc967daa77da25b27f1a8ef51402fdbe060587d7653", + "block_root": "0x8730e946901b0a406313d36b3363a1b7091604e1346a3410c1a7edce93239a68" }, "anchor": { - "anchor_slot": "5414688", - "oldest_block_slot": "0", - "oldest_block_parent": "0x0000000000000000000000000000000000000000000000000000000000000000", - "state_upper_limit": "5414912", - "state_lower_limit": "8192" + "anchor_slot": "7451168", + "oldest_block_slot": "3962593", + "oldest_block_parent": "0x4a39f21367b3b9cc272744d1e38817bda5daf38d190dc23dc091f09fb54acd97", + "state_upper_limit": "7454720", + "state_lower_limit": "0" + }, + "blob_info": { + "oldest_blob_slot": "7413769", + "blobs_db": true } } ``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index d3b2edafe..cf52454c2 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -16,6 +16,7 @@ HTTP Path | Description | [`POST /lighthouse/validators/keystore`](#post-lighthousevalidatorskeystore) | Import a keystore. [`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic. [`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. +[`GET /lighthouse/logs`](#get-lighthouselogs) | Get logs The query to Lighthouse API endpoints requires authorization, see [Authorization Header](./api-vc-auth-header.md). @@ -745,19 +746,19 @@ Create any number of new validators, all of which will refer to a "graffiti": "Mr F was here", "suggested_fee_recipient": "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d", "voting_public_key": "0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380", + "builder_proposals": true, "url": "http://path-to-web3signer.com", - "root_certificate_path": "/path/on/vc/filesystem/to/certificate.pem", + "root_certificate_path": "/path/to/certificate.pem", + "client_identity_path": "/path/to/identity.p12", + "client_identity_password": "pass", "request_timeout_ms": 12000 } ] + ``` -The following fields may be omitted or nullified to obtain default values: +Some of the fields above may be omitted or nullified to obtain default values (e.g., `graffiti`, `request_timeout_ms`). 
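For illustration, a request that relies on those defaults only needs the required fields. A minimal sketch, reusing the placeholder signer URL and public key from the example above (defaults for the omitted fields are as described in the preceding sentence):

```bash
# Minimal web3signer import: omitted optional fields fall back to their defaults.
DATADIR=/var/lib/lighthouse
curl -X POST http://localhost:5062/lighthouse/validators/web3signer \
  -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \
  -H "Content-Type: application/json" \
  -d '[{"enable":true,"description":"validator_one","voting_public_key":"0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380","url":"http://path-to-web3signer.com"}]'
```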
-- `graffiti` -- `suggested_fee_recipient` -- `root_certificate_path` -- `request_timeout_ms` Command: ```bash @@ -765,7 +766,7 @@ DATADIR=/var/lib/lighthouse curl -X POST http://localhost:5062/lighthouse/validators/web3signer \ -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ -H "Content-Type: application/json" \ --d "[{\"enable\":true,\"description\":\"validator_one\",\"graffiti\":\"Mr F was here\",\"suggested_fee_recipient\":\"0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d\",\"voting_public_key\":\"0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380\",\"url\":\"http://path-to-web3signer.com\",\"request_timeout_ms\":12000}]" +-d "[{\"enable\":true,\"description\":\"validator_one\",\"graffiti\":\"Mr F was here\",\"suggested_fee_recipient\":\"0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d\",\"voting_public_key\":\"0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380\",\"builder_proposals\":true,\"url\":\"http://path-to-web3signer.com\",\"root_certificate_path\":\"/path/to/certificate.pem\",\"client_identity_path\":\"/path/to/identity.p12\",\"client_identity_password\":\"pass\",\"request_timeout_ms\":12000}]" ``` diff --git a/book/src/builders.md b/book/src/builders.md index 014e43211..930d330d9 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -41,7 +41,7 @@ With the `--prefer-builder-proposals` flag, the validator client will always pre lighthouse vc --builder-boost-factor ``` With the `--builder-boost-factor` flag, a percentage multiplier is applied to the builder's payload value when choosing between a -builder payload header and payload from the paired execution node. +builder payload header and payload from the paired execution node. For example, `--builder-boost-factor 50` will only use the builder payload if it is 2x more profitable than the local payload. In order to configure whether a validator queries for blinded blocks check out [this section.](#validator-client-configuration) @@ -157,6 +157,7 @@ You can also directly configure these fields in the `validator_definitions.yml` suggested_fee_recipient: "0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21" gas_limit: 30000001 builder_proposals: true + builder_boost_factor: 50 - enabled: false voting_public_key: "0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477" type: local_keystore voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json @@ -164,6 +165,7 @@ You can also directly configure these fields in the `validator_definitions.yml` suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" gas_limit: 33333333 builder_proposals: true + prefer_builder_proposals: true ``` ## Circuit breaker conditions diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 0c375a5f0..00afea156 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -1,9 +1,8 @@ # Checkpoint Sync -Since version 2.0.0 Lighthouse supports syncing from a recent finalized checkpoint. This is -substantially faster than syncing from genesis, while still providing all the same features. +Lighthouse supports syncing from a recent finalized checkpoint. This is substantially faster than syncing from genesis, while still providing all the same features. Checkpoint sync is also safer as it protects the node from long-range attacks. 
Since 4.6.0, checkpoint sync is required by default and genesis sync will no longer work without the use of `--allow-insecure-genesis-sync`. -If you would like to quickly get started with checkpoint sync, read the sections below on: +To quickly get started with checkpoint sync, read the sections below on: 1. [Automatic Checkpoint Sync](#automatic-checkpoint-sync) 2. [Backfilling Blocks](#backfilling-blocks) diff --git a/book/src/contributing.md b/book/src/contributing.md index 6b84843a6..5b0ab48e8 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -49,7 +49,7 @@ into the canonical spec. ## Rust Lighthouse adheres to Rust code conventions as outlined in the [**Rust -Styleguide**](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/guide.md). +Styleguide**](https://doc.rust-lang.org/nightly/style-guide/). Please use [clippy](https://github.com/rust-lang/rust-clippy) and [rustfmt](https://github.com/rust-lang/rustfmt) to detect common mistakes and diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index a4d28452d..527d42ae3 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -16,6 +16,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| + | v4.6.0 | Dec 2023 | v19 | yes before Deneb | | v4.6.0-rc.0 | Dec 2023 | v18 | yes before Deneb | | v4.5.0 | Sep 2023 | v17 | yes | @@ -158,8 +159,7 @@ lighthouse db version --network mainnet Pruning historic states helps in managing the disk space used by the Lighthouse beacon node by removing old beacon states from the freezer database. This can be especially useful when the database has accumulated a significant amount -of historic data. This command is intended for nodes synced before 4.4.1, as newly synced node no longer store -historic states by default. +of historic data. This command is intended for nodes synced before 4.4.1, as newly synced nodes no longer store historic states by default. Here are the steps to prune historic states: @@ -175,14 +175,27 @@ Here are the steps to prune historic states: sudo -u "$LH_USER" lighthouse db prune-states --datadir "$LH_DATADIR" --network "$NET" ``` + If pruning is available, Lighthouse will log: + + ``` + INFO Ready to prune states + WARN Pruning states is irreversible + WARN Re-run this command with --confirm to commit to state deletion + INFO Nothing has been pruned on this run + ``` + 3. If you are ready to prune the states irreversibly, add the `--confirm` flag to commit the changes: ```bash sudo -u "$LH_USER" lighthouse db prune-states --confirm --datadir "$LH_DATADIR" --network "$NET" ``` - The `--confirm` flag ensures that you are aware the action is irreversible, and historic states will be permanently removed. + The `--confirm` flag ensures that you are aware the action is irreversible, and historic states will be permanently removed. Lighthouse will log: + ``` + INFO Historic states pruned successfully + ``` + 4. 
After successfully pruning the historic states, you can restart the Lighthouse beacon node: ```bash diff --git a/book/src/docker.md b/book/src/docker.md index defa89517..c48c745a0 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -112,7 +112,7 @@ docker run lighthouse:local lighthouse --help You can run a Docker beacon node with the following command: ```bash -docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 9001:9001/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 ``` > To join the Goerli testnet, use `--network goerli` instead. @@ -135,18 +135,18 @@ docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse bea ### Ports -In order to be a good peer and serve other peers you should expose port `9000` for both TCP and UDP. +In order to be a good peer and serve other peers you should expose port `9000` for both TCP and UDP, and port `9001` for UDP. Use the `-p` flag to do this: ```bash -docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 9001:9001/udp sigp/lighthouse lighthouse beacon ``` If you use the `--http` flag you may also want to expose the HTTP port with `-p 127.0.0.1:5052:5052`. ```bash -docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 9001:9001/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 ``` [docker_hub]: https://hub.docker.com/repository/docker/sigp/lighthouse/ diff --git a/book/src/faq.md b/book/src/faq.md index b3f3d55ed..b8b267f17 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -22,7 +22,7 @@ - [Does increasing the number of validators increase the CPU and other computer resources used?](#vc-resource) - [I want to add new validators. Do I have to reimport the existing keys?](#vc-reimport) - [Do I have to stop `lighthouse vc` the when importing new validator keys?](#vc-import) - +- [How can I delete my validator once it is imported?](#vc-delete) ## [Network, Monitoring and Maintenance](#network-monitoring-and-maintenance-1) - [I have a low peer count and it is not increasing](#net-peer) @@ -33,6 +33,7 @@ - [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip) - [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port) - [Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators return.](#net-subnet) +- [How to know how many of my peers are connected through QUIC?](#net-quic) ## [Miscellaneous](#miscellaneous-1) - [What should I do if I lose my slashing protection database?](#misc-slashing) @@ -41,6 +42,7 @@ - [Does Lighthouse have pruning function like the execution client to save disk space?](#misc-prune) - [Can I use a HDD for the freezer database and only have the hot db on SSD?](#misc-freezer) - [Can Lighthouse log in local timestamp instead of UTC?](#misc-timestamp) +- [My hard disk is full and my validator is down. What should I do? 
](#misc-full) ## Beacon Node @@ -78,13 +80,13 @@ The `WARN Execution engine called failed` log is shown when the beacon node cann `error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flags `--execution-timeout-multiplier 3` and `--disable-lock-timeouts` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: -1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. +1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. 1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage. 1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync to the network and you may want to consider upgrading to a better SSD. If the reason for the error message is caused by no. 1 above, you may want to look further. If the execution engine is out of sync suddenly, it is usually caused by ungraceful shutdown. The common causes for ungraceful shutdown are: -- Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. -- The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates. +- Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. +- The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates. - Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. When this occurs, the log file will show `Main process exited, code=killed, status=9/KILL`. You can also run `sudo journalctl -a --since "18 hours ago" | grep -i "killed process` to confirm that the execution client has been killed due to oom. If you are using geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. ### My beacon node is stuck at downloading historical block using checkpoint sync. What should I do? @@ -96,8 +98,8 @@ INFO Downloading historical blocks est_time: --, distance: 4524545 slo ``` If the same log appears every minute and you do not see progress in downloading historical blocks, you can try one of the followings: - - - Check the number of peers you are connected to. 
If you have low peers (less than 50), try to do port forwarding on the port 9000 TCP/UDP to increase peer count. + + - Check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on the ports 9000 TCP/UDP and 9001 UDP to increase peer count. - Restart the beacon node. @@ -108,7 +110,7 @@ INFO Block from HTTP API already known` WARN Could not publish message error: Duplicate, service: libp2p ``` -This error usually happens when users are running mev-boost. The relay will publish the block on the network before returning it back to you. After the relay published the block on the network, it will propagate through nodes, and it happens quite often that your node will receive the block from your connected peers via gossip first, before getting the block from the relay, hence the message `duplicate`. +This error usually happens when users are running mev-boost. The relay will publish the block on the network before returning it back to you. After the relay published the block on the network, it will propagate through nodes, and it happens quite often that your node will receive the block from your connected peers via gossip first, before getting the block from the relay, hence the message `duplicate`. In short, it is nothing to worry about. @@ -122,7 +124,7 @@ WARN Head is optimistic execution_block_hash: 0x47e7555f1d4215d1ad409b1ac1 It means the beacon node will follow the chain, but it will not be able to attest or produce blocks. This is because the execution client is not synced, so the beacon chain cannot verify the authenticity of the chain head, hence the word `optimistic`. What you need to do is to make sure that the execution client is up and syncing. Once the execution client is synced, the error will disappear. -### My beacon node logs `CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout, service: beacon`, what should I do? +### My beacon node logs `CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout, service: beacon`, what should I do? An example of the log is shown below: @@ -131,7 +133,7 @@ CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTime WARN BlockProcessingFailure outcome: ValidatorPubkeyCacheLockTimeout, msg: unexpected condition in processing block. ``` -A `Timeout` error suggests that the computer may be overloaded at the moment, for example, the execution client is still syncing. You may use the flag `--disable-lock-timeouts` to silence this error, although it will not fix the underlying slowness. Nevertheless, this is a relatively harmless log, and the error should go away once the resources used are back to normal. +A `Timeout` error suggests that the computer may be overloaded at the moment, for example, the execution client is still syncing. You may use the flag `--disable-lock-timeouts` to silence this error, although it will not fix the underlying slowness. Nevertheless, this is a relatively harmless log, and the error should go away once the resources used are back to normal. ### My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do? @@ -141,7 +143,7 @@ An example of the full log is shown below: WARN BlockProcessingFailure outcome: MissingBeaconBlock(0xbdba211f8d72029554e405d8e4906690dca807d1d7b1bc8c9b88d7970f1648bc), msg: unexpected condition in processing block. ``` -`MissingBeaconBlock` suggests that the database has corrupted. 
You should wipe the database and use [Checkpoint Sync](./checkpoint-sync.md) to resync the beacon chain. +`MissingBeaconBlock` suggests that the database is corrupted. You should wipe the database and use [Checkpoint Sync](./checkpoint-sync.md) to resync the beacon chain. ### After checkpoint sync, the progress of `downloading historical blocks` is slow. Why? @@ -171,7 +173,7 @@ The error is `503 Service Unavailable`. This means that the beacon node is still ERRO Failed to download attester duties err: FailedToDownloadAttesters("Some endpoints failed, num_failed: 2 http://localhost:5052/ => Unavailable(NotSynced), http://localhost:5052/ => RequestFailed(ServerMessage(ErrorMessage { code: 503, message: \"SERVICE_UNAVAILABLE: beacon node is syncing ``` -This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. +This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is unable to fulfil the request. The error will disappear once the beacon node is synced. ### My beacon node logs `WARN Error signalling fork choice waiter`, what should I do? @@ -266,9 +268,9 @@ repeats until the queue is cleared. The churn limit is summarised in the table b
-| Number of active validators | Validators activated per epoch | Validators activated per day | +| Number of active validators | Validators activated per epoch | Validators activated per day | |-------------------|--------------------------------------------|----| -| 327679 or less | 4 | 900 | +| 327679 or less | 4 | 900 | | 327680-393215 | 5 | 1125 | | 393216-458751 | 6 | 1350 | 458752-524287 | 7 | 1575 @@ -283,7 +285,7 @@ repeats until the queue is cleared. The churn limit is summarised in the table b
-For example, the number of active validators on Mainnet is about 574000 on May 2023. This means that 8 validators can be activated per epoch or 1800 per day (it is noted that the same applies to the exit queue). If, for example, there are 9000 validators waiting to be activated, this means that the waiting time can take up to 5 days. +For example, the number of active validators on Mainnet is about 574000 on May 2023. This means that 8 validators can be activated per epoch or 1800 per day (it is noted that the same applies to the exit queue). If, for example, there are 9000 validators waiting to be activated, this means that the waiting time can take up to 5 days. Once a validator has been activated, congratulations! It's time to produce blocks and attestations! @@ -296,14 +298,14 @@ duplicate your JSON keystores and don't run `lighthouse vc` twice). This will le However, there are some components which can be configured with redundancy. See the [Redundancy](./redundancy.md) guide for more information. -### I am missing attestations. Why? +### I am missing attestations. Why? The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that is causing the missed attestations. Check the setup to ensure that: - the clock is synced - the computer has sufficient resources and is not overloaded - the internet is working well - you have sufficient peers -You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). +You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Another cause for missing attestations is delays during block processing. When this happens, the debug logs will show (debug logs can be found under `$datadir/beacon/logs`): @@ -311,14 +313,14 @@ Another cause for missing attestations is delays during block processing. When t DEBG Delayed head block set_as_head_delay: Some(93.579425ms), imported_delay: Some(1.460405278s), observed_delay: Some(2.540811921s), block_delay: 4.094796624s, slot: 6837344, proposer_index: 211108, block_root: 0x2c52231c0a5a117401f5231585de8aa5dd963bc7cbc00c544e681342eedd1700, service: beacon ``` -The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block. The `imported_delay` of larger than 1 second suggests that there is slowness in processing the block. It could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). The `observed_delay` of less than 3 seconds means that the block is not arriving late from the block proposer. Combining the above, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block. +The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block. The `imported_delay` of larger than 1 second suggests that there is slowness in processing the block. It could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. 
The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). The `observed_delay` of less than 3 seconds means that the block is not arriving late from the block proposer. Combining the above, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block. ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone elses performance. -You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block. +You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block. Another possible reason for missing the head vote is due to a chain "reorg". A reorg can happen if the proposer publishes block `n` late, and the proposer of block `n+1` builds upon block `n-1` instead of `n`. This is called a "reorg". Due to the reorg, block `n` was never included in the chain. If you are assigned to attest at slot `n`, it is possible you may still attest to block `n` despite most of the network recognizing the block as being late. In this case you will miss the head reward. @@ -345,6 +347,13 @@ Generally yes. If you do not want to stop `lighthouse vc`, you can use the [key manager API](./api-vc-endpoints.md) to import keys. + +### How can I delete my validator once it is imported? + +Lighthouse supports the [KeyManager API](https://ethereum.github.io/keymanager-APIs/#/Local%20Key%20Manager/deleteKeys) to delete validators and remove them from the `validator_definitions.yml` file. To do so, start the validator client with the flag `--http` and call the API. + +If you are looking to delete the validators in one node and import it to another, you can use the [validator-manager](./validator-manager-move.md) to move the validators across nodes without the hassle of deleting and importing the keys. + ## Network, Monitoring and Maintenance ### I have a low peer count and it is not increasing @@ -379,7 +388,7 @@ If the ports are open, you should have incoming peers. To check that you have in If you have incoming peers, it should return a lot of data containing information of peers. If the response is empty, it means that you have no incoming peers and there the ports are not open. You may want to double check if the port forward was correctly set up. -2. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 80. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. 
We recommend users to leave the `--target peers` untouched to keep a diverse set of peers. +2. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 80. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. We recommend users to leave `--target-peers` untouched to keep a diverse set of peers. 3. Ensure that you have a quality router for the internet connection. For example, if you connect the router to many devices including the node, it may be possible that the router cannot handle all routing tasks, hence struggling to keep up the number of peers. Therefore, using a quality router for the node is important to keep a healthy number of peers. @@ -426,8 +435,8 @@ For these reasons, we recommend that you make your node publicly accessible. Lighthouse supports UPnP. If you are behind a NAT with a router that supports UPnP, you can simply ensure UPnP is enabled (Lighthouse will inform you in its initial logs if a route has been established). You can also manually [set up port mappings/port forwarding](./advanced_networking.md#how-to-open-ports) in your router to your local Lighthouse instance. By default, -Lighthouse uses port 9000 for both TCP and UDP. Opening both these ports will -make your Lighthouse node maximally contactable. +Lighthouse uses port 9000 for both TCP and UDP, and optionally 9001 UDP for QUIC support. +Opening these ports will make your Lighthouse node maximally contactable. ### How can I monitor my validators? @@ -440,7 +449,7 @@ Monitoring](./validator-monitoring.md) for more information. Lighthouse has also The setting on the beacon node is the same for both cases below. In the beacon node, specify `lighthouse bn --http-address local_IP` so that the beacon node is listening on the local network rather than `localhost`. You can find the `local_IP` by running the command `hostname -I | awk '{print $1}'` on the server running the beacon node. 1. If the beacon node and validator clients are on different servers *in the same network*, the setting in the validator client is as follows: - + Use the flag `--beacon-nodes` to point to the beacon node. For example, `lighthouse vc --beacon-nodes http://local_IP:5052` where `local_IP` is the local IP address of the beacon node and `5052` is the default `http-port` of the beacon node. If you have firewall setup, e.g., `ufw`, you will need to allow port 5052 (assuming that the default port is used) with `sudo ufw allow 5052`. Note: this will allow all IP addresses to access the HTTP API of the beacon node. If you are on an untrusted network (e.g., a university or public WiFi) or the host is exposed to the internet, apply IP-address filtering as described later in this section. @@ -463,7 +472,7 @@ If you have firewall setup, e.g., `ufw`, you will need to allow connections to port 5052 (assuming that the default port is used). Since the beacon node HTTP/HTTPS API is public-facing (i.e., the 5052 port is now exposed to the internet due to port forwarding), we strongly recommend users to apply IP-address filtering to the BN/VC connection from malicious actors. This can be done using the command: - + ``` sudo ufw allow from vc_IP_address proto tcp to any port 5052 ``` @@ -476,16 +485,35 @@ It is also worth noting that the `--beacon-nodes` flag can also be used for redu No.
Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR). You just need to make sure you are not manually setting the ENR with `--enr-address` (which, for common use cases, is not used). ### How to change the TCP/UDP port 9000 that Lighthouse listens on? -Use the flag ```--port ``` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., ```--port 9001```. +Use the flag `--port ` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., `--port 9100`. +Since v4.5.0, Lighthouse supports QUIC and by default will use the value of `--port` + 1 to listen via UDP (default `9001`). +This can be configured by using the flag `--quic-port`. Refer to [Advanced Networking](./advanced_networking.md#nat-traversal-port-forwarding) for more information. ### Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators return. -Previously, having more validators means subscribing to more subnets. Since the change, a node will now only subscribe to 2 subnets in total. This will bring about significant reductions in bandwidth for nodes with multiple validators. +Previously, having more validators meant subscribing to more subnets. Since the change, a node will now only subscribe to 2 subnets in total. This will bring about significant reductions in bandwidth for nodes with multiple validators. + +While subscribing to more subnets can ensure you have peers on a wider range of subnets, these subscriptions consume resources and bandwidth. This does not significantly increase the performance of the node, however it does benefit other nodes on the network. -While subscribing to more subnets can ensure you have peers on a wider range of subnets, these subscriptions consume resources and bandwidth. This does not significantly increase the performance of the node, however it does benefit other nodes on the network. - If you would still like to subscribe to all subnets, you can use the flag `subscribe-all-subnets`. This may improve the block rewards by 1-5%, though it comes at the cost of a much higher bandwidth requirement. +### How to know how many of my peers are connected via QUIC? + +With `--metrics` enabled in the beacon node, you can find the number of peers connected via QUIC using: + +```bash + curl -s "http://localhost:5054/metrics" | grep libp2p_quic_peers +``` + +A response example is: + +``` +# HELP libp2p_quic_peers Count of libp2p peers currently connected via QUIC +# TYPE libp2p_quic_peers gauge +libp2p_quic_peers 4 +``` +which shows that there are 4 peers connected via QUIC. + ## Miscellaneous ### What should I do if I lose my slashing protection database? @@ -523,7 +551,7 @@ which says that the version is v4.1.0. ### Does Lighthouse have pruning function like the execution client to save disk space? -There is no pruning of Lighthouse database for now.
However, since v4.2.0, a feature to only sync back to the weak subjectivity point (approximately 5 months) when syncing via a checkpoint sync was added. This will help to save disk space since the previous behaviour will sync back to the genesis by default. ### Can I use a HDD for the freezer database and only have the hot db on SSD? @@ -531,11 +559,13 @@ Yes, you can do so by using the flag `--freezer-dir /path/to/freezer_db` in the ### Can Lighthouse log in local timestamp instead of UTC? -The reason why Lighthouse logs in UTC is due to the dependency on an upstream library that is [yet to be resolved](https://github.com/sigp/lighthouse/issues/3130). Alternatively, using the flag `disable-log-timestamp` in combination with systemd will suppress the UTC timestamps and print the logs in local timestamps. - - - - +The reason why Lighthouse logs in UTC is due to the dependency on an upstream library that is [yet to be resolved](https://github.com/sigp/lighthouse/issues/3130). Alternatively, using the flag `disable-log-timestamp` in combination with systemd will suppress the UTC timestamps and print the logs in local timestamps. + +### My hard disk is full and my validator is down. What should I do? + +A quick way to get the validator back online is by removing the Lighthouse beacon node database and resyncing Lighthouse using checkpoint sync. A guide to do this can be found in the [Lighthouse Discord server](https://discord.com/channels/605577013327167508/605577013331361793/1019755522985050142). With some free space left, you will then be able to prune the execution client database to free up more space. + +For a relatively long-term solution, if you are using Geth or Nethermind as the execution client, you can consider setting up the online pruning feature. Refer to [Geth](https://blog.ethereum.org/2023/09/12/geth-v1-13-0) and [Nethermind](https://gist.github.com/yorickdowne/67be09b3ba0a9ff85ed6f83315b5f7e0) for details. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index dff2ab687..e55c34a9f 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -32,11 +32,7 @@ FLAGS: --disable-deposit-contract-sync Explicitly disables syncing of deposit logs from the execution node. This overrides any previous option that depends on it. Useful if you intend to run a non-validating beacon node. - --disable-duplicate-warn-logs Disable warning logs for duplicate gossip messages. The WARN level log is - useful for detecting a duplicate validator key running elsewhere. - However, this may result in excessive warning logs if the validator is - broadcasting messages to multiple beacon nodes via the validator client - --broadcast flag. In this case, disabling these warn logs may be useful. + --disable-duplicate-warn-logs This flag is deprecated and has no effect. -x, --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. This disables this feature, fixing the ENR's IP/PORT to those specified on boot. @@ -183,6 +179,9 @@ OPTIONS: --builder-user-agent The HTTP user agent to send alongside requests to the builder URL. The default is Lighthouse's version string. + --checkpoint-blobs Set the checkpoint blobs to start syncing from. Must be aligned and match --checkpoint-block. Using --checkpoint-sync-url instead is recommended. --checkpoint-block Set a checkpoint block to start syncing from. Must be aligned and match --checkpoint-state. Using --checkpoint-sync-url instead is recommended.
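Tying the flags above to the disk-full recovery described earlier: rather than supplying `--checkpoint-block`/`--checkpoint-blobs` by hand, a resync after wiping the database would normally use `--checkpoint-sync-url`. A rough sketch, assuming a systemd service named `lighthousebeacon` and the default datadir (both assumptions, adjust to your setup):

```bash
# Stop the beacon node, delete its chain database, then restart with
# checkpoint sync from a trusted provider (URL is an example).
sudo systemctl stop lighthousebeacon
sudo rm -rf /var/lib/lighthouse/beacon
lighthouse bn \
  --network mainnet \
  --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \
  --http
```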
@@ -462,6 +461,9 @@ OPTIONS: --slots-per-restore-point Specifies how often a freezer DB restore point should be stored. Cannot be changed after initialization. [default: 8192 (mainnet) or 64 (minimal)] + --state-cache-size + Specifies the size of the snapshot cache [default: 3] + --suggested-fee-recipient Emergency fallback fee recipient for use in case the validator client does not have one configured. You should set this flag on the validator client instead of (or in addition to) setting it here. diff --git a/book/src/help_vc.md b/book/src/help_vc.md index bc6deec1e..3d2519aac 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -13,7 +13,7 @@ FLAGS: --disable-auto-discover If present, do not attempt to discover new validators in the validators-dir. Validators will need to be manually added to the validator_definitions.yml file. - --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will generally increase memory usage, it should only be provided when debugging specific memory allocation issues. @@ -21,6 +21,14 @@ FLAGS: DEPRECATED. Use --broadcast. By default, Lighthouse publishes attestation, sync committee subscriptions and proposer preparation messages to all beacon nodes provided in the `--beacon-nodes flag`. This option changes that behaviour such that these api calls only go out to the first available and synced beacon node + --disable-slashing-protection-web3signer + Disable Lighthouse's slashing protection for all web3signer keys. This can reduce the I/O burden on the VC + but is only safe if slashing protection is enabled on the remote signer and is implemented correctly. DO NOT + ENABLE THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON THE REMOTE SIGNER. YOU WILL + GET SLASHED IF YOU USE THIS FLAG WITHOUT ENABLING WEB3SIGNER'S SLASHING PROTECTION. + --distributed + Enables functionality required for running the validator in a distributed validator cluster. + --enable-doppelganger-protection If this flag is set, Lighthouse will delay startup for three epochs and monitor for messages on the network by any of the validators managed by this client. This will result in three (possibly four) epochs worth of @@ -32,8 +40,8 @@ FLAGS: Enable per validator metrics for > 64 validators. Note: This flag is automatically enabled for <= 64 validators. Enabling this metric for higher validator counts will lead to higher volume of prometheus metrics being collected. - -h, --help Prints help information - --http Enable the RESTful HTTP API server. Disabled by default. + -h, --help Prints help information + --http Enable the RESTful HTTP API server. Disabled by default. --http-allow-keystore-export If present, allow access to the DELETE /lighthouse/keystores HTTP API method, which allows exporting keystores and passwords to HTTP API consumers who have access to the API token. This method is useful for @@ -47,7 +55,7 @@ FLAGS: flag unless you're certain that a new slashing protection database is required. Usually, your database will have been initialized when you imported your validator keys. If you misplace your database and then run with this flag you risk being slashed. - --log-color Force outputting colors when emitting logs to the terminal. + --log-color Force outputting colors when emitting logs to the terminal. 
--logfile-compress If present, compress old log files. This can help reduce the space needed to store old logs. @@ -55,7 +63,7 @@ FLAGS: If present, log files will be generated as world-readable meaning they can be read by any user on the machine. Note that logs can often contain sensitive information about your validator and so this flag should be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. - --metrics Enable the Prometheus metrics HTTP server. Disabled by default. + --metrics Enable the Prometheus metrics HTTP server. Disabled by default. --prefer-builder-proposals If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. @@ -69,7 +77,7 @@ FLAGS: --use-long-timeouts If present, the validator client will use longer timeouts for requests made to the beacon node. This flag is generally not recommended, longer timeouts can cause missed duties when fallbacks are used. - -V, --version Prints version information + -V, --version Prints version information OPTIONS: --beacon-nodes @@ -209,4 +217,9 @@ OPTIONS: --validators-dir The directory which contains the validator keystores, deposit data for each validator along with the common slashing protection database and the validator_definitions.yml + --web3-signer-keep-alive-timeout + Keep-alive timeout for each web3signer connection. Set to 'null' to never timeout [default: 90000] + + --web3-signer-max-idle-connections + Maximum number of idle connections to maintain per web3signer host. Default is unlimited. ``` \ No newline at end of file diff --git a/book/src/http.md b/book/src/http.md deleted file mode 100644 index 82a688586..000000000 --- a/book/src/http.md +++ /dev/null @@ -1,33 +0,0 @@ -# HTTP API - -[OpenAPI Specification](https://ethereum.github.io/beacon-APIs/) - -## Beacon Node - -A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `localhost:5052`. - -The following CLI flags control the HTTP server: - -- `--http`: enable the HTTP server (required even if the following flags are - provided). -- `--http-port`: specify the listen port of the server. -- `--http-address`: specify the listen address of the server. - -The schema of the API aligns with the standard Ethereum Beacon Node API as defined -at [github.com/ethereum/beacon-APIs](https://github.com/ethereum/beacon-APIs). -It is an easy-to-use RESTful HTTP/JSON API. An interactive specification is -available [here](https://ethereum.github.io/beacon-APIs/). - -## Troubleshooting - -### HTTP API is unavailable or refusing connections - -Ensure the `--http` flag has been supplied at the CLI. 
- -You can quickly check that the HTTP endpoint is up using `curl`: - -``` -curl "localhost:5052/beacon/head" - -{"slot":37934,"block_root":"0x4d3ae7ebe8c6ef042db05958ec76e8f7be9d412a67a0defa6420a677249afdc7","state_root":"0x1c86b13ffc70a41e410eccce20d33f1fe59d148585ea27c2afb4060f75fe6be2","finalized_slot":37856,"finalized_block_root":"0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86","justified_slot":37888,"justified_block_root":"0x01c2f516a407d8fdda23cad4ed4381e4ab8913d638f935a2fe9bd00d6ced5ec4","previous_justified_slot":37856,"previous_justified_block_root":"0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86"} -``` diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 58e6917ec..c2f586157 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -28,7 +28,7 @@ operating system. Install the following packages: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang +sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` > Tips: @@ -42,6 +42,16 @@ sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clan After this, you are ready to [build Lighthouse](#build-lighthouse). +#### Fedora/RHEL/CentOS + +Install the following packages: + +```bash +yum -y install git make perl clang cmake +``` + +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### macOS 1. Install the [Homebrew][] package manager. diff --git a/book/src/js/deposit.js b/book/src/js/deposit.js deleted file mode 100644 index 58895854f..000000000 --- a/book/src/js/deposit.js +++ /dev/null @@ -1,127 +0,0 @@ -const NETWORK = "5"; -const NETWORK_NAME = "Goerli Test Network"; -const DEPOSIT_CONTRACT = "0x07b39F4fDE4A38bACe212b546dAc87C58DfE3fDC"; -const DEPOSIT_AMOUNT_ETH = "32"; -const GAS_LIMIT = "4000000"; -const DEPOSIT_DATA_BYTES = 420; - -let PREVIOUS_NON_ERROR_STATE = ""; - -$(document).ready(function(){ - if (typeof window.ethereum !== 'undefined') { - ethereum.on('networkChanged', function (accounts) { - checkNetwork() - }) - - PREVIOUS_NON_ERROR_STATE = "upload"; - checkNetwork() - } else { - console.error("No metamask detected!") - triggerError("Metamask is not installed.
Get Metamask.") - } - - $("#fileInput").change(function() { - openFile(this.files[0]) - }); - - $("#uploadButton").on("click", function() { - $("#fileInput").trigger("click"); - }); -}); - -function checkNetwork() { - if (window.ethereum.networkVersion === NETWORK) { - setUiState(PREVIOUS_NON_ERROR_STATE) - } else { - triggerError("Please set Metamask to use " + NETWORK_NAME + ".") - } -} - -function doDeposit(deposit_data) { - const ethereum = window.ethereum; - const utils = ethers.utils; - - let wei = utils.parseEther(DEPOSIT_AMOUNT_ETH); - let gasLimit = utils.bigNumberify(GAS_LIMIT); - - ethereum.enable() - .then(function (accounts) { - let params = [{ - "from": accounts[0], - "to": DEPOSIT_CONTRACT, - "gas": utils.hexlify(gasLimit), - "value": utils.hexlify(wei), - "data": deposit_data - }] - - ethereum.sendAsync({ - method: 'eth_sendTransaction', - params: params, - from: accounts[0], // Provide the user's account to use. - }, function (err, result) { - if (err !== null) { - triggerError("

" + err.message + "

Reload the window to try again.

") - } else { - let tx_hash = result.result; - $("#txLink").attr("href", "https://goerli.etherscan.io/tx/" + tx_hash); - setUiState("waiting"); - } - }) - }) - .catch(function (error) { - triggerError("Unable to get Metamask accounts.
Reload page to try again.") - }) - -} - -function openFile(file) { - var reader = new FileReader(); - - reader.onload = function () { - let data = reader.result; - if (data.startsWith("0x")) { - if (data.length === DEPOSIT_DATA_BYTES * 2 + 2) { - doDeposit(data) - } else { - triggerError("Invalid eth1_deposit_file. Bad length.") - } - } else { - triggerError("Invalid eth1_deposit_file. Did not start with 0x.") - } - } - - reader.readAsBinaryString(file); -} - -function triggerError(text) { - $("#errorText").html(text); - setUiState("error"); -} - -function setUiState(state) { - if (state === "upload") { - $('#uploadDiv').show(); - $('#depositDiv').hide(); - $('#waitingDiv').hide(); - $('#errorDiv').hide(); - } else if (state == "deposit") { - $('#uploadDiv').hide(); - $('#depositDiv').show(); - $('#waitingDiv').hide(); - $('#errorDiv').hide(); - } else if (state == "error") { - $('#uploadDiv').hide(); - $('#depositDiv').hide(); - $('#waitingDiv').hide(); - $('#errorDiv').show(); - } else if (state == "waiting") { - $('#uploadDiv').hide(); - $('#depositDiv').hide(); - $('#waitingDiv').show(); - $('#errorDiv').hide(); - } - - if (state !== "error") { - PREVIOUS_NON_ERROR_STATE = state; - } -} diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index bab520b56..e2dab9652 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -207,6 +207,6 @@ guidance for specific setups. - [Ethereum.org: The Merge](https://ethereum.org/en/upgrades/merge/) - [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). -- [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) +- [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/archived-guides/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) - [EthDocker: Merge Preparation](https://eth-docker.net/About/MergePrep/) - [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/pi.md b/book/src/pi.md index 550415240..7ccfe6a02 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -22,7 +22,7 @@ terminal and an Internet connection are necessary. Install the Ubuntu dependencies: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang +sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` > Tips: diff --git a/book/src/redundancy.md b/book/src/redundancy.md index 11b984565..bd1976f95 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -101,10 +101,6 @@ from this list: - `none`: Disable all broadcasting. This option only has an effect when provided alone, otherwise it is ignored. Not recommended except for expert tweakers. -Broadcasting attestation, blocks and sync committee messages may result in excessive warning logs in the beacon node -due to duplicate gossip messages. In this case, it may be desirable to disable warning logs for duplicates using the -beacon node `--disable-duplicate-warn-logs` flag. - The default is `--broadcast subscriptions`. To also broadcast blocks for example, use `--broadcast subscriptions,blocks`. 
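As a concrete sketch of these flags, a validator client pointed at two beacon nodes with block broadcasting enabled might be started as follows (both endpoints are placeholders):

```bash
# Subscriptions are broadcast by default; blocks are additionally broadcast
# to every beacon node in the list.
lighthouse vc \
  --beacon-nodes http://localhost:5052,http://192.168.0.201:5052 \
  --broadcast subscriptions,blocks
```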
diff --git a/book/src/slasher.md b/book/src/slasher.md index c8506922b..79a2d1f8e 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -17,7 +17,7 @@ of the immaturity of the slasher UX and the extra resources required. The slasher runs inside the same process as the beacon node, when enabled via the `--slasher` flag: ``` -lighthouse bn --slasher --debug-level debug +lighthouse bn --slasher ``` The slasher hooks into Lighthouse's block and attestation processing, and pushes messages into an @@ -26,9 +26,6 @@ verifies the signatures of otherwise invalid messages. When a slasher batch upda messages are filtered for relevancy, and all relevant messages are checked for slashings and written to the slasher database. -You **should** run with debug logs, so that you can see the slasher's internal machinations, and -provide logs to the developers should you encounter any bugs. - ## Configuration The slasher has several configuration options that control its functioning. diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md index 1a5daa544..31951c3c9 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui-configuration.md @@ -18,7 +18,7 @@ To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This a If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client. -> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. +> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. In a similar manner, the validator client requires activation of the `--http` flag, along with the optional consideration of configuring the `--http-address` flag. If `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well. These settings will ensure compatibility with Siren's connectivity requirements. diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index 92e06270c..77821788f 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -10,7 +10,7 @@ The required Api token may be found in the default data directory of the validat If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). ## 4. How do I connect Siren to Lighthouse from a different computer on the same network? -The most effective approach to enable access for a local network computer to Lighthouse's HTTP API ports is by configuring the `--http-address` to match the local LAN IP of the system running the beacon node and validator client. For instance, if the said node operates at `192.168.0.200`, this IP can be specified using the `--http-address` parameter as `--http-address 192.168.0.200`. +The most effective approach to enable access for a local network computer to Lighthouse's HTTP API ports is by configuring the `--http-address` to match the local LAN IP of the system running the beacon node and validator client. 
For instance, if the said node operates at `192.168.0.200`, this IP can be specified using the `--http-address` parameter as `--http-address 192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. Subsequently, by designating the host as `192.168.0.200`, you can seamlessly connect Siren to this specific beacon node and validator client pair from any computer situated within the same network. ## 5. How can I use Siren to monitor my validators remotely when I am not at home? @@ -22,7 +22,7 @@ Most contemporary home routers provide options for VPN access in various ways. A In the absence of a VPN, an alternative approach involves utilizing an SSH tunnel. To achieve this, you need remote SSH access to the computer hosting the Beacon Node and Validator Client pair (which necessitates a port forward in your router). In this context, while it is not obligatory to set a `--http-address` flag on the Beacon Node and Validator Client, you can configure an SSH tunnel to the local ports on the node and establish a connection through the tunnel. For instructions on setting up an SSH tunnel, refer to [`Connecting Siren via SSH tunnel`](./ui-faqs.md#6-how-do-i-connect-siren-to-lighthouse-via-a-ssh-tunnel) for detailed guidance. ## 6. How do I connect Siren to Lighthouse via a ssh tunnel? -If you would like to access Siren beyond the local network (i.e across the internet), we recommend using an SSH tunnel. This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended), `5052` (for beacon node) and `5062` (for validator client). You can use the command below to perform SSH tunneling: +If you would like to access Siren beyond the local network (i.e. across the internet), we recommend using an SSH tunnel. This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended)), `5052` (for beacon node) and `5062` (for validator client). You can use the command below to perform SSH tunneling: ```bash @@ -55,7 +55,7 @@ If you have separate address setups for your Validator Client and Beacon Node re ## 8. How do I change my Beacon or Validator address after logging in? -Once you have successfully arrived to the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. +Once you have successfully arrived at the main dashboard, use the sidebar to access the settings view. In the top right-hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. ## 9. Why doesn't my validator balance graph show any data? If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min.
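As a sketch of the SSH tunnel described in question 6 above (the user, host and local port for Siren are placeholders, and your exact command may differ):

```bash
# Forward Siren (80), beacon node (5052) and validator client (5062)
# from the remote host to local ports.
ssh -N \
  -L 8080:localhost:80 \
  -L 5052:localhost:5052 \
  -L 5062:localhost:5062 \
  user@remote-node-ip
```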
diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 893ec90bd..71b1632a7 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -57,6 +57,7 @@ Monitor the mainnet validators at indices `0` and `1`: ``` lighthouse bn --validator-monitor-pubkeys 0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95,0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c ``` +> Note: The validator monitoring will stop collecting per-validator Prometheus metrics and issuing per-validator logs when the number of validators reaches 64. To continue collecting metrics and logging, use the flag `--validator-monitor-individual-tracking-threshold N` where `N` is a number greater than the number of validators to monitor. ## Observing Monitoring diff --git a/book/src/validator-web3signer.md b/book/src/validator-web3signer.md index 00ef9a6b5..6a518af3c 100644 --- a/book/src/validator-web3signer.md +++ b/book/src/validator-web3signer.md @@ -30,8 +30,7 @@ or effectiveness. ## Usage A remote signing validator is added to Lighthouse in much the same way as one that uses a local -keystore, via the [`validator_definitions.yml`](./validator-management.md) file or via the `POST -/lighthouse/validators/web3signer` API endpoint. +keystore, via the [`validator_definitions.yml`](./validator-management.md) file or via the [`POST /lighthouse/validators/web3signer`](./api-vc-endpoints.md#post-lighthousevalidatorsweb3signer) API endpoint. Here is an example of a `validator_definitions.yml` file containing one validator which uses a remote signer: diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index bdf01914a..77fd872bd 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.6.0" +version = "5.1.1" authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 16801be8e..3c22c822b 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -373,10 +373,30 @@ impl BeaconNodeHttpClient { if let Some(timeout) = timeout { builder = builder.timeout(timeout); } + let response = builder.json(body).send().await?; ok_or_error(response).await } + /// Generic POST function supporting arbitrary responses and timeouts. + /// Does not include Content-Type application/json in the request header. + async fn post_generic_json_without_content_type_header( + &self, + url: U, + body: &T, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + + let serialized_body = serde_json::to_vec(body).map_err(Error::InvalidJson)?; + + let response = builder.body(serialized_body).send().await?; + ok_or_error(response).await + } + /// Generic POST function supporting arbitrary responses and timeouts. 
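The new `post_generic_json_without_content_type_header` above relies on a reqwest detail worth spelling out: `RequestBuilder::json` implicitly sets a `Content-Type: application/json` header, while handing the builder pre-serialized bytes leaves the header unset. A minimal standalone sketch of that distinction (the function name and error type here are illustrative, not Lighthouse's):

```rust
use reqwest::{Client, Response};
use serde::Serialize;

// Serialize the body manually so reqwest attaches no Content-Type header;
// `builder.json(body)` would set `Content-Type: application/json` implicitly.
async fn post_without_content_type<T: Serialize>(
    client: &Client,
    url: &str,
    body: &T,
) -> Result<Response, Box<dyn std::error::Error>> {
    let bytes = serde_json::to_vec(body)?;
    Ok(client.post(url).body(bytes).send().await?)
}
```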
async fn post_generic_with_consensus_version( &self, @@ -1250,7 +1270,8 @@ impl BeaconNodeHttpClient { .push("pool") .push("attester_slashings"); - self.post(path, slashing).await?; + self.post_generic_json_without_content_type_header(path, slashing, None) + .await?; Ok(()) } @@ -2237,7 +2258,7 @@ impl BeaconNodeHttpClient { pub async fn post_validator_liveness_epoch( &self, epoch: Epoch, - indices: &Vec, + indices: &[u64], ) -> Result>, Error> { let mut path = self.eth_path(V1)?; @@ -2247,8 +2268,12 @@ impl BeaconNodeHttpClient { .push("liveness") .push(&epoch.to_string()); - self.post_with_timeout_and_response(path, indices, self.timeouts.liveness) - .await + self.post_with_timeout_and_response( + path, + &ValidatorIndexDataRef(indices), + self.timeouts.liveness, + ) + .await } /// `POST validator/duties/attester/{epoch}` diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 11706f309..538f1a42d 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -8,15 +8,12 @@ mod standard_block_rewards; mod sync_committee_rewards; use crate::{ - ok_or_error, types::{ - BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, - GenericResponse, ValidatorId, + DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, GenericResponse, ValidatorId, }, - BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode, + BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, }; use proto_array::core::ProtoArray; -use reqwest::IntoUrl; use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; @@ -371,27 +368,6 @@ pub struct DatabaseInfo { } impl BeaconNodeHttpClient { - /// Perform a HTTP GET request, returning `None` on a 404 error. - async fn get_bytes_opt(&self, url: U) -> Result>, Error> { - let response = self.client.get(url).send().await.map_err(Error::from)?; - match ok_or_error(response).await { - Ok(resp) => Ok(Some( - resp.bytes() - .await - .map_err(Error::from)? - .into_iter() - .collect::>(), - )), - Err(err) => { - if err.status() == Some(StatusCode::NOT_FOUND) { - Ok(None) - } else { - Err(err) - } - } - } - } - /// `GET lighthouse/health` pub async fn get_lighthouse_health(&self) -> Result, Error> { let mut path = self.server.full.clone(); @@ -516,28 +492,6 @@ impl BeaconNodeHttpClient { self.get(path).await } - /// `GET lighthouse/beacon/states/{state_id}/ssz` - pub async fn get_lighthouse_beacon_states_ssz( - &self, - state_id: &StateId, - spec: &ChainSpec, - ) -> Result>, Error> { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("beacon") - .push("states") - .push(&state_id.to_string()) - .push("ssz"); - - self.get_bytes_opt(path) - .await? 
- .map(|bytes| BeaconState::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)) - .transpose() - } - /// `GET lighthouse/staking` pub async fn get_lighthouse_staking(&self) -> Result { let mut path = self.server.full.clone(); diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index a301055f3..8a1cf2ff3 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1710,12 +1710,12 @@ impl TryFrom<&HeaderMap> for ProduceBlockV3Metadata { })?; let execution_payload_value = parse_required_header(headers, EXECUTION_PAYLOAD_VALUE_HEADER, |s| { - s.parse::() + Uint256::from_dec_str(s) .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_VALUE_HEADER}: {e:?}")) })?; let consensus_block_value = parse_required_header(headers, CONSENSUS_BLOCK_VALUE_HEADER, |s| { - s.parse::() + Uint256::from_dec_str(s) .map_err(|e| format!("invalid {CONSENSUS_BLOCK_VALUE_HEADER}: {e:?}")) })?; diff --git a/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml index 96baffde6..eb079bfc5 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml @@ -1,3 +1,11 @@ +# chiado-lighthouse-0 +- "enr:-L64QOijsdi9aVIawMb5h5PWueaPM9Ai6P17GNPFlHzz7MGJQ8tFMdYrEx8WQitNKLG924g2Q9cCdzg54M0UtKa3QIKCMxaHYXR0bmV0c4j__________4RldGgykDE2cEMCAABv__________-CaWSCdjSCaXCEi5AaWYlzZWNwMjU2azGhA8CjTkD4m1s8FbKCN18LgqlYcE65jrT148vFtwd9U62SiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo" +# chiado-lighthouse-1 +- "enr:-L64QKYKGQj5ybkfBxyFU5IEVzP7oJkGHJlie4W8BCGAYEi4P0mmMksaasiYF789mVW_AxYVNVFUjg9CyzmdvpyWQ1KCMlmHYXR0bmV0c4j__________4RldGgykDE2cEMCAABv__________-CaWSCdjSCaXCEi5CtNolzZWNwMjU2azGhAuA7BAwIijy1z81AO9nz_MOukA1ER68rGA67PYQ5pF1qiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo" +# chiado-lodestar-0 +- "enr:-Ly4QJJUnV9BxP_rw2Bv7E9iyw4sYS2b4OQZIf4Mu_cA6FljJvOeSTQiCUpbZhZjR4R0VseBhdTzrLrlHrAuu_OeZqgJh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhIuQGnOJc2VjcDI1NmsxoQPT_u3IjDtB2r-nveH5DhUmlM8F2IgLyxhmwmqW4L5k3ohzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA" +# chiado-prysm-0 +- "enr:-MK4QCkOyqOTPX1_-F-5XVFjPclDUc0fj3EeR8FJ5-hZjv6ARuGlFspM0DtioHn1r6YPUXkOg2g3x6EbeeKdsrvVBYmGAYQKrixeh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhIuQGlWJc2VjcDI1NmsxoQKdW3-DgLExBkpLGMRtuM88wW_gZkC7Yeg0stYDTrlynYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA" # chiado-teku-0 - "enr:-Ly4QLYLNqrjvSxD3lpAPBUNlxa6cIbe79JqLZLFcZZjWoCjZcw-85agLUErHiygG2weRSCLnd5V460qTbLbwJQsfZkoh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhKq7mu-Jc2VjcDI1NmsxoQP900YAYa9kdvzlSKGjVo-F3XVzATjOYp3BsjLjSophO4hzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA" # chiado-teku-1 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml index e5896988c..7b64e66fe 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml @@ -7,3 +7,9 @@ - enr:-Ly4QBbaKRSX4SncCOxTTL611Kxlz-zYFrIn-k_63jGIPK_wbvFghVUHJICPCxufgTX5h79jvgfPr-2hEEQEdziGQ5MCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhAMazo6Jc2VjcDI1NmsxoQKt-kbM9isuWp8djhyEq6-4MLv1Sy7dOXeMOMdPgwu9LohzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA - 
enr:-Ly4QKJ5BzgFyJ6BaTlGY0C8ROzl508U3GA6qxdG5Gn2hxdke6nQO187pYlLvhp82Dez4PQn436Fts1F0WAm-_5l2LACh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhA-YLVKJc2VjcDI1NmsxoQI8_Lvr6p_TkcAu8KorKacfUEnoOon0tdO0qWhriPdBP4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA - enr:-Ly4QJMtoiX2bPnVbiQOJCLbtUlqdqZk7kCJQln_W1bp1vOHcxWowE-iMXkKC4_uOb0o73wAW71WYi80Dlsg-7a5wiICh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhDbP3KmJc2VjcDI1NmsxoQNvcfKYUqcemLFlpKxl7JcQJwQ3L9unYL44gY2aEiRnI4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QIAhiTHk6JdVhCdiLwT83wAolUFo5J4nI5HrF7-zJO_QEw3cmEGxC1jvqNNUN64Vu-xxqDKSM528vKRNCehZAfEBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhEFtZ5SJc2VjcDI1NmsxoQJwgL5C-30E8RJmW8gCb7sfwWvvfre7wGcCeV4X1G2wJYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QDhEjlkf8fwO5uWAadexy88GXZneTuUCIPHhv98v8ZfXMtC0S1S_8soiT0CMEgoeLe9Db01dtkFQUnA9YcnYC_8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhEFtZ5WJc2VjcDI1NmsxoQMRSho89q2GKx_l2FZhR1RmnSiQr6o_9hfXfQUuW6bjMohzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QLKgv5M2D4DYJgo6s4NG_K4zu4sk5HOLCfGCdtgoezsbfRbfGpQ4iSd31M88ec3DHA5FWVbkgIas9EaJeXia0nwBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhI1eYRaJc2VjcDI1NmsxoQLpK_A47iNBkVjka9Mde1F-Kie-R0sq97MCNKCxt2HwOIhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QF_0qvji6xqXrhQEhwJR1W9h5dXV7ZjVCN_NlosKxcgZW6emAfB_KXxEiPgKr_-CZG8CWvTiojEohG1ewF7P368Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhI1eYUqJc2VjcDI1NmsxoQIpNRUT6llrXqEbjkAodsZOyWv8fxQkyQtSvH4sg2D7n4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QCD5D99p36WafgTSxB6kY7D2V1ca71C49J4VWI2c8UZCCPYBvNRWiv0-HxOcbpuUdwPVhyWQCYm1yq2ZH0ukCbQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhI1eYVSJc2VjcDI1NmsxoQJJMSV8iSZ8zvkgbi8cjIGEUVJeekLqT0LQha_co-siT4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-KK4QKXJq1QOVWuJAGige4uaT8LRPQGCVRf3lH3pxjaVScMRUfFW1eiiaz8RwOAYvw33D4EX-uASGJ5QVqVCqwccxa-Bi4RldGgykCGm-DYDAABk__________-CaWSCdjSCaXCEM0QnzolzZWNwMjU2azGhAhNvrRkpuK4MWTf3WqiOXSOePL8Zc-wKVpZ9FQx_BDadg3RjcIIjKIN1ZHCCIyg \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 940fad361..2311d6db0 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -41,7 +41,7 @@ CAPELLA_FORK_VERSION: 0x03000064 CAPELLA_FORK_EPOCH: 648704 # Deneb DENEB_FORK_VERSION: 0x04000064 -DENEB_FORK_EPOCH: 18446744073709551615 +DENEB_FORK_EPOCH: 889856 # 2024-03-11T18:30:20.000Z # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -75,7 +75,7 @@ EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**3 (= 8) -MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 2 # 2**12 (= 4096) CHURN_LIMIT_QUOTIENT: 4096 @@ -105,3 +105,14 @@ MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 ATTESTATION_SUBNET_COUNT: 64 ATTESTATION_SUBNET_EXTRA_BITS: 0 ATTESTATION_SUBNET_PREFIX_BITS: 6 +ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**14` (= 16384 epochs, ~15 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml 
b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 0bb72ebd8..1104791ed 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -105,6 +105,7 @@ ATTESTATION_SUBNET_COUNT: 64 ATTESTATION_SUBNET_EXTRA_BITS: 0 # ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS ATTESTATION_SUBNET_PREFIX_BITS: 6 +ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # Deneb # `2**7` (=128) diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index ed96df291..b29ecfc6d 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -1,9 +1,15 @@ # Mainnet config # Extends the mainnet preset -CONFIG_NAME: 'mainnet' PRESET_BASE: 'mainnet' +# Free-form short name of the network that this configuration applies to - known +# canonical network names include: +# * 'mainnet' - there can be only one +# * 'prater' - testnet +# Must match the regex: [a-z0-9\-] +CONFIG_NAME: 'mainnet' + # Transition # --------------------------------------------------------------- # Estimated on Sept 15, 2022 @@ -12,6 +18,8 @@ TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + + # Genesis # --------------------------------------------------------------- # `2**14` (= 16,384) @@ -32,22 +40,16 @@ GENESIS_DELAY: 604800 # Altair ALTAIR_FORK_VERSION: 0x01000000 -ALTAIR_FORK_EPOCH: 74240 -# Merge +ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC +# Bellatrix BELLATRIX_FORK_VERSION: 0x02000000 -BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC +BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 -CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC +CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC # Deneb DENEB_FORK_VERSION: 0x04000000 -DENEB_FORK_EPOCH: 18446744073709551615 -# Sharding -SHARDING_FORK_VERSION: 0x03000000 -SHARDING_FORK_EPOCH: 18446744073709551615 - -# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. 
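The human-readable timestamps annotated on the fork epochs in this config are derivable from slot timing alone. A small sketch, assuming the public mainnet parameters (genesis at Unix time 1606824023, 12-second slots, 32 slots per epoch):

```rust
const GENESIS_TIME: u64 = 1_606_824_023; // mainnet genesis, Dec 1, 2020
const SECONDS_PER_SLOT: u64 = 12;
const SLOTS_PER_EPOCH: u64 = 32;

// Unix time at which a given epoch begins.
fn epoch_start_unix(epoch: u64) -> u64 {
    GENESIS_TIME + epoch * SLOTS_PER_EPOCH * SECONDS_PER_SLOT
}

fn main() {
    // BELLATRIX_FORK_EPOCH = 144896 -> Sept 6, 2022, 11:34:47am UTC
    assert_eq!(epoch_start_unix(144_896), 1_662_464_087);
    // CAPELLA_FORK_EPOCH = 194048 -> April 12, 2023, 10:27:35pm UTC
    assert_eq!(epoch_start_unix(194_048), 1_681_338_455);
}
```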
-TRANSITION_TOTAL_DIFFICULTY: 4294967296 +DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC # Time parameters @@ -74,16 +76,22 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**3 (= 8) -MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 - +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # Fork choice # --------------------------------------------------------------- # 40% PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + # Deposit contract # --------------------------------------------------------------- @@ -92,16 +100,43 @@ DEPOSIT_CHAIN_ID: 1 DEPOSIT_NETWORK_ID: 1 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa -# Network + +# Networking # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 +# `10 * 2**20` (= 10485760, 10 MiB) GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) MAX_CHUNK_SIZE: 10485760 +# 5s TTFB_TIMEOUT: 5 +# 10s RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) ATTESTATION_SUBNET_COUNT: 64 ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS ATTESTATION_SUBNET_PREFIX_BITS: 6 +ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index 1928aeb30..ac94b6386 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -115,6 +115,7 @@ ATTESTATION_SUBNET_COUNT: 64 ATTESTATION_SUBNET_EXTRA_BITS: 0 # ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS ATTESTATION_SUBNET_PREFIX_BITS: 6 +ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # Deneb # `2**7` (=128) diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 33a5ccb3f..89b51ba76 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -104,6 +104,7 @@ ATTESTATION_SUBNET_COUNT: 64 ATTESTATION_SUBNET_EXTRA_BITS: 0 # ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS ATTESTATION_SUBNET_PREFIX_BITS: 6 +ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # Deneb # `2**7` (=128) diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 565b8d789..a76a8320a 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -583,6 +583,8 @@ mod tests { } 
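Several of the constants above are documented as arithmetic on other parameters; a quick consistency check (MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 and MAX_BLOBS_PER_BLOCK = 6 come from the mainnet preset, which is not part of this diff):

```rust
fn main() {
    // MIN_EPOCHS_FOR_BLOCK_REQUESTS =
    //     MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2
    let min_validator_withdrawability_delay = 256u64;
    let churn_limit_quotient = 65_536u64;
    assert_eq!(
        min_validator_withdrawability_delay + churn_limit_quotient / 2,
        33_024
    );

    // MAX_REQUEST_BLOB_SIDECARS = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
    let max_request_blocks_deneb = 128u64;
    let max_blobs_per_block = 6u64;
    assert_eq!(max_request_blocks_deneb * max_blobs_per_block, 768);
}
```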
else { GenesisStateSource::Unknown }; + // With Deneb enabled by default we must set a trusted setup here. + let kzg_trusted_setup = get_trusted_setup_from_config(&config).unwrap(); let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, @@ -593,7 +595,7 @@ mod tests { .map(Encode::as_ssz_bytes) .map(Into::into), config, - kzg_trusted_setup: None, + kzg_trusted_setup: Some(kzg_trusted_setup), }; testnet diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index af1172348..81d0e797a 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.6.0-", - fallback = "Lighthouse/v4.6.0" + prefix = "Lighthouse/v5.1.1-", + fallback = "Lighthouse/v5.1.1" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index caf3e1d2f..3a5a5209b 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -223,7 +223,7 @@ impl TimeLatch { } } -pub fn create_tracing_layer(base_tracing_log_path: PathBuf, turn_on_terminal_logs: bool) { +pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { let filter_layer = match tracing_subscriber::EnvFilter::try_from_default_env() .or_else(|_| tracing_subscriber::EnvFilter::try_new("warn")) { @@ -268,11 +268,7 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf, turn_on_terminal_log if let Err(e) = tracing_subscriber::fmt() .with_env_filter(filter_layer) - .with_writer(move || { - tracing_subscriber::fmt::writer::OptionalWriter::::from( - turn_on_terminal_logs.then(std::io::stdout), - ) - }) + .with_writer(std::io::sink) .finish() .with(MetricsLayer) .with(custom_layer) diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs index d10540e50..ec64ce31a 100644 --- a/common/system_health/src/lib.rs +++ b/common/system_health/src/lib.rs @@ -198,6 +198,25 @@ pub fn observe_system_health_vc( } } +/// Observes if NAT traversal is possible. +pub fn observe_nat() -> bool { + let discv5_nat = lighthouse_network::metrics::get_int_gauge( + &lighthouse_network::metrics::NAT_OPEN, + &["discv5"], + ) + .map(|g| g.get() == 1) + .unwrap_or_default(); + + let libp2p_nat = lighthouse_network::metrics::get_int_gauge( + &lighthouse_network::metrics::NAT_OPEN, + &["libp2p"], + ) + .map(|g| g.get() == 1) + .unwrap_or_default(); + + discv5_nat && libp2p_nat +} + /// Observes the Beacon Node system health. pub fn observe_system_health_bn( sysinfo: Arc>, @@ -223,11 +242,7 @@ pub fn observe_system_health_bn( .unwrap_or_else(|| (String::from("None"), 0, 0)); // Determine if the NAT is open or not. 
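`observe_nat` above reports the NAT as open only when both the discv5 and libp2p gauges read 1, and treats a missing gauge as closed. A self-contained sketch of the same defaulting pattern against the `prometheus` crate directly (the metric name and labels are illustrative, not Lighthouse's registry):

```rust
use prometheus::{IntGaugeVec, Opts};

fn main() {
    let nat_open =
        IntGaugeVec::new(Opts::new("nat_open", "1 if NAT is open"), &["protocol"]).unwrap();
    nat_open.with_label_values(&["discv5"]).set(1);

    // A lookup failure defaults to `false`, i.e. "NAT closed".
    let discv5_open = nat_open
        .get_metric_with_label_values(&["discv5"])
        .map(|g| g.get() == 1)
        .unwrap_or_default();
    let libp2p_open = nat_open
        .get_metric_with_label_values(&["libp2p"])
        .map(|g| g.get() == 1)
        .unwrap_or_default();

    // Both transports must report an open NAT for the combined result.
    assert!(discv5_open);
    assert!(!(discv5_open && libp2p_open));
}
```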
- let nat_open = lighthouse_network::metrics::NAT_OPEN - .as_ref() - .map(|v| v.get()) - .unwrap_or(0) - != 0; + let nat_open = observe_nat(); SystemHealthBN { system_health, diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 38f4eca36..b3d58fa5e 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,10 +5,10 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] +async-channel = { workspace = true } tokio = { workspace = true } slog = { workspace = true } futures = { workspace = true } -exit-future = { workspace = true } lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } sloggers = { workspace = true } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 6bf4cc8e0..d6edfd312 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -3,7 +3,7 @@ pub mod test_utils; use futures::channel::mpsc::Sender; use futures::prelude::*; -use slog::{crit, debug, o, trace}; +use slog::{debug, o, trace}; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; @@ -73,7 +73,7 @@ pub struct TaskExecutor { /// The handle to the runtime on which tasks are spawned handle_provider: HandleProvider, /// The receiver exit future which on receiving shuts down the task - exit: exit_future::Exit, + exit: async_channel::Receiver<()>, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. /// @@ -93,7 +93,7 @@ impl TaskExecutor { /// crate). pub fn new>( handle: T, - exit: exit_future::Exit, + exit: async_channel::Receiver<()>, log: slog::Logger, signal_tx: Sender, ) -> Self { @@ -138,23 +138,11 @@ impl TaskExecutor { name: &'static str, ) { let mut shutdown_sender = self.shutdown_sender(); - let log = self.log.clone(); - if let Some(handle) = self.handle() { handle.spawn(async move { let timer = metrics::start_timer_vec(&metrics::TASKS_HISTOGRAM, &[name]); if let Err(join_error) = task_handle.await { - if let Ok(panic) = join_error.try_into_panic() { - let message = panic.downcast_ref::<&str>().unwrap_or(&""); - - crit!( - log, - "Task panic. This is a bug!"; - "task_name" => name, - "message" => message, - "advice" => "Please check above for a backtrace and notify \ - the developers" - ); + if let Ok(_panic) = join_error.try_into_panic() { let _ = shutdown_sender .try_send(ShutdownReason::Failure("Panic (fatal error)")); } @@ -171,8 +159,8 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime. /// - /// The future is wrapped in an `exit_future::Exit`. The task is cancelled when the corresponding - /// exit_future `Signal` is fired/dropped. + /// The future is wrapped in an `async-channel::Receiver`. The task is cancelled when the corresponding + /// Sender is dropped. /// /// The future is monitored via another spawned future to ensure that it doesn't panic. In case /// of a panic, the executor will be shut down via `self.signal_tx`. @@ -184,9 +172,9 @@ impl TaskExecutor { } } - /// Spawn a future on the tokio runtime. This function does not wrap the task in an `exit_future::Exit` + /// Spawn a future on the tokio runtime. This function does not wrap the task in an `async-channel::Receiver` /// like [spawn](#method.spawn). 
- /// The caller of this function is responsible for wrapping up the task with an `exit_future::Exit` to + /// The caller of this function is responsible for wrapping up the task with an `async-channel::Receiver` to /// ensure that the task gets canceled appropriately. /// This function generates prometheus metrics on number of tasks and task duration. /// @@ -225,9 +213,9 @@ impl TaskExecutor { } } - /// Spawn a future on the tokio runtime wrapped in an `exit_future::Exit` returning an optional + /// Spawn a future on the tokio runtime wrapped in an `async-channel::Receiver` returning an optional /// join handle to the future. - /// The task is canceled when the corresponding exit_future `Signal` is fired/dropped. + /// The task is canceled when the corresponding async-channel is dropped. /// /// This function generates prometheus metrics on number of tasks and task duration. pub fn spawn_handle( @@ -235,30 +223,29 @@ impl TaskExecutor { task: impl Future + Send + 'static, name: &'static str, ) -> Option>> { - let exit = self.exit.clone(); + let exit = self.exit(); let log = self.log.clone(); if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { // Task is shutdown before it completes if `exit` receives let int_gauge_1 = int_gauge.clone(); - let future = future::select(Box::pin(task), exit).then(move |either| { - let result = match either { - future::Either::Left((value, _)) => { - trace!(log, "Async task completed"; "task" => name); - Some(value) - } - future::Either::Right(_) => { - debug!(log, "Async task shutdown, exit received"; "task" => name); - None - } - }; - int_gauge_1.dec(); - futures::future::ready(result) - }); - int_gauge.inc(); if let Some(handle) = self.handle() { - Some(handle.spawn(future)) + Some(handle.spawn(async move { + futures::pin_mut!(exit); + let result = match future::select(Box::pin(task), exit).await { + future::Either::Left((value, _)) => { + trace!(log, "Async task completed"; "task" => name); + Some(value) + } + future::Either::Right(_) => { + debug!(log, "Async task shutdown, exit received"; "task" => name); + None + } + }; + int_gauge_1.dec(); + result + })) } else { debug!(self.log, "Couldn't spawn task. Runtime shutting down"); None @@ -336,7 +323,7 @@ impl TaskExecutor { metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); let log = self.log.clone(); let handle = self.handle()?; - let exit = self.exit.clone(); + let exit = self.exit(); debug!( log, @@ -374,9 +361,13 @@ impl TaskExecutor { self.handle_provider.handle() } - /// Returns a copy of the `exit_future::Exit`. - pub fn exit(&self) -> exit_future::Exit { - self.exit.clone() + /// Returns a future that completes when `async-channel::Sender` is dropped or () is sent, + /// which translates to the exit signal being triggered. + pub fn exit(&self) -> impl Future { + let exit = self.exit.clone(); + async move { + let _ = exit.recv().await; + } } /// Get a channel to request shutting down. diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index c6e5ad01e..6e372d975 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -14,7 +14,7 @@ use tokio::runtime; /// This struct should never be used in production, only testing. 
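A minimal, self-contained sketch of the shutdown idiom the executor now uses, assuming the `tokio`, `futures`, and `async-channel` crates: dropping the `Sender` completes every cloned receiver's `recv()`, and `select` turns that into task cancellation, mirroring the rewritten `spawn_handle` above:

```rust
use futures::future;
use std::time::Duration;

#[tokio::main]
async fn main() {
    // The Sender is the exit signal; receivers resolve once it is dropped.
    let (exit_tx, exit_rx) = async_channel::bounded::<()>(1);

    let task = tokio::spawn(async move {
        let exit = async move {
            let _ = exit_rx.recv().await; // Err(Closed) once all senders drop
        };
        futures::pin_mut!(exit);
        let work = Box::pin(tokio::time::sleep(Duration::from_secs(60)));
        match future::select(work, exit).await {
            future::Either::Left(_) => "completed",
            future::Either::Right(_) => "shutdown",
        }
    });

    drop(exit_tx); // fire the exit signal
    assert_eq!(task.await.unwrap(), "shutdown");
}
```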
pub struct TestRuntime { runtime: Option>, - _runtime_shutdown: exit_future::Signal, + _runtime_shutdown: async_channel::Sender<()>, pub task_executor: TaskExecutor, pub log: Logger, } @@ -24,7 +24,7 @@ impl Default for TestRuntime { /// called *outside* any existing runtime, create a new `Runtime` and keep it alive until the /// `Self` is dropped. fn default() -> Self { - let (runtime_shutdown, exit) = exit_future::signal(); + let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let log = null_logger().unwrap(); diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index bccf9086a..3d5d14960 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -214,6 +214,7 @@ impl<'a> Builder<'a> { .write(true) .read(true) .create(true) + .truncate(true) .open(path) .map_err(Error::UnableToSaveDepositData)? .write_all(hex.as_bytes()) @@ -231,6 +232,7 @@ impl<'a> Builder<'a> { .write(true) .read(true) .create(true) + .truncate(true) .open(path) .map_err(Error::UnableToSaveDepositAmount)? .write_all(format!("{}", amount).as_bytes()) diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 85c1901ba..0d33de998 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -14,8 +14,10 @@ beacon_chain = { workspace = true } state_processing = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } tokio = { workspace = true } headers = "0.3.2" lighthouse_metrics = { workspace = true } lazy_static = { workspace = true } serde_array_query = "0.1.0" +bytes = { workspace = true } diff --git a/common/warp_utils/src/json.rs b/common/warp_utils/src/json.rs new file mode 100644 index 000000000..203a6495a --- /dev/null +++ b/common/warp_utils/src/json.rs @@ -0,0 +1,22 @@ +use bytes::Bytes; +use serde::de::DeserializeOwned; +use std::error::Error as StdError; +use warp::{Filter, Rejection}; + +use crate::reject; + +struct Json; + +type BoxError = Box; + +impl Json { + fn decode(bytes: Bytes) -> Result { + serde_json::from_slice(&bytes).map_err(Into::into) + } +} + +pub fn json() -> impl Filter + Copy { + warp::body::bytes().and_then(|bytes: Bytes| async move { + Json::decode(bytes).map_err(|err| reject::custom_deserialize_error(format!("{:?}", err))) + }) +} diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs index 77d61251f..55ee423fa 100644 --- a/common/warp_utils/src/lib.rs +++ b/common/warp_utils/src/lib.rs @@ -2,6 +2,7 @@ //! Lighthouse project. E.g., the `http_api` and `http_metrics` crates. 
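Sketch of how a bytes-based JSON filter like the new `warp_utils::json::json` is wired up: decoding manually from `warp::body::bytes` lets the rejection carry the serde error text, where warp's built-in `body::json` rejection is opaque. Reimplemented locally for illustration; the names below are not the crate's:

```rust
use bytes::Bytes;
use serde::de::DeserializeOwned;
use warp::{Filter, Rejection};

#[derive(Debug)]
struct DeserializeError(String);
impl warp::reject::Reject for DeserializeError {}

// Extract T from the raw body, preserving the serde error message on failure.
fn json_body<T: DeserializeOwned + Send>(
) -> impl Filter<Extract = (T,), Error = Rejection> + Copy {
    warp::body::bytes().and_then(|bytes: Bytes| async move {
        serde_json::from_slice(&bytes)
            .map_err(|e| warp::reject::custom(DeserializeError(format!("{e:?}"))))
    })
}
```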
pub mod cors; +pub mod json; pub mod metrics; pub mod query; pub mod reject; diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs index cf3d11af8..b6bb5ace3 100644 --- a/common/warp_utils/src/reject.rs +++ b/common/warp_utils/src/reject.rs @@ -82,6 +82,15 @@ pub fn custom_bad_request(msg: String) -> warp::reject::Rejection { warp::reject::custom(CustomBadRequest(msg)) } +#[derive(Debug)] +pub struct CustomDeserializeError(pub String); + +impl Reject for CustomDeserializeError {} + +pub fn custom_deserialize_error(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomDeserializeError(msg)) +} + #[derive(Debug)] pub struct CustomServerError(pub String); @@ -161,6 +170,9 @@ pub async fn handle_rejection(err: warp::Rejection) -> Result() { + message = format!("BAD_REQUEST: body deserialize error: {}", e.0); + code = StatusCode::BAD_REQUEST; } else if let Some(e) = err.find::() { message = format!("BAD_REQUEST: body deserialize error: {}", e); code = StatusCode::BAD_REQUEST; diff --git a/consensus/cached_tree_hash/src/impls.rs b/consensus/cached_tree_hash/src/impls.rs index 0624bd201..efdba32b5 100644 --- a/consensus/cached_tree_hash/src/impls.rs +++ b/consensus/cached_tree_hash/src/impls.rs @@ -26,13 +26,11 @@ pub fn u64_leaf_count(len: usize) -> usize { pub fn hash256_iter( values: &[Hash256], -) -> impl Iterator + ExactSizeIterator + '_ { +) -> impl ExactSizeIterator + '_ { values.iter().copied().map(Hash256::to_fixed_bytes) } -pub fn u64_iter( - values: &[u64], -) -> impl Iterator + ExactSizeIterator + '_ { +pub fn u64_iter(values: &[u64]) -> impl ExactSizeIterator + '_ { let type_size = size_of::(); let vals_per_chunk = BYTES_PER_CHUNK / type_size; values.chunks(vals_per_chunk).map(move |xs| { diff --git a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index 77211a86a..3b3161614 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -59,7 +59,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { expected_head: get_root(3), }); - // Ensure that with justified epoch 1 we find 2 + // Ensure that with justified epoch 1 we find 3 // // 0 // | @@ -68,11 +68,15 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { // 2 <- start // | // 3 <- head + // + // Since https://github.com/ethereum/consensus-specs/pull/3431 it is valid + // to elect head blocks that have a higher justified checkpoint than the + // store. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(1), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_root(2), + expected_head: get_root(3), }); // Ensure that with justified epoch 2 we find 3 @@ -247,14 +251,19 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: balances.clone(), expected_head: get_root(10), }); - // Same as above, but with justified epoch 3 (should be invalid). - ops.push(Operation::InvalidFindHead { + // Same as above, but with justified epoch 3. + // + // Since https://github.com/ethereum/consensus-specs/pull/3431 it is valid + // to elect head blocks that have a higher justified checkpoint than the + // store. 
+ ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), + expected_head: get_root(10), }); // Add a vote to 1. @@ -305,14 +314,19 @@ justified_state_balances: balances.clone(), expected_head: get_root(9), }); - // Save as above but justified epoch 3 (should fail). - ops.push(Operation::InvalidFindHead { + // Same as above but justified epoch 3. + // + // Since https://github.com/ethereum/consensus-specs/pull/3431 it is valid + // to elect head blocks that have a higher justified checkpoint than the + // store. + ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), root: get_root(5), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), + expected_head: get_root(9), }); // Add a vote to 2. @@ -363,14 +377,19 @@ justified_state_balances: balances.clone(), expected_head: get_root(10), }); - // Same as above but justified epoch 3 (should fail). - ops.push(Operation::InvalidFindHead { + // Same as above but justified epoch 3. + // + // Since https://github.com/ethereum/consensus-specs/pull/3431 it is valid + // to elect head blocks that have a higher justified checkpoint than the + // store. + ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), + expected_head: get_root(10), }); // Ensure that if we start at 1 we find 9 (just: 0, fin: 0). @@ -405,14 +424,19 @@ justified_state_balances: balances.clone(), expected_head: get_root(9), }); - // Same as above but justified epoch 3 (should fail). - ops.push(Operation::InvalidFindHead { + // Same as above but justified epoch 3. + // + // Since https://github.com/ethereum/consensus-specs/pull/3431 it is valid + // to elect head blocks that have a higher justified checkpoint than the + // store. + ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), root: get_root(5), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), + expected_head: get_root(9), }); // Ensure that if we start at 2 we find 10 (just: 0, fin: 0). @@ -444,14 +468,19 @@ justified_state_balances: balances.clone(), expected_head: get_root(10), }); - // Same as above but justified epoch 3 (should fail). - ops.push(Operation::InvalidFindHead { + // Same as above but justified epoch 3. + // + // Since https://github.com/ethereum/consensus-specs/pull/3431 it is valid + // to elect head blocks that have a higher justified checkpoint than the + // store. 
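The relaxed rule these tests now encode (consensus-specs #3431) is easiest to see as a standalone predicate. A hedged, epochs-only sketch mirroring the `correct_justified` expression that lands in `proto_array.rs` later in this diff:

```rust
// A leaf is a viable head if its voting source matches the store's justified
// checkpoint, or is simply recent (current or previous epoch).
fn node_is_viable(
    store_justified_epoch: u64,
    genesis_epoch: u64,
    voting_source_epoch: u64,
    current_epoch: u64,
) -> bool {
    store_justified_epoch == genesis_epoch
        || voting_source_epoch == store_justified_epoch
        || voting_source_epoch + 2 >= current_epoch
}

fn main() {
    // A higher justified epoch than the store's (3 vs 2) is viable while recent...
    assert!(node_is_viable(2, 0, 3, 4));
    // ...but a stale voting source still fails the filter.
    assert!(!node_is_viable(2, 0, 1, 5));
}
```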
+ ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances, + expected_head: get_root(10), }); // END OF TESTS diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index a60b3e6b3..27a7969e4 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -184,7 +184,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, }, - // Ensure the head is still 4 whilst the justified epoch is 0. + // Ensure the head is now 5 whilst the justified epoch is 0. // // 0 // / \ @@ -203,9 +203,10 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_root(4), + expected_head: get_root(5), }, - // Ensure there is an error when starting from a block that has the wrong justified epoch. + // Ensure there is no error when starting from a block that has the + // wrong justified epoch. // // 0 // / \ @@ -214,7 +215,11 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 4 3 // | // 5 <- starting from 5 with justified epoch 0 should error. - Operation::InvalidFindHead { + // + // Since https://github.com/ethereum/consensus-specs/pull/3431 it is valid + // to elect head blocks that have a higher justified checkpoint than the + // store. + Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), root: get_root(5), @@ -224,6 +229,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), + expected_head: get_root(5), }, // Set the justified epoch to 2 and the start block to 5 and ensure 5 is the head. 
// diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 4726715a1..7c2ecfe26 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -961,16 +961,9 @@ impl ProtoArray { node_justified_checkpoint }; - let mut correct_justified = self.justified_checkpoint.epoch == genesis_epoch - || voting_source.epoch == self.justified_checkpoint.epoch; - - if let Some(node_unrealized_justified_checkpoint) = node.unrealized_justified_checkpoint { - if !correct_justified && self.justified_checkpoint.epoch + 1 == current_epoch { - correct_justified = node_unrealized_justified_checkpoint.epoch - >= self.justified_checkpoint.epoch - && voting_source.epoch + 2 >= current_epoch; - } - } + let correct_justified = self.justified_checkpoint.epoch == genesis_epoch + || voting_source.epoch == self.justified_checkpoint.epoch + || voting_source.epoch + 2 >= current_epoch; let correct_finalized = self.finalized_checkpoint.epoch == genesis_epoch || self.is_finalized_checkpoint_or_descendant::(node.root); diff --git a/consensus/types/presets/mainnet/deneb.yaml b/consensus/types/presets/mainnet/deneb.yaml index 6d2fb4abd..0f56b8bdf 100644 --- a/consensus/types/presets/mainnet/deneb.yaml +++ b/consensus/types/presets/mainnet/deneb.yaml @@ -8,5 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096 MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 -# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 +# `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17 diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index ac4a583cb..d1d75523a 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -125,7 +125,7 @@ mod tests { // Check the in-memory size of an `Attestation`, which is useful for reasoning about memory // and preventing regressions. // - // This test will only pass with `blst`, if we run these tests with Milagro or another + // This test will only pass with `blst`, if we run these tests with another // BLS library in future we will have to make it generic. #[test] fn size_of() { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 146dff895..b0a942d74 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -176,6 +176,12 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, } } } + + /// Return `true` if this block body has a non-zero number of blobs. 
+ pub fn has_blobs(self) -> bool { + self.blob_kzg_commitments() + .map_or(false, |blobs| !blobs.is_empty()) + } } impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index e2e25f24b..4c0ee1bfa 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -641,7 +641,7 @@ impl BeaconState { if self.slot() <= decision_slot { Ok(block_root) } else { - self.get_block_root(decision_slot).map(|root| *root) + self.get_block_root(decision_slot).copied() } } @@ -657,7 +657,7 @@ impl BeaconState { if self.slot() == decision_slot { Ok(block_root) } else { - self.get_block_root(decision_slot).map(|root| *root) + self.get_block_root(decision_slot).copied() } } @@ -683,7 +683,7 @@ impl BeaconState { if self.slot() == decision_slot { Ok(block_root) } else { - self.get_block_root(decision_slot).map(|root| *root) + self.get_block_root(decision_slot).copied() } } @@ -1866,6 +1866,7 @@ impl BeaconState { }; // 2. Get all `BeaconState` leaves. + self.initialize_tree_hash_cache(); let mut cache = self .tree_hash_cache_mut() .take() diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b2120fb04..88f989d2a 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,4 +1,5 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; +use crate::blob_sidecar::BlobIdentifier; use crate::*; use int_to_bytes::int_to_bytes4; use serde::Deserialize; @@ -189,6 +190,7 @@ pub struct ChainSpec { pub attestation_subnet_count: u64, pub attestation_subnet_extra_bits: u8, pub attestation_subnet_prefix_bits: u8, + pub attestation_subnet_shuffling_prefix_bits: u8, /* * Networking Deneb @@ -679,7 +681,7 @@ impl ChainSpec { * Deneb hard fork params */ deneb_fork_version: [0x04, 0x00, 0x00, 0x00], - deneb_fork_epoch: None, + deneb_fork_epoch: Some(Epoch::new(269568)), /* * Network specific @@ -701,6 +703,8 @@ impl ChainSpec { message_domain_valid_snappy: default_message_domain_valid_snappy(), attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), + attestation_subnet_shuffling_prefix_bits: + default_attestation_subnet_shuffling_prefix_bits(), max_request_blocks: default_max_request_blocks(), /* @@ -805,7 +809,7 @@ impl ChainSpec { max_committees_per_slot: 64, target_committee_size: 128, min_per_epoch_churn_limit: 4, - max_per_epoch_activation_churn_limit: 8, + max_per_epoch_activation_churn_limit: 2, churn_limit_quotient: 4_096, shuffle_round_count: 90, min_genesis_active_validator_count: 4_096, @@ -940,7 +944,7 @@ impl ChainSpec { * Deneb hard fork params */ deneb_fork_version: [0x04, 0x00, 0x00, 0x64], - deneb_fork_epoch: None, + deneb_fork_epoch: Some(Epoch::new(889856)), /* * Network specific @@ -954,7 +958,7 @@ impl ChainSpec { target_aggregators_per_committee: 16, epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), - min_epochs_for_block_requests: default_min_epochs_for_block_requests(), + min_epochs_for_block_requests: 33024, max_chunk_size: default_max_chunk_size(), ttfb_timeout: default_ttfb_timeout(), resp_timeout: default_resp_timeout(), @@ -962,6 +966,8 @@ impl ChainSpec { message_domain_valid_snappy: default_message_domain_valid_snappy(), attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), 
attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), + attestation_subnet_shuffling_prefix_bits: + default_attestation_subnet_shuffling_prefix_bits(), max_request_blocks: default_max_request_blocks(), /* @@ -969,7 +975,7 @@ impl ChainSpec { */ max_request_blocks_deneb: default_max_request_blocks_deneb(), max_request_blob_sidecars: default_max_request_blob_sidecars(), - min_epochs_for_blob_sidecars_requests: default_min_epochs_for_blob_sidecars_requests(), + min_epochs_for_blob_sidecars_requests: 16384, blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), /* @@ -1139,6 +1145,9 @@ pub struct Config { #[serde(default = "default_attestation_subnet_prefix_bits")] #[serde(with = "serde_utils::quoted_u8")] attestation_subnet_prefix_bits: u8, + #[serde(default = "default_attestation_subnet_shuffling_prefix_bits")] + #[serde(with = "serde_utils::quoted_u8")] + attestation_subnet_shuffling_prefix_bits: u8, #[serde(default = "default_max_request_blocks_deneb")] #[serde(with = "serde_utils::quoted_u64")] max_request_blocks_deneb: u64, @@ -1236,6 +1245,10 @@ const fn default_attestation_subnet_prefix_bits() -> u8 { 6 } +const fn default_attestation_subnet_shuffling_prefix_bits() -> u8 { + 3 +} + const fn default_max_request_blocks() -> u64 { 1024 } @@ -1280,8 +1293,13 @@ fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { fn max_blobs_by_root_request_common(max_request_blob_sidecars: u64) -> usize { let max_request_blob_sidecars = max_request_blob_sidecars as usize; - RuntimeVariableList::::from_vec( - vec![Hash256::zero(); max_request_blob_sidecars], + let empty_blob_identifier = BlobIdentifier { + block_root: Hash256::zero(), + index: 0, + }; + + RuntimeVariableList::::from_vec( + vec![empty_blob_identifier; max_request_blob_sidecars], max_request_blob_sidecars, ) .as_ssz_bytes() @@ -1414,6 +1432,7 @@ impl Config { message_domain_valid_snappy: spec.message_domain_valid_snappy, attestation_subnet_extra_bits: spec.attestation_subnet_extra_bits, attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, + attestation_subnet_shuffling_prefix_bits: spec.attestation_subnet_shuffling_prefix_bits, max_request_blocks_deneb: spec.max_request_blocks_deneb, max_request_blob_sidecars: spec.max_request_blob_sidecars, min_epochs_for_blob_sidecars_requests: spec.min_epochs_for_blob_sidecars_requests, @@ -1474,6 +1493,7 @@ impl Config { message_domain_valid_snappy, attestation_subnet_extra_bits, attestation_subnet_prefix_bits, + attestation_subnet_shuffling_prefix_bits, max_request_blocks, epochs_per_subnet_subscription, attestation_propagation_slot_range, @@ -1531,6 +1551,7 @@ impl Config { message_domain_valid_snappy, attestation_subnet_extra_bits, attestation_subnet_prefix_bits, + attestation_subnet_shuffling_prefix_bits, max_request_blocks, epochs_per_subnet_subscription, attestation_propagation_slot_range, @@ -1817,6 +1838,7 @@ mod yaml_tests { check_default!(message_domain_valid_snappy); check_default!(attestation_subnet_extra_bits); check_default!(attestation_subnet_prefix_bits); + check_default!(attestation_subnet_shuffling_prefix_bits); assert_eq!(chain_spec.bellatrix_fork_epoch, None); } diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index dcc387d3d..d212f8a5e 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -81,7 +81,7 @@ impl<'a, N: Unsigned> CachedTreeHash for HistoricalSummaryCache<' pub fn leaf_iter( values: &[HistoricalSummary], -) 
-> impl Iterator + ExactSizeIterator + '_ { +) -> impl ExactSizeIterator + '_ { values .iter() .map(|value| value.tree_hash_root()) diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 616aced48..6660783ab 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -9,7 +9,7 @@ use ssz_derive::{Decode, Encode}; use std::sync::Arc; use test_random_derive::TestRandom; -/// A LightClientBootstrap is the initializer we send over to lightclient nodes +/// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. #[derive( Debug, diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 87601b815..494e68b63 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -11,7 +11,7 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; -/// A LightClientFinalityUpdate is the update lightclient request or received by a gossip that +/// A LightClientFinalityUpdate is the update light_client request or received by a gossip that /// signal a new finalized beacon block header for the light client sync protocol. #[derive( Debug, diff --git a/consensus/types/src/participation_list.rs b/consensus/types/src/participation_list.rs index be119fbef..6e3d916de 100644 --- a/consensus/types/src/participation_list.rs +++ b/consensus/types/src/participation_list.rs @@ -43,7 +43,7 @@ pub fn leaf_count(len: usize) -> usize { pub fn leaf_iter( values: &[ParticipationFlags], -) -> impl Iterator + ExactSizeIterator + '_ { +) -> impl ExactSizeIterator + '_ { values.chunks(BYTES_PER_CHUNK).map(|xs| { // Zero-pad chunks on the right. let mut chunk = [0u8; BYTES_PER_CHUNK]; diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 2752e3109..82e12b7ec 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -72,36 +72,43 @@ impl SubnetId { .into()) } - #[allow(clippy::arithmetic_side_effects)] /// Computes the set of subnets the node should be subscribed to during the current epoch, /// along with the first epoch in which these subscriptions are no longer valid. + #[allow(clippy::arithmetic_side_effects)] pub fn compute_subnets_for_epoch( node_id: ethereum_types::U256, epoch: Epoch, spec: &ChainSpec, ) -> Result<(impl Iterator, Epoch), &'static str> { - // Simplify the variable name + // simplify variable naming let subscription_duration = spec.epochs_per_subnet_subscription; + let prefix_bits = spec.attestation_subnet_prefix_bits as u64; + let shuffling_prefix_bits = spec.attestation_subnet_shuffling_prefix_bits as u64; - let node_id_prefix = - (node_id >> (256 - spec.attestation_subnet_prefix_bits as usize)).as_usize(); + // calculate the prefixes used to compute the subnet and shuffling + let node_id_prefix = (node_id >> (256 - prefix_bits)).as_u64(); + let shuffling_prefix = (node_id >> (256 - (prefix_bits + shuffling_prefix_bits))).as_u64(); - // NOTE: The as_u64() panics if the number is larger than u64::max_value(). This cannot be - // true as spec.epochs_per_subnet_subscription is a u64. 
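A toy illustration of the prefix extraction above, using the mainnet bit widths (6 subnet-prefix bits plus the new 3 shuffling-prefix bits) and a deliberately simple node ID; assumes the `ethereum_types` crate:

```rust
use ethereum_types::U256;

fn main() {
    let prefix_bits: usize = 6; // ATTESTATION_SUBNET_PREFIX_BITS
    let shuffling_prefix_bits: usize = 3; // ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS

    // Node ID with every bit set, so the extracted prefixes are obvious.
    let node_id = U256::MAX;

    // The top 6 bits select one of 64 subnet prefixes.
    let node_id_prefix = (node_id >> (256 - prefix_bits)).as_u64();
    // The next 3 bits select one of 8 shuffling groups, staggering rotations.
    let shuffling_prefix = (node_id >> (256 - (prefix_bits + shuffling_prefix_bits))).as_u64();

    assert_eq!(node_id_prefix, 0b11_1111); // 63
    assert_eq!(shuffling_prefix % (1u64 << shuffling_prefix_bits), 0b111); // group 7 of 8
}
```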
- let node_offset = (node_id % ethereum_types::U256::from(subscription_duration)).as_u64(); + // number of groups the shuffling creates + let shuffling_groups = 1 << shuffling_prefix_bits; + // shuffling group for this node + let shuffling_bits = shuffling_prefix % shuffling_groups; + let epoch_transition = (node_id_prefix + + (shuffling_bits * (subscription_duration >> shuffling_prefix_bits))) + % subscription_duration; // Calculate at which epoch this node needs to re-evaluate let valid_until_epoch = epoch.as_u64() + subscription_duration - .saturating_sub((epoch.as_u64() + node_offset) % subscription_duration); + .saturating_sub((epoch.as_u64() + epoch_transition) % subscription_duration); - let subscription_event_idx = (epoch.as_u64() + node_offset) / subscription_duration; + let subscription_event_idx = (epoch.as_u64() + epoch_transition) / subscription_duration; let permutation_seed = ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); let num_subnets = 1 << spec.attestation_subnet_prefix_bits; let permutated_prefix = compute_shuffled_index( - node_id_prefix, + node_id_prefix as usize, num_subnets, &permutation_seed, spec.shuffle_round_count, @@ -180,38 +187,33 @@ mod tests { "60930578857433095740782970114409273483106482059893286066493409689627770333527", "103822458477361691467064888613019442068586830412598673713899771287914656699997", ] - .into_iter() - .map(|v| ethereum_types::U256::from_dec_str(v).unwrap()) - .collect::>(); + .map(|v| ethereum_types::U256::from_dec_str(v).unwrap()); let epochs = [ 54321u64, 1017090249, 1827566880, 846255942, 766597383, 1204990115, 1616209495, 1774367616, 1484598751, 3525502229, ] - .into_iter() - .map(Epoch::from) - .collect::>(); + .map(Epoch::from); // Test mainnet let spec = ChainSpec::mainnet(); // Calculated by hand - let expected_valid_time: Vec = [ - 54528, 1017090371, 1827567108, 846256076, 766597570, 1204990135, 1616209582, - 1774367723, 1484598953, 3525502371, - ] - .into(); + let expected_valid_time = [ + 54528u64, 1017090255, 1827567030, 846256049, 766597387, 1204990287, 1616209536, + 1774367857, 1484598847, 3525502311, + ]; // Calculated from pyspec - let expected_subnets = vec![ + let expected_subnets = [ vec![4u64, 5u64], - vec![61, 62], - vec![23, 24], + vec![31, 32], + vec![39, 40], vec![38, 39], vec![53, 54], - vec![39, 40], + vec![57, 58], vec![48, 49], - vec![39, 40], + vec![1, 2], vec![34, 35], vec![37, 38], ]; @@ -228,11 +230,11 @@ mod tests { >(node_ids[x], epochs[x], &spec) .unwrap(); - assert_eq!(Epoch::from(expected_valid_time[x]), valid_time); assert_eq!( expected_subnets[x], computed_subnets.map(SubnetId::into).collect::>() ); + assert_eq!(Epoch::from(expected_valid_time[x]), valid_time); } } } diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator_subscription.rs index fd48660c5..62932638e 100644 --- a/consensus/types/src/validator_subscription.rs +++ b/consensus/types/src/validator_subscription.rs @@ -4,10 +4,8 @@ use ssz_derive::{Decode, Encode}; /// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation /// duties. -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, Eq, PartialOrd, Ord)] pub struct ValidatorSubscription { - /// The validators index. - pub validator_index: u64, /// The index of the committee within `slot` of which the validator is a member. 
Used by the /// beacon node to quickly evaluate the associated `SubnetId`. pub attestation_committee_index: CommitteeIndex, diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 1216fc2a9..7aa8e02dc 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -7,7 +7,6 @@ edition = { workspace = true } [dependencies] ethereum_ssz = { workspace = true } tree_hash = { workspace = true } -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.5.1", optional = true } rand = { workspace = true } serde = { workspace = true } ethereum_serde_utils = { workspace = true } @@ -22,7 +21,6 @@ blst = { version = "0.3.3", optional = true } arbitrary = [] default = ["supranational"] fake_crypto = [] -milagro = ["milagro_bls"] supranational = ["blst"] supranational-portable = ["supranational", "blst/portable"] supranational-force-adx = ["supranational", "blst/force-adx"] diff --git a/crypto/bls/src/impls/milagro.rs b/crypto/bls/src/impls/milagro.rs deleted file mode 100644 index eb4767d3c..000000000 --- a/crypto/bls/src/impls/milagro.rs +++ /dev/null @@ -1,194 +0,0 @@ -use crate::{ - generic_aggregate_public_key::TAggregatePublicKey, - generic_aggregate_signature::TAggregateSignature, - generic_public_key::{GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN}, - generic_secret_key::{TSecretKey, SECRET_KEY_BYTES_LEN}, - generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, - Error, Hash256, ZeroizeHash, -}; -pub use milagro_bls as milagro; -use rand::thread_rng; -use std::iter::ExactSizeIterator; - -/// Provides the externally-facing, core BLS types. -pub mod types { - pub use super::milagro::AggregatePublicKey; - pub use super::milagro::AggregateSignature; - pub use super::milagro::PublicKey; - pub use super::milagro::SecretKey; - pub use super::milagro::Signature; - pub use super::verify_signature_sets; - pub use super::SignatureSet; -} - -pub type SignatureSet<'a> = crate::generic_signature_set::GenericSignatureSet< - 'a, - milagro::PublicKey, - milagro::AggregatePublicKey, - milagro::Signature, - milagro::AggregateSignature, ->; - -pub fn verify_signature_sets<'a>( - signature_sets: impl ExactSizeIterator>, -) -> bool { - if signature_sets.len() == 0 { - return false; - } - - signature_sets - .map(|signature_set| { - let mut aggregate = milagro::AggregatePublicKey::from_public_key( - signature_set.signing_keys.first().ok_or(())?.point(), - ); - - for signing_key in signature_set.signing_keys.iter().skip(1) { - aggregate.add(signing_key.point()) - } - - if signature_set.signature.point().is_none() { - return Err(()); - } - - Ok(( - signature_set.signature.as_ref(), - aggregate, - signature_set.message, - )) - }) - .collect::, ()>>() - .map(|aggregates| { - milagro::AggregateSignature::verify_multiple_aggregate_signatures( - &mut rand::thread_rng(), - aggregates.iter().map(|(signature, aggregate, message)| { - ( - signature - .point() - .expect("guarded against none by previous check"), - aggregate, - message.as_bytes(), - ) - }), - ) - }) - .unwrap_or(false) -} - -impl TPublicKey for milagro::PublicKey { - fn serialize(&self) -> [u8; PUBLIC_KEY_BYTES_LEN] { - let mut bytes = [0; PUBLIC_KEY_BYTES_LEN]; - bytes[..].copy_from_slice(&self.as_bytes()); - bytes - } - - fn deserialize(bytes: &[u8]) -> Result { - Self::from_bytes(bytes).map_err(Into::into) - } -} - -impl TAggregatePublicKey for milagro::AggregatePublicKey { - fn to_public_key(&self) -> GenericPublicKey { - GenericPublicKey::from_point(milagro::PublicKey { - point: self.point.clone(), - }) - } - - fn 
diff --git a/crypto/bls/src/impls/mod.rs b/crypto/bls/src/impls/mod.rs
index b3f2da77b..d87c3b12b 100644
--- a/crypto/bls/src/impls/mod.rs
+++ b/crypto/bls/src/impls/mod.rs
@@ -1,5 +1,3 @@
#[cfg(feature = "supranational")]
pub mod blst;
pub mod fake_crypto;
-#[cfg(feature = "milagro")]
-pub mod milagro;
diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs
index 750e1bd5b..fef9804b7 100644
--- a/crypto/bls/src/lib.rs
+++ b/crypto/bls/src/lib.rs
@@ -9,15 +9,13 @@
//! are supported via compile-time flags. There are three backends supported via features:
//!
//! - `supranational`: the pure-assembly, highly optimized version from the `blst` crate.
-//! - `milagro`: the classic pure-Rust `milagro_bls` crate.
//! - `fake_crypto`: an always-returns-valid implementation that is only useful for testing
//! scenarios which intend to *ignore* real cryptography.
//!
//! This crate uses traits to reduce code-duplication between the two implementations. For example,
//! the `GenericPublicKey` struct exported from this crate is generic across the `TPublicKey` trait
//! (i.e., `PublicKey`). `TPublicKey` is implemented by all three backends (see the
-//! `impls.rs` module). When compiling with the `milagro` feature, we export
-//! `type PublicKey = GenericPublicKey<milagro::PublicKey>`.
+//! `impls.rs` module).

#[macro_use]
mod macros;
@@ -43,16 +41,11 @@ pub use zeroize_hash::ZeroizeHash;
#[cfg(feature = "supranational")]
use blst::BLST_ERROR as BlstError;
-#[cfg(feature = "milagro")]
-use milagro_bls::AmclError;

pub type Hash256 = ethereum_types::H256;

#[derive(Clone, Debug, PartialEq)]
pub enum Error {
- /// An error was raised from the Milagro BLS library.
- #[cfg(feature = "milagro")]
- MilagroError(AmclError),
/// An error was raised from the Supranational BLST BLS library.
#[cfg(feature = "supranational")]
BlstError(BlstError),
@@ -66,13 +59,6 @@ pub enum Error {
InvalidZeroSecretKey,
}

-#[cfg(feature = "milagro")]
-impl From<AmclError> for Error {
- fn from(e: AmclError) -> Error {
- Error::MilagroError(e)
- }
-}
-
#[cfg(feature = "supranational")]
impl From<BlstError> for Error {
fn from(e: BlstError) -> Error {
@@ -94,8 +80,7 @@ pub mod generics {
}

/// Defines all the fundamental BLS points which should be exported by this crate by making
-/// concrete the generic type parameters using the points from some external BLS library (e.g.,
-/// Milagro, BLST).
+/// concrete the generic type parameters using the points from some external BLS library (e.g., BLST).
macro_rules! define_mod {
($name: ident, $mod: path) => {
pub mod $name {
@@ -139,8 +124,6 @@
};
}

-#[cfg(feature = "milagro")]
-define_mod!(milagro_implementations, crate::impls::milagro::types);
#[cfg(feature = "supranational")]
define_mod!(blst_implementations, crate::impls::blst::types);
#[cfg(feature = "fake_crypto")]
@@ -149,14 +132,7 @@ define_mod!(
fake_crypto_implementations,
crate::impls::fake_crypto::types
);

-#[cfg(all(feature = "milagro", not(feature = "fake_crypto"),))]
-pub use milagro_implementations::*;
-
-#[cfg(all(
- feature = "supranational",
- not(feature = "fake_crypto"),
- not(feature = "milagro")
-))]
+#[cfg(all(feature = "supranational", not(feature = "fake_crypto"),))]
pub use blst_implementations::*;

#[cfg(feature = "fake_crypto")]
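With the Milagro backend gone, the crate-root re-export reduces to a two-way choice. A minimal sketch of the cfg-gated aliasing pattern that `define_mod!` and the `pub use` lines implement (module and type names simplified; not the crate's actual layout):

    // Each backend exposes an identical public surface from its own module.
    mod blst_backend {
        pub struct PublicKey;
    }
    mod fake_crypto_backend {
        pub struct PublicKey;
    }

    // Exactly one backend is promoted to the crate root, so downstream code
    // simply names `bls::PublicKey`. `fake_crypto` deliberately takes
    // precedence over `supranational`, so test builds can opt out of real
    // cryptography even though `supranational` is a default feature.
    #[cfg(all(feature = "supranational", not(feature = "fake_crypto")))]
    pub use blst_backend::PublicKey;
    #[cfg(feature = "fake_crypto")]
    pub use fake_crypto_backend::PublicKey;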
diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs
index ad498dbfa..478c1b7dc 100644
--- a/crypto/bls/tests/tests.rs
+++ b/crypto/bls/tests/tests.rs
@@ -509,8 +509,3 @@ macro_rules! test_suite {
mod blst {
test_suite!(blst_implementations);
}
-
-#[cfg(all(feature = "milagro", not(debug_assertions)))]
-mod milagro {
- test_suite!(milagro_implementations);
-}
test_suite { mod blst { test_suite!(blst_implementations); } - -#[cfg(all(feature = "milagro", not(debug_assertions)))] -mod milagro { - test_suite!(milagro_implementations); -} diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 95af4d638..3583d9e27 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -77,7 +77,15 @@ pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("freezer") .long("freezer") .help("Inspect the freezer DB rather than the hot DB") - .takes_value(false), + .takes_value(false) + .conflicts_with("blobs-db"), + ) + .arg( + Arg::with_name("blobs-db") + .long("blobs-db") + .help("Inspect the blobs DB rather than the hot DB") + .takes_value(false) + .conflicts_with("freezer"), ) .arg( Arg::with_name("output-dir") @@ -88,6 +96,34 @@ pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> { ) } +pub fn compact_cli_app<'a, 'b>() -> App<'a, 'b> { + App::new("compact") + .setting(clap::AppSettings::ColoredHelp) + .about("Compact database manually") + .arg( + Arg::with_name("column") + .long("column") + .value_name("TAG") + .help("3-byte column ID (see `DBColumn`)") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("freezer") + .long("freezer") + .help("Inspect the freezer DB rather than the hot DB") + .takes_value(false) + .conflicts_with("blobs-db"), + ) + .arg( + Arg::with_name("blobs-db") + .long("blobs-db") + .help("Inspect the blobs DB rather than the hot DB") + .takes_value(false) + .conflicts_with("freezer"), + ) +} + pub fn prune_payloads_app<'a, 'b>() -> App<'a, 'b> { App::new("prune-payloads") .alias("prune_payloads") @@ -162,6 +198,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .subcommand(migrate_cli_app()) .subcommand(version_cli_app()) .subcommand(inspect_cli_app()) + .subcommand(compact_cli_app()) .subcommand(prune_payloads_app()) .subcommand(prune_blobs_app()) .subcommand(prune_states_app()) @@ -251,6 +288,7 @@ pub struct InspectConfig { skip: Option, limit: Option, freezer: bool, + blobs_db: bool, /// Configures where the inspect output should be stored. output_dir: PathBuf, } @@ -261,6 +299,7 @@ fn parse_inspect_config(cli_args: &ArgMatches) -> Result let skip = clap_utils::parse_optional(cli_args, "skip")?; let limit = clap_utils::parse_optional(cli_args, "limit")?; let freezer = cli_args.is_present("freezer"); + let blobs_db = cli_args.is_present("blobs-db"); let output_dir: PathBuf = clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new); @@ -270,6 +309,7 @@ fn parse_inspect_config(cli_args: &ArgMatches) -> Result skip, limit, freezer, + blobs_db, output_dir, }) } @@ -277,32 +317,20 @@ fn parse_inspect_config(cli_args: &ArgMatches) -> Result pub fn inspect_db( inspect_config: InspectConfig, client_config: ClientConfig, - runtime_context: &RuntimeContext, - log: Logger, ) -> Result<(), String> { - let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); - let db = HotColdDB::, LevelDB>::open( - &hot_path, - &cold_path, - &blobs_path, - |_, _, _| Ok(()), - client_config.store, - spec, - log, - ) - .map_err(|e| format!("{:?}", e))?; - let mut total = 0; let mut num_keys = 0; let sub_db = if inspect_config.freezer { - &db.cold_db + LevelDB::::open(&cold_path).map_err(|e| format!("Unable to open freezer DB: {e:?}"))? 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml
index 1bba8242c..9796217d0 100644
--- a/lcli/Cargo.toml
+++ b/lcli/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
-version = "4.6.0"
+version = "5.1.1"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = { workspace = true }
diff --git a/lcli/Dockerfile b/lcli/Dockerfile
index e39e2a483..4f5c3f297 100644
--- a/lcli/Dockerfile
+++ b/lcli/Dockerfile
@@ -1,13 +1,13 @@
# `lcli` requires the full project to be in scope, so this should be built either:
# - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .`
# - from the current directory with the command: `docker build -f ./Dockerfile ../`
-FROM rust:1.73.0-bullseye AS builder
+FROM rust:1.75.0-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
COPY .
lighthouse -ARG PORTABLE -ENV PORTABLE $PORTABLE -RUN if [ "$PORTABLE" = true ] ; then export FEATURES=portable; fi && cd lighthouse && make install-lcli +ARG FEATURES +ENV FEATURES $FEATURES +RUN cd lighthouse && make install-lcli FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli \ No newline at end of file +COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 3a0c7a9f6..47db1036d 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -9,7 +9,9 @@ use ethereum_hashing::hash; use ssz::Decode; use ssz::Encode; use state_processing::process_activations; -use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use state_processing::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, +}; use std::fs::File; use std::io::Read; use std::path::PathBuf; @@ -19,8 +21,8 @@ use types::ExecutionBlockHash; use types::{ test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRefMut, - ForkName, Hash256, Keypair, PublicKey, Validator, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderMerge, ForkName, Hash256, Keypair, + PublicKey, Validator, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -302,26 +304,47 @@ fn initialize_state_with_validators( state.fork_mut().previous_version = spec.bellatrix_fork_version; // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing + // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing + if let ExecutionPayloadHeader::Merge(ref header) = execution_payload_header { + *state + .latest_execution_payload_header_merge_mut() + .or(Err("mismatched fork".to_string()))? = header.clone(); + } + } - // Currently, we only support starting from a bellatrix state - match state - .latest_execution_payload_header_mut() - .map_err(|e| format!("Failed to get execution payload header: {:?}", e))? - { - ExecutionPayloadHeaderRefMut::Merge(header_mut) => { - if let ExecutionPayloadHeader::Merge(eph) = execution_payload_header { - *header_mut = eph; - } else { - return Err("Execution payload header must be a bellatrix header".to_string()); - } - } - ExecutionPayloadHeaderRefMut::Capella(_) => { - return Err("Cannot start genesis from a capella state".to_string()) - } - ExecutionPayloadHeaderRefMut::Deneb(_) => { - return Err("Cannot start genesis from a deneb state".to_string()) - } + if spec + .capella_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_capella(&mut state, spec).unwrap(); + + // Remove intermediate fork from `state.fork`. + state.fork_mut().previous_version = spec.capella_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing + if let ExecutionPayloadHeader::Capella(ref header) = execution_payload_header { + *state + .latest_execution_payload_header_capella_mut() + .or(Err("mismatched fork".to_string()))? 
= header.clone();
}
}

@@ -331,5 +354,10 @@ fn initialize_state_with_validators(
// Set genesis validators root for domain separation and chain versioning
*state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap();

+ // Sanity check for state fork matching config fork.
+ state
+ .fork_name(spec)
+ .map_err(|e| format!("state fork mismatch: {e:?}"))?;
+
Ok(state)
}
diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml
index 8517c66c3..d664aac31 100644
--- a/lighthouse/Cargo.toml
+++ b/lighthouse/Cargo.toml
@@ -1,10 +1,10 @@
[package]
name = "lighthouse"
-version = "4.6.0"
+version = "5.1.1"
authors = ["Sigma Prime <info@sigmaprime.io>"]
edition = { workspace = true }
autotests = false
-rust-version = "1.73.0"
+rust-version = "1.75.0"

[features]
default = ["slasher-lmdb"]
@@ -14,8 +14,6 @@ write_ssz_files = ["beacon_node/write_ssz_files"]
portable = ["bls/supranational-portable"]
# Compiles BLST so that it always uses ADX instructions.
modern = ["bls/supranational-force-adx"]
-# Uses the slower Milagro BLS library, which is written in native Rust.
-milagro = ["bls/milagro"]
# Support minimal spec (used for testing only).
spec-minimal = []
# Support Gnosis spec and Gnosis Beacon Chain.
diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml
index b57e1e9de..f95751392 100644
--- a/lighthouse/environment/Cargo.toml
+++ b/lighthouse/environment/Cargo.toml
@@ -5,6 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = { workspace = true }

[dependencies]
+async-channel = { workspace = true }
tokio = { workspace = true }
slog = { workspace = true }
sloggers = { workspace = true }
@@ -17,7 +18,6 @@ slog-term = { workspace = true }
slog-async = { workspace = true }
futures = { workspace = true }
slog-json = "2.3.0"
-exit-future = { workspace = true }
serde = { workspace = true }

[target.'cfg(not(target_family = "unix"))'.dependencies]
diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs
index a1f6b26a9..e59b1d455 100644
--- a/lighthouse/environment/src/lib.rs
+++ b/lighthouse/environment/src/lib.rs
@@ -343,7 +343,7 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
/// Consumes the builder, returning an `Environment`.
pub fn build(self) -> Result<Environment<E>, String> {
- let (signal, exit) = exit_future::signal();
+ let (signal, exit) = async_channel::bounded(1);
let (signal_tx, signal_rx) = channel(1);
Ok(Environment {
runtime: self
@@ -370,8 +370,8 @@ pub struct Environment<E: EthSpec> {
signal_rx: Option<Receiver<ShutdownReason>>,
/// Sender to request shutting down.
signal_tx: Sender<ShutdownReason>,
- signal: Option<exit_future::Signal>,
- exit: exit_future::Exit,
+ signal: Option<async_channel::Sender<()>>,
+ exit: async_channel::Receiver<()>,
log: Logger,
sse_logging_components: Option<SSELoggingComponents>,
eth_spec_instance: E,
@@ -434,7 +434,7 @@ impl<E: EthSpec> Environment<E> {
async move { rx.next().await.ok_or("Internal shutdown channel exhausted") };
futures::pin_mut!(inner_shutdown);

- match self.runtime().block_on(async {
+ let register_handlers = async {
let mut handles = vec![];

// setup for handling SIGTERM
@@ -465,7 +465,9 @@ impl<E: EthSpec> Environment<E> {
}

future::select(inner_shutdown, future::select_all(handles.into_iter())).await
- }) {
+ };
+
+ match self.runtime().block_on(register_handlers) {
future::Either::Left((Ok(reason), _)) => {
info!(self.log, "Internal shutdown received"; "reason" => reason.message());
Ok(reason)
@@ -541,7 +543,7 @@ impl<E: EthSpec> Environment<E> {
/// Fire exit signal which shuts down all spawned services
pub fn fire_signal(&mut self) {
if let Some(signal) = self.signal.take() {
- let _ = signal.fire();
+ drop(signal);
}
}
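The `exit-future` crate is retired in favour of `async-channel` here and in the test crates below. The replacement relies on channel-closure semantics: dropping the last `Sender` closes the channel and wakes every task blocked on `recv()`. A minimal sketch of the pattern (assumes the `async-channel` and `tokio` crates as dependencies):

    // Dropping the sender closes the channel; `recv()` then returns
    // `Err(RecvError)`, which plays the role of the old fired `exit_future`.
    async fn wait_for_shutdown(exit: async_channel::Receiver<()>) {
        let _ = exit.recv().await;
        println!("shutdown signal received");
    }

    fn main() {
        let (signal, exit) = async_channel::bounded::<()>(1);
        let rt = tokio::runtime::Runtime::new().unwrap();
        let task = rt.spawn(wait_for_shutdown(exit));
        drop(signal); // the equivalent of the old `signal.fire()`
        rt.block_on(task).unwrap();
    }

Because the channel is MPMC, the `Receiver` can be cloned and handed to many tasks, matching how the old `exit_future::Exit` was shared across services.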
diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml
index dbb6819cd..86f4dce23 100644
--- a/lighthouse/environment/tests/testnet_dir/config.yaml
+++ b/lighthouse/environment/tests/testnet_dir/config.yaml
@@ -97,3 +97,4 @@ MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
ATTESTATION_SUBNET_PREFIX_BITS: 6
+ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3
diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs
index 06eb06fc0..932b125dc 100644
--- a/lighthouse/src/main.rs
+++ b/lighthouse/src/main.rs
@@ -11,6 +11,7 @@ use futures::TryFutureExt;
use lighthouse_version::VERSION;
use malloc_utils::configure_memory_allocator;
use slog::{crit, info};
+use std::backtrace::Backtrace;
use std::path::PathBuf;
use std::process::exit;
use task_executor::ShutdownReason;
@@ -22,8 +23,6 @@ fn bls_library_name() -> &'static str {
"blst-portable"
} else if cfg!(feature = "modern") {
"blst-modern"
- } else if cfg!(feature = "milagro") {
- "milagro"
} else {
"blst"
}
@@ -528,6 +527,21 @@ fn run(
let log = environment.core_context().log().clone();

+ // Log panics properly.
+ {
+ let log = log.clone();
+ std::panic::set_hook(Box::new(move |info| {
+ crit!(
+ log,
+ "Task panic. This is a bug!";
+ "location" => info.location().map(ToString::to_string),
+ "message" => info.payload().downcast_ref::<String>(),
+ "backtrace" => %Backtrace::capture(),
+ "advice" => "Please check above for a backtrace and notify the developers",
+ );
+ }));
+ }
+
let mut tracing_log_path: Option<PathBuf> = clap_utils::parse_optional(matches, "logfile")?;

if tracing_log_path.is_none() {
@@ -540,9 +554,7 @@ fn run(
let path = tracing_log_path.clone().unwrap();

- let turn_on_terminal_logs = matches.is_present("env_log");
-
- logging::create_tracing_layer(path, turn_on_terminal_logs);
+ logging::create_tracing_layer(path);

// Allow Prometheus to export the time at which the process was started.
metrics::expose_process_start_time(&log); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 2a88770cd..b6cd84a69 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -23,8 +23,11 @@ use types::{ Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, ProgressiveBalancesMode, }; +use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; + +// These dummy ports should ONLY be used for `enr-xxx-port` flags that do not bind. const DUMMY_ENR_TCP_PORT: u16 = 7777; const DUMMY_ENR_UDP_PORT: u16 = 8888; const DUMMY_ENR_QUIC_PORT: u16 = 9999; @@ -55,6 +58,12 @@ impl CommandLineTest { } fn run_with_zero_port(&mut self) -> CompletedTest { + // Required since Deneb was enabled on mainnet. + self.cmd.arg("--allow-insecure-genesis-sync"); + self.run_with_zero_port_and_no_genesis_sync() + } + + fn run_with_zero_port_and_no_genesis_sync(&mut self) -> CompletedTest { self.cmd.arg("-z"); self.run() } @@ -93,16 +102,16 @@ fn staking_flag() { } #[test] -fn allow_insecure_genesis_sync() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, false); - }); +#[should_panic] +fn allow_insecure_genesis_sync_default() { + CommandLineTest::new().run_with_zero_port_and_no_genesis_sync(); +} +#[test] +fn allow_insecure_genesis_sync_enabled() { CommandLineTest::new() .flag("allow-insecure-genesis-sync", None) - .run_with_zero_port() + .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { assert_eq!(config.allow_insecure_genesis_sync, true); }); @@ -166,6 +175,26 @@ fn shuffling_cache_set() { .with_config(|config| assert_eq!(config.chain.shuffling_cache_size, 500)); } +#[test] +fn snapshot_cache_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.snapshot_cache_size, + beacon_node::beacon_chain::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE + ) + }); +} + +#[test] +fn snapshot_cache_set() { + CommandLineTest::new() + .flag("state-cache-size", Some("500")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.snapshot_cache_size, 500)); +} + #[test] fn fork_choice_before_proposal_timeout_default() { CommandLineTest::new() @@ -851,6 +880,7 @@ fn network_port_flag_over_ipv4() { let port = 0; CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -864,9 +894,10 @@ fn network_port_flag_over_ipv4() { ); }); - let port = 9000; + let port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -886,6 +917,7 @@ fn network_port_flag_over_ipv6() { CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -899,10 +931,11 @@ fn network_port_flag_over_ipv6() { ); }); - let port = 9000; + let port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -925,6 +958,7 @@ fn 
network_port_flag_over_ipv4_and_ipv6() { .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) .flag("port6", Some(port6.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -947,13 +981,14 @@ fn network_port_flag_over_ipv4_and_ipv6() { ); }); - let port = 19000; - let port6 = 29000; + let port = unused_tcp4_port().expect("Unable to find unused port."); + let port6 = unused_tcp6_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("listen-address", Some("127.0.0.1")) .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) .flag("port6", Some(port6.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -983,6 +1018,7 @@ fn network_port_and_discovery_port_flags_over_ipv4() { CommandLineTest::new() .flag("port", Some(tcp4_port.to_string().as_str())) .flag("discovery-port", Some(disc4_port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -1003,6 +1039,7 @@ fn network_port_and_discovery_port_flags_over_ipv6() { .flag("listen-address", Some("::1")) .flag("port", Some(tcp6_port.to_string().as_str())) .flag("discovery-port", Some(disc6_port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -1028,6 +1065,7 @@ fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { .flag("discovery-port", Some(disc4_port.to_string().as_str())) .flag("port6", Some(tcp6_port.to_string().as_str())) .flag("discovery-port6", Some(disc6_port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -1067,6 +1105,7 @@ fn network_port_discovery_quic_port_flags_over_ipv4_and_ipv6() { .flag("port6", Some(tcp6_port.to_string().as_str())) .flag("discovery-port6", Some(disc6_port.to_string().as_str())) .flag("quic-port6", Some(quic6_port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -1284,15 +1323,15 @@ fn enr_tcp6_port_flag() { fn enr_match_flag_over_ipv4() { let addr = "127.0.0.2".parse::().unwrap(); - // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). - let udp4_port = DUMMY_ENR_UDP_PORT; - let tcp4_port = DUMMY_ENR_TCP_PORT; + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) .flag("discovery-port", Some(udp4_port.to_string().as_str())) .flag("port", Some(tcp4_port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -1315,15 +1354,15 @@ fn enr_match_flag_over_ipv6() { const ADDR: &str = "::1"; let addr = ADDR.parse::().unwrap(); - // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). 
- let udp6_port = DUMMY_ENR_UDP_PORT; - let tcp6_port = DUMMY_ENR_TCP_PORT; + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(ADDR)) .flag("discovery-port", Some(udp6_port.to_string().as_str())) .flag("port", Some(tcp6_port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -1345,15 +1384,13 @@ fn enr_match_flag_over_ipv6() { fn enr_match_flag_over_ipv4_and_ipv6() { const IPV6_ADDR: &str = "::1"; - // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). - let udp6_port = DUMMY_ENR_UDP_PORT; - let tcp6_port = DUMMY_ENR_TCP_PORT; + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); let ipv6_addr = IPV6_ADDR.parse::().unwrap(); const IPV4_ADDR: &str = "127.0.0.1"; - // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). - let udp4_port = DUMMY_ENR_UDP_PORT; - let tcp4_port = DUMMY_ENR_TCP_PORT; + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); let ipv4_addr = IPV4_ADDR.parse::().unwrap(); CommandLineTest::new() @@ -1364,6 +1401,7 @@ fn enr_match_flag_over_ipv4_and_ipv6() { .flag("listen-address", Some(IPV6_ADDR)) .flag("discovery-port6", Some(udp6_port.to_string().as_str())) .flag("port6", Some(tcp6_port.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| { assert_eq!( @@ -1490,6 +1528,7 @@ fn http_port_flag() { .flag("http", None) .flag("http-port", Some(port1.to_string().as_str())) .flag("port", Some(port2.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| assert_eq!(config.http_api.listen_port, port1)); } @@ -1647,6 +1686,7 @@ fn metrics_port_flag() { .flag("metrics", None) .flag("metrics-port", Some(port1.to_string().as_str())) .flag("port", Some(port2.to_string().as_str())) + .flag("allow-insecure-genesis-sync", None) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_port, port1)); } @@ -2372,6 +2412,7 @@ fn light_client_server_default() { .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enable_light_client_server, false); + assert_eq!(config.chain.enable_light_client_server, false); assert_eq!(config.http_api.enable_light_client_server, false); }); } @@ -2383,6 +2424,7 @@ fn light_client_server_enabled() { .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enable_light_client_server, true); + assert_eq!(config.chain.enable_light_client_server, true); }); } @@ -2582,22 +2624,3 @@ fn genesis_state_url_value() { assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); }); } - -#[test] -fn disable_duplicate_warn_logs_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| { - assert_eq!(config.network.disable_duplicate_warn_logs, false); - }); -} - -#[test] -fn disable_duplicate_warn_logs() { - CommandLineTest::new() - .flag("disable-duplicate-warn-logs", None) - .run_with_zero_port() - .with_config(|config| { - 
assert_eq!(config.network.disable_duplicate_warn_logs, true);
- });
-}
diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs
index 701aed07e..764fd87cc 100644
--- a/lighthouse/tests/validator_client.rs
+++ b/lighthouse/tests/validator_client.rs
@@ -636,3 +636,20 @@ fn validator_registration_batch_size_zero_value() {
.flag("validator-registration-batch-size", Some("0"))
.run();
}
+
+#[test]
+fn validator_disable_web3_signer_slashing_protection_default() {
+ CommandLineTest::new().run().with_config(|config| {
+ assert!(config.enable_web3signer_slashing_protection);
+ });
+}
+
+#[test]
+fn validator_disable_web3_signer_slashing_protection() {
+ CommandLineTest::new()
+ .flag("disable-slashing-protection-web3signer", None)
+ .run()
+ .with_config(|config| {
+ assert!(!config.enable_web3signer_slashing_protection);
+ });
+}
diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md
index 2862fde07..74dc4739b 100644
--- a/scripts/local_testnet/README.md
+++ b/scripts/local_testnet/README.md
@@ -193,3 +193,9 @@ Update the genesis time to now using:
./start_local_testnet.sh -p genesis.json
```
4. Block production using builder flow will start at epoch 4.
+
+### Sending a test transaction
+
+Some addresses in the local testnet are seeded with testnet ETH, allowing users to carry out transactions. To send a transaction, we first add one of these addresses to a wallet, such as [Metamask](https://metamask.io/). The private keys for the addresses are listed [here](https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/testing/execution_engine_integration/src/execution_engine.rs#L13-L14).
+
+Next, we add the local testnet to Metamask; a brief guide can be found [here](https://support.metamask.io/hc/en-us/articles/360043227612-How-to-add-a-custom-network-RPC). If you start the local testnet with default settings, the network RPC is http://localhost:6001 and the `Chain ID` is `4242`, as defined in [`vars.env`](https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/scripts/local_testnet/vars.env#L42). Once the network and account are added, you should see that the account contains testnet ETH, which allows us to carry out transactions.
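For a scripted alternative to the Metamask flow described above, the same endpoint can be exercised directly over JSON-RPC. A small sketch that checks the chain ID (assumes the `reqwest` crate with the `blocking` and `json` features plus `serde_json`; the URL and chain ID are the `vars.env` defaults quoted above):

    use serde_json::json;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let client = reqwest::blocking::Client::new();
        // eth_chainId returns the ID as a hex quantity: 0x1092 == 4242.
        let resp: serde_json::Value = client
            .post("http://localhost:6001")
            .json(&json!({
                "jsonrpc": "2.0",
                "method": "eth_chainId",
                "params": [],
                "id": 1
            }))
            .send()?
            .json()?;
        println!("chain id: {}", resp["result"]);
        Ok(())
    }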
\ No newline at end of file diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh index 940fe2b85..2660dfa3c 100755 --- a/scripts/local_testnet/beacon_node.sh +++ b/scripts/local_testnet/beacon_node.sh @@ -67,4 +67,5 @@ exec $lighthouse_binary \ --target-peers $((BN_COUNT - 1)) \ --execution-endpoint $execution_endpoint \ --execution-jwt $execution_jwt \ + --http-allow-sync-stalled \ $BN_ARGS diff --git a/scripts/local_testnet/geth.sh b/scripts/local_testnet/geth.sh index 5dc4575cf..ab1a0ec6e 100755 --- a/scripts/local_testnet/geth.sh +++ b/scripts/local_testnet/geth.sh @@ -50,4 +50,5 @@ exec $GETH_BINARY \ --bootnodes $EL_BOOTNODE_ENODE \ --port $network_port \ --http.port $http_port \ - --authrpc.port $auth_port + --authrpc.port $auth_port \ + 2>&1 | tee $data_dir/geth.log diff --git a/scripts/tests/genesis.json b/scripts/tests/genesis.json index 83f45f1a0..bfbc08c81 100644 --- a/scripts/tests/genesis.json +++ b/scripts/tests/genesis.json @@ -13,7 +13,7 @@ "londonBlock": 0, "mergeForkBlock": 0, "shanghaiTime": 0, - "shardingForkTime": 0, + "cancunTime": 0, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true }, diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 98ae08f07..ffe7ac4ae 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -16,7 +16,7 @@ DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 GENESIS_FORK_VERSION=0x42424242 # Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=add7865f8346031c72287e2edc4a4952fd34fc0a8642403e8c1bce67f215c92b +ETH1_BLOCK_HASH=7a5c656343c3a66dcf75415958b500e8873f9dab0cd588e6cf0785b52a06dd34 VALIDATOR_COUNT=80 GENESIS_VALIDATOR_COUNT=80 @@ -41,8 +41,8 @@ CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=0 BELLATRIX_FORK_EPOCH=0 -CAPELLA_FORK_EPOCH=1 -DENEB_FORK_EPOCH=18446744073709551615 +CAPELLA_FORK_EPOCH=0 +DENEB_FORK_EPOCH=0 TTD=0 @@ -62,4 +62,5 @@ PROPOSER_SCORE_BOOST=70 BN_ARGS="" # Enable doppelganger detection -VC_ARGS=" --enable-doppelganger-protection " \ No newline at end of file +VC_ARGS=" --enable-doppelganger-protection " + diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 8bc36d008..f3d00fa03 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -7,7 +7,6 @@ edition = { workspace = true } [features] # `ef_tests` feature must be enabled to actually run the tests ef_tests = [] -milagro = ["bls/milagro"] fake_crypto = ["bls/fake_crypto"] portable = ["beacon_chain/portable"] diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index e42db1801..508c28427 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.4.0-beta.4 +TESTS_TAG := v1.4.0-beta.6 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index a5ab897c3..1d1f2fa49 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -51,7 +51,8 @@ excluded_paths = [ "bls12-381-tests/deserialization_G1", "bls12-381-tests/deserialization_G2", "bls12-381-tests/hash_to_G2", - "tests/.*/eip6110" + "tests/.*/eip6110", + "tests/.*/whisk" ] diff --git a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs index 8783aa141..2a9a393bf 100644 --- a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs +++ 
b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs @@ -31,10 +31,6 @@ impl Case for BlsEthAggregatePubkeys { { return Ok(()); } - #[cfg(feature = "milagro")] - Err(bls::Error::MilagroError(_)) if self.output.is_none() => { - return Ok(()); - } Err(e) => return Err(Error::FailedToParseTest(format!("{:?}", e))), }; diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index b5c0da53a..e95bddffa 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -71,9 +71,7 @@ where f(&bytes).map_err(|e| { match e { // NOTE: this is a bit hacky, but seemingly better than the alternatives - ssz::DecodeError::BytesInvalid(message) - if message.contains("Blst") || message.contains("Milagro") => - { + ssz::DecodeError::BytesInvalid(message) if message.contains("Blst") => { Error::InvalidBLSInput(message) } e => Error::FailedToParseTest(format!( diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index dd25dba8b..5ed657c65 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,6 +1,6 @@ #![cfg(feature = "ef_tests")] -use ef_tests::{KzgInclusionMerkleProofValidityHandler, *}; +use ef_tests::*; use types::{MainnetEthSpec, MinimalEthSpec, *}; // Check that the hand-computed multiplications on EthSpec are correctly computed. @@ -605,6 +605,7 @@ fn merkle_proof_validity() { } #[test] +#[cfg(feature = "fake_crypto")] fn kzg_inclusion_merkle_proof_validity() { KzgInclusionMerkleProofValidityHandler::::default().run(); KzgInclusionMerkleProofValidityHandler::::default().run(); diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 6de108fcb..7f66658f0 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -4,12 +4,12 @@ version = "0.1.0" edition = { workspace = true } [dependencies] +async-channel = { workspace = true } tempfile = { workspace = true } serde_json = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } futures = { workspace = true } -exit-future = { workspace = true } environment = { workspace = true } execution_layer = { workspace = true } sensitive_url = { workspace = true } @@ -24,4 +24,4 @@ fork_choice = { workspace = true } logging = { workspace = true } [features] -portable = ["types/portable"] \ No newline at end of file +portable = ["types/portable"] diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index b0701e80a..8a61f17ce 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -42,7 +42,7 @@ pub struct TestRig { ee_a: ExecutionPair, ee_b: ExecutionPair, spec: ChainSpec, - _runtime_shutdown: exit_future::Signal, + _runtime_shutdown: async_channel::Sender<()>, } /// Import a private key into the execution engine and unlock it so that we can @@ -111,7 +111,7 @@ impl TestRig { .build() .unwrap(), ); - let (runtime_shutdown, exit) = exit_future::signal(); + let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); let mut spec = TEST_FORK.make_genesis_spec(MainnetEthSpec::default_spec()); @@ -381,7 +381,7 @@ impl TestRig { let status = self .ee_a .execution_layer - .notify_new_payload(valid_payload.clone().try_into().unwrap()) + 
.notify_new_payload(valid_payload.to_ref().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -435,7 +435,7 @@ impl TestRig { let status = self .ee_a .execution_layer - .notify_new_payload(invalid_payload.try_into().unwrap()) + .notify_new_payload(invalid_payload.to_ref().try_into().unwrap()) .await .unwrap(); assert!(matches!( @@ -507,7 +507,7 @@ impl TestRig { let status = self .ee_a .execution_layer - .notify_new_payload(second_payload.clone().try_into().unwrap()) + .notify_new_payload(second_payload.to_ref().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -559,7 +559,7 @@ impl TestRig { let status = self .ee_b .execution_layer - .notify_new_payload(second_payload.clone().try_into().unwrap()) + .notify_new_payload(second_payload.to_ref().try_into().unwrap()) .await .unwrap(); assert!(matches!(status, PayloadStatus::Syncing)); @@ -597,7 +597,7 @@ impl TestRig { let status = self .ee_b .execution_layer - .notify_new_payload(valid_payload.clone().try_into().unwrap()) + .notify_new_payload(valid_payload.to_ref().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -611,7 +611,7 @@ impl TestRig { let status = self .ee_b .execution_layer - .notify_new_payload(second_payload.clone().try_into().unwrap()) + .notify_new_payload(second_payload.to_ref().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); diff --git a/testing/network_testing/README.md b/testing/network_testing/README.md index 359151468..f97c3cff2 100644 --- a/testing/network_testing/README.md +++ b/testing/network_testing/README.md @@ -60,7 +60,7 @@ https://prater.checkpoint.sigp.io --execution-endpoint http://localhost:8551 Mainnet: ``` -$ lighthouse --network prater bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url +$ lighthouse --network mainnet bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url https://checkpoint.sigp.io --execution-endpoint http://localhost:8551 ``` diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index d34cdbc9f..f38eacc39 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,5 +1,5 @@ use crate::local_network::LocalNetwork; -use node_test_rig::eth2::types::{BlockId, StateId}; +use node_test_rig::eth2::types::{BlockId, FinalityCheckpointsData, StateId}; use std::time::Duration; use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, Slot, Unsigned}; @@ -243,3 +243,93 @@ pub async fn verify_transition_block_finalized( )) } } + +pub(crate) async fn verify_light_client_updates( + network: LocalNetwork, + start_slot: Slot, + end_slot: Slot, + slot_duration: Duration, +) -> Result<(), String> { + slot_delay(start_slot, slot_duration).await; + + // Tolerance of 2 slot allows for 1 single missed slot. 
+ let light_client_update_slot_tolerance = Slot::new(2); + let remote_nodes = network.remote_nodes()?; + let client = remote_nodes.first().unwrap(); + let mut have_seen_block = false; + let mut have_achieved_finality = false; + + for slot in start_slot.as_u64()..=end_slot.as_u64() { + slot_delay(Slot::new(1), slot_duration).await; + let slot = Slot::new(slot); + let previous_slot = slot - 1; + + let previous_slot_block = client + .get_beacon_blocks::(BlockId::Slot(previous_slot)) + .await + .map_err(|e| { + format!("Unable to get beacon block for previous slot {previous_slot:?}: {e:?}") + })?; + let previous_slot_has_block = previous_slot_block.is_some(); + + if !have_seen_block { + // Make sure we have seen the first block in Altair, to make sure we have sync aggregates available. + if previous_slot_has_block { + have_seen_block = true; + } + // Wait for another slot before we check the first update to avoid race condition. + continue; + } + + // Make sure previous slot has a block, otherwise skip checking for the signature slot distance + if !previous_slot_has_block { + continue; + } + + // Verify light client optimistic update. `signature_slot_distance` should be 1 in the ideal scenario. + let signature_slot = client + .get_beacon_light_client_optimistic_update::() + .await + .map_err(|e| format!("Error while getting light client updates: {:?}", e))? + .ok_or(format!("Light client optimistic update not found {slot:?}"))? + .data + .signature_slot; + let signature_slot_distance = slot - signature_slot; + if signature_slot_distance > light_client_update_slot_tolerance { + return Err(format!("Existing optimistic update too old: signature slot {signature_slot}, current slot {slot:?}")); + } + + // Verify light client finality update. `signature_slot_distance` should be 1 in the ideal scenario. + // NOTE: Currently finality updates are produced as long as the finalized block is known, even if the finalized header + // sync committee period does not match the signature slot committee period. + // TODO: This complies with the current spec, but we should check if this is a bug. + if !have_achieved_finality { + let FinalityCheckpointsData { finalized, .. } = client + .get_beacon_states_finality_checkpoints(StateId::Head) + .await + .map_err(|e| format!("Unable to get beacon state finality checkpoint: {e:?}"))? + .ok_or("Unable to get head state".to_string())? + .data; + if !finalized.root.is_zero() { + // Wait for another slot before we check the first finality update to avoid race condition. + have_achieved_finality = true; + } + continue; + } + let signature_slot = client + .get_beacon_light_client_finality_update::() + .await + .map_err(|e| format!("Error while getting light client updates: {:?}", e))? + .ok_or(format!("Light client finality update not found {slot:?}"))? + .data + .signature_slot; + let signature_slot_distance = slot - signature_slot; + if signature_slot_distance > light_client_update_slot_tolerance { + return Err(format!( + "Existing finality update too old: signature slot {signature_slot}, current slot {slot:?}" + )); + } + } + + Ok(()) +} diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 953dcf582..8d6ffc42f 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -220,6 +220,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { fork, sync_aggregate, transition, + light_client_update, ) = futures::join!( // Check that the chain finalizes at the first given opportunity. 
checks::verify_first_finalization(network.clone(), slot_duration), @@ -272,6 +273,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { Epoch::new(TERMINAL_BLOCK / MinimalEthSpec::slots_per_epoch()), slot_duration, post_merge_sim + ), + checks::verify_light_client_updates( + network.clone(), + // Sync aggregate available from slot 1 after Altair fork transition. + Epoch::new(ALTAIR_FORK_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()) + 1, + Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()), + slot_duration ) ); @@ -282,6 +290,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { fork?; sync_aggregate?; transition?; + light_client_update?; // The `final_future` either completes immediately or never completes, depending on the value // of `continue_after_checks`. @@ -380,6 +389,9 @@ async fn create_local_network( beacon_config.network.target_peers = node_count + proposer_nodes - 1; beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); + beacon_config.network.enable_light_client_server = true; + beacon_config.chain.enable_light_client_server = true; + beacon_config.http_api.enable_light_client_server = true; if post_merge_sim { let el_config = execution_layer::Config { diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 38b775b39..1bdf62cd2 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -8,6 +8,7 @@ edition = { workspace = true } [dependencies] [dev-dependencies] +async-channel = { workspace = true } eth2_keystore = { workspace = true } types = { workspace = true } tempfile = { workspace = true } @@ -17,7 +18,6 @@ url = { workspace = true } validator_client = { workspace = true } slot_clock = { workspace = true } futures = { workspace = true } -exit-future = { workspace = true } task_executor = { workspace = true } environment = { workspace = true } account_utils = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 6f3536fe4..3090b4da5 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -45,7 +45,7 @@ mod tests { initialized_validators::{ load_pem_certificate, load_pkcs12_identity, InitializedValidators, }, - validator_store::ValidatorStore, + validator_store::{Error as ValidatorStoreError, ValidatorStore}, SlashingDatabase, SLASHING_PROTECTION_FILENAME, }; @@ -157,6 +157,18 @@ mod tests { } } + #[derive(Debug, Clone, Copy)] + struct SlashingProtectionConfig { + /// Whether to enable slashing protection for web3signer keys locally within Lighthouse. + local: bool, + } + + impl Default for SlashingProtectionConfig { + fn default() -> Self { + SlashingProtectionConfig { local: true } + } + } + impl Web3SignerRig { pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self { GET_WEB3SIGNER_BIN @@ -231,6 +243,8 @@ mod tests { )) .arg("eth2") .arg(format!("--network={}", network)) + // Can't *easily* test `--slashing-protection-enabled=true` because web3signer + // requires a Postgres instance. 
.arg("--slashing-protection-enabled=false") .stdout(stdio()) .stderr(stdio()) @@ -293,18 +307,26 @@ mod tests { validator_store: Arc>, _validator_dir: TempDir, runtime: Arc, - _runtime_shutdown: exit_future::Signal, + _runtime_shutdown: async_channel::Sender<()>, + using_web3signer: bool, } impl ValidatorStoreRig { - pub async fn new(validator_definitions: Vec, spec: ChainSpec) -> Self { + pub async fn new( + validator_definitions: Vec, + slashing_protection_config: SlashingProtectionConfig, + using_web3signer: bool, + spec: ChainSpec, + ) -> Self { let log = environment::null_logger().unwrap(); let validator_dir = TempDir::new().unwrap(); + let config = validator_client::Config::default(); let validator_definitions = ValidatorDefinitions::from(validator_definitions); let initialized_validators = InitializedValidators::from_definitions( validator_definitions, validator_dir.path().into(), + config.clone(), log.clone(), ) .await @@ -318,7 +340,7 @@ mod tests { .build() .unwrap(), ); - let (runtime_shutdown, exit) = exit_future::signal(); + let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); @@ -331,7 +353,10 @@ mod tests { let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); - let config = validator_client::Config::default(); + let config = validator_client::Config { + enable_web3signer_slashing_protection: slashing_protection_config.local, + ..Default::default() + }; let validator_store = ValidatorStore::<_, E>::new( initialized_validators, @@ -350,6 +375,7 @@ mod tests { _validator_dir: validator_dir, runtime, _runtime_shutdown: runtime_shutdown, + using_web3signer, } } @@ -378,7 +404,12 @@ mod tests { } impl TestingRig { - pub async fn new(network: &str, spec: ChainSpec, listen_port: u16) -> Self { + pub async fn new( + network: &str, + slashing_protection_config: SlashingProtectionConfig, + spec: ChainSpec, + listen_port: u16, + ) -> Self { let signer_rig = Web3SignerRig::new(network, WEB3SIGNER_LISTEN_ADDRESS, listen_port).await; let validator_pubkey = signer_rig.keypair.pk.clone(); @@ -400,7 +431,13 @@ mod tests { voting_keystore_password: Some(KEYSTORE_PASSWORD.to_string().into()), }, }; - ValidatorStoreRig::new(vec![validator_definition], spec.clone()).await + ValidatorStoreRig::new( + vec![validator_definition], + slashing_protection_config, + false, + spec.clone(), + ) + .await }; let remote_signer_validator_store = { @@ -422,7 +459,13 @@ mod tests { client_identity_password: Some(client_identity_password()), }), }; - ValidatorStoreRig::new(vec![validator_definition], spec).await + ValidatorStoreRig::new( + vec![validator_definition], + slashing_protection_config, + true, + spec, + ) + .await }; Self { @@ -465,6 +508,36 @@ mod tests { assert!(prev_signature.is_some(), "sanity check"); self } + + /// Assert that a slashable message fails to be signed locally and is either signed or not + /// by the web3signer rig depending on the value of `web3signer_should_sign`. 
+ pub async fn assert_slashable_message_should_sign( + self, + case_name: &str, + generate_sig: F, + web3signer_should_sign: bool, + ) -> Self + where + F: Fn(PublicKeyBytes, Arc>) -> R, + R: Future>, + { + for validator_rig in &self.validator_rigs { + let result = + generate_sig(self.validator_pubkey, validator_rig.validator_store.clone()) + .await; + + if !validator_rig.using_web3signer || !web3signer_should_sign { + let err = result.unwrap_err(); + assert!( + matches!(err, ValidatorStoreError::Slashable(_)), + "should not sign slashable {case_name}" + ); + } else { + assert_eq!(result, Ok(()), "should sign slashable {case_name}"); + } + } + self + } } /// Get a generic, arbitrary attestation for signing. @@ -503,64 +576,69 @@ mod tests { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); let spec = &network_config.chain_spec::().unwrap(); - TestingRig::new(network, spec.clone(), listen_port) - .await - .assert_signatures_match("randao_reveal", |pubkey, validator_store| async move { + TestingRig::new( + network, + SlashingProtectionConfig::default(), + spec.clone(), + listen_port, + ) + .await + .assert_signatures_match("randao_reveal", |pubkey, validator_store| async move { + validator_store + .randao_reveal(pubkey, Epoch::new(0)) + .await + .unwrap() + }) + .await + .assert_signatures_match("beacon_block_base", |pubkey, validator_store| async move { + let block = BeaconBlock::Base(BeaconBlockBase::empty(spec)); + let block_slot = block.slot(); + validator_store + .sign_block(pubkey, block, block_slot) + .await + .unwrap() + }) + .await + .assert_signatures_match("attestation", |pubkey, validator_store| async move { + let mut attestation = get_attestation(); + validator_store + .sign_attestation(pubkey, 0, &mut attestation, Epoch::new(0)) + .await + .unwrap(); + attestation + }) + .await + .assert_signatures_match("signed_aggregate", |pubkey, validator_store| async move { + let attestation = get_attestation(); + validator_store + .produce_signed_aggregate_and_proof( + pubkey, + 0, + attestation, + SelectionProof::from(Signature::empty()), + ) + .await + .unwrap() + }) + .await + .assert_signatures_match("selection_proof", |pubkey, validator_store| async move { + validator_store + .produce_selection_proof(pubkey, Slot::new(0)) + .await + .unwrap() + }) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); validator_store - .randao_reveal(pubkey, Epoch::new(0)) + .sign_validator_registration_data(val_reg_data) .await .unwrap() - }) - .await - .assert_signatures_match("beacon_block_base", |pubkey, validator_store| async move { - let block = BeaconBlock::Base(BeaconBlockBase::empty(spec)); - let block_slot = block.slot(); - validator_store - .sign_block(pubkey, block, block_slot) - .await - .unwrap() - }) - .await - .assert_signatures_match("attestation", |pubkey, validator_store| async move { - let mut attestation = get_attestation(); - validator_store - .sign_attestation(pubkey, 0, &mut attestation, Epoch::new(0)) - .await - .unwrap(); - attestation - }) - .await - .assert_signatures_match("signed_aggregate", |pubkey, validator_store| async move { - let attestation = get_attestation(); - validator_store - .produce_signed_aggregate_and_proof( - pubkey, - 0, - attestation, - SelectionProof::from(Signature::empty()), - ) - .await - .unwrap() - }) - .await - .assert_signatures_match("selection_proof", |pubkey, validator_store| async move { - validator_store - 
-                    .produce_selection_proof(pubkey, Slot::new(0))
-                    .await
-                    .unwrap()
-            })
-            .await
-            .assert_signatures_match(
-                "validator_registration",
-                |pubkey, validator_store| async move {
-                    let val_reg_data = get_validator_registration(pubkey);
-                    validator_store
-                        .sign_validator_registration_data(val_reg_data)
-                        .await
-                        .unwrap()
-                },
-            )
-            .await;
+            },
+        )
+        .await;
     }

     /// Test all the Altair types.
@@ -572,82 +650,78 @@ mod tests {
             .unwrap()
             .start_slot(E::slots_per_epoch());

-        TestingRig::new(network, spec.clone(), listen_port)
-            .await
-            .assert_signatures_match(
-                "beacon_block_altair",
-                |pubkey, validator_store| async move {
-                    let mut altair_block = BeaconBlockAltair::empty(spec);
-                    altair_block.slot = altair_fork_slot;
-                    validator_store
-                        .sign_block(pubkey, BeaconBlock::Altair(altair_block), altair_fork_slot)
-                        .await
-                        .unwrap()
-                },
-            )
-            .await
-            .assert_signatures_match(
-                "sync_selection_proof",
-                |pubkey, validator_store| async move {
-                    validator_store
-                        .produce_sync_selection_proof(
-                            &pubkey,
-                            altair_fork_slot,
-                            SyncSubnetId::from(0),
-                        )
-                        .await
-                        .unwrap()
-                },
-            )
-            .await
-            .assert_signatures_match(
-                "sync_committee_signature",
-                |pubkey, validator_store| async move {
-                    validator_store
-                        .produce_sync_committee_signature(
-                            altair_fork_slot,
-                            Hash256::zero(),
-                            0,
-                            &pubkey,
-                        )
-                        .await
-                        .unwrap()
-                },
-            )
-            .await
-            .assert_signatures_match(
-                "signed_contribution_and_proof",
-                |pubkey, validator_store| async move {
-                    let contribution = SyncCommitteeContribution {
-                        slot: altair_fork_slot,
-                        beacon_block_root: <_>::default(),
-                        subcommittee_index: <_>::default(),
-                        aggregation_bits: <_>::default(),
-                        signature: AggregateSignature::empty(),
-                    };
-                    validator_store
-                        .produce_signed_contribution_and_proof(
-                            0,
-                            pubkey,
-                            contribution,
-                            SyncSelectionProof::from(Signature::empty()),
-                        )
-                        .await
-                        .unwrap()
-                },
-            )
-            .await
-            .assert_signatures_match(
-                "validator_registration",
-                |pubkey, validator_store| async move {
-                    let val_reg_data = get_validator_registration(pubkey);
-                    validator_store
-                        .sign_validator_registration_data(val_reg_data)
-                        .await
-                        .unwrap()
-                },
-            )
-            .await;
+        TestingRig::new(
+            network,
+            SlashingProtectionConfig::default(),
+            spec.clone(),
+            listen_port,
+        )
+        .await
+        .assert_signatures_match(
+            "beacon_block_altair",
+            |pubkey, validator_store| async move {
+                let mut altair_block = BeaconBlockAltair::empty(spec);
+                altair_block.slot = altair_fork_slot;
+                validator_store
+                    .sign_block(pubkey, BeaconBlock::Altair(altair_block), altair_fork_slot)
+                    .await
+                    .unwrap()
+            },
+        )
+        .await
+        .assert_signatures_match(
+            "sync_selection_proof",
+            |pubkey, validator_store| async move {
+                validator_store
+                    .produce_sync_selection_proof(&pubkey, altair_fork_slot, SyncSubnetId::from(0))
+                    .await
+                    .unwrap()
+            },
+        )
+        .await
+        .assert_signatures_match(
+            "sync_committee_signature",
+            |pubkey, validator_store| async move {
+                validator_store
+                    .produce_sync_committee_signature(altair_fork_slot, Hash256::zero(), 0, &pubkey)
+                    .await
+                    .unwrap()
+            },
+        )
+        .await
+        .assert_signatures_match(
+            "signed_contribution_and_proof",
+            |pubkey, validator_store| async move {
+                let contribution = SyncCommitteeContribution {
+                    slot: altair_fork_slot,
+                    beacon_block_root: <_>::default(),
+                    subcommittee_index: <_>::default(),
+                    aggregation_bits: <_>::default(),
+                    signature: AggregateSignature::empty(),
+                };
+                validator_store
+                    .produce_signed_contribution_and_proof(
+                        0,
+                        pubkey,
+                        contribution,
+                        SyncSelectionProof::from(Signature::empty()),
+                    )
+                    .await
+                    .unwrap()
+            },
+        )
+        .await
+        .assert_signatures_match(
+            "validator_registration",
+            |pubkey, validator_store| async move {
+                let val_reg_data = get_validator_registration(pubkey);
+                validator_store
+                    .sign_validator_registration_data(val_reg_data)
+                    .await
+                    .unwrap()
+            },
+        )
+        .await;
     }

     /// Test all the Merge types.
@@ -659,17 +733,154 @@ mod tests {
             .unwrap()
             .start_slot(E::slots_per_epoch());

-        TestingRig::new(network, spec.clone(), listen_port)
-            .await
-            .assert_signatures_match("beacon_block_merge", |pubkey, validator_store| async move {
-                let mut merge_block = BeaconBlockMerge::empty(spec);
-                merge_block.slot = merge_fork_slot;
+        TestingRig::new(
+            network,
+            SlashingProtectionConfig::default(),
+            spec.clone(),
+            listen_port,
+        )
+        .await
+        .assert_signatures_match("beacon_block_merge", |pubkey, validator_store| async move {
+            let mut merge_block = BeaconBlockMerge::empty(spec);
+            merge_block.slot = merge_fork_slot;
+            validator_store
+                .sign_block(pubkey, BeaconBlock::Merge(merge_block), merge_fork_slot)
+                .await
+                .unwrap()
+        })
+        .await;
+    }
+
+    async fn test_lighthouse_slashing_protection(
+        slashing_protection_config: SlashingProtectionConfig,
+        listen_port: u16,
+    ) {
+        // Run these tests on mainnet.
+        let network = "mainnet";
+
+        let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap();
+        let spec = &network_config.chain_spec::<E>().unwrap();
+        let merge_fork_slot = spec
+            .bellatrix_fork_epoch
+            .unwrap()
+            .start_slot(E::slots_per_epoch());
+
+        // The slashable message should only be signed by the web3signer validator if slashing
+        // protection is disabled in Lighthouse.
+        let slashable_message_should_sign = !slashing_protection_config.local;
+
+        let first_attestation = || {
+            let mut attestation = get_attestation();
+            attestation.data.source.epoch = Epoch::new(1);
+            attestation.data.target.epoch = Epoch::new(4);
+            attestation
+        };
+
+        let double_vote_attestation = || {
+            let mut attestation = first_attestation();
+            attestation.data.beacon_block_root = Hash256::from_low_u64_be(1);
+            attestation
+        };
+
+        let surrounding_attestation = || {
+            let mut attestation = first_attestation();
+            attestation.data.source.epoch = Epoch::new(0);
+            attestation.data.target.epoch = Epoch::new(5);
+            attestation
+        };
+
+        let surrounded_attestation = || {
+            let mut attestation = first_attestation();
+            attestation.data.source.epoch = Epoch::new(2);
+            attestation.data.target.epoch = Epoch::new(3);
+            attestation
+        };
+
+        let first_block = || {
+            let mut merge_block = BeaconBlockMerge::empty(spec);
+            merge_block.slot = merge_fork_slot;
+            BeaconBlock::Merge(merge_block)
+        };
+
+        let double_vote_block = || {
+            let mut block = first_block();
+            *block.state_root_mut() = Hash256::repeat_byte(0xff);
+            block
+        };
+
+        let current_epoch = Epoch::new(5);
+
+        TestingRig::new(
+            network,
+            slashing_protection_config,
+            spec.clone(),
+            listen_port,
+        )
+        .await
+        .assert_signatures_match("first_attestation", |pubkey, validator_store| async move {
+            let mut attestation = first_attestation();
+            validator_store
+                .sign_attestation(pubkey, 0, &mut attestation, current_epoch)
+                .await
+                .unwrap();
+            attestation
+        })
+        .await
+        .assert_slashable_message_should_sign(
+            "double_vote_attestation",
+            move |pubkey, validator_store| async move {
+                let mut attestation = double_vote_attestation();
                 validator_store
-                    .sign_block(pubkey, BeaconBlock::Merge(merge_block), merge_fork_slot)
+                    .sign_attestation(pubkey, 0, &mut attestation, current_epoch)
                     .await
-                    .unwrap()
-            })
-            .await;
+            },
+            slashable_message_should_sign,
+        )
+        .await
+        .assert_slashable_message_should_sign(
+            "surrounding_attestation",
+            move |pubkey, validator_store| async move {
+                let mut attestation = surrounding_attestation();
+                validator_store
+                    .sign_attestation(pubkey, 0, &mut attestation, current_epoch)
+                    .await
+            },
+            slashable_message_should_sign,
+        )
+        .await
+        .assert_slashable_message_should_sign(
+            "surrounded_attestation",
+            move |pubkey, validator_store| async move {
+                let mut attestation = surrounded_attestation();
+                validator_store
+                    .sign_attestation(pubkey, 0, &mut attestation, current_epoch)
+                    .await
+            },
+            slashable_message_should_sign,
+        )
+        .await
+        .assert_signatures_match("first_block", |pubkey, validator_store| async move {
+            let block = first_block();
+            let slot = block.slot();
+            validator_store
+                .sign_block(pubkey, block, slot)
+                .await
+                .unwrap()
+        })
+        .await
+        .assert_slashable_message_should_sign(
+            "double_vote_block",
+            move |pubkey, validator_store| async move {
+                let block = double_vote_block();
+                let slot = block.slot();
+                validator_store
+                    .sign_block(pubkey, block, slot)
+                    .await
+                    .map(|_| ())
+            },
+            slashable_message_should_sign,
+        )
+        .await;
     }

     #[tokio::test]
@@ -706,4 +917,14 @@ mod tests {
     async fn sepolia_merge_types() {
         test_merge_types("sepolia", 4252).await
     }
+
+    #[tokio::test]
+    async fn slashing_protection_disabled_locally() {
+        test_lighthouse_slashing_protection(SlashingProtectionConfig { local: false }, 4253).await
+    }
+
+    #[tokio::test]
+    async fn slashing_protection_enabled_locally() {
+        test_lighthouse_slashing_protection(SlashingProtectionConfig { local: true }, 4254).await
+    }
 }
diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml
index 90a82b7e3..d3dffc3d0 100644
--- a/validator_client/Cargo.toml
+++ b/validator_client/Cargo.toml
@@ -10,6 +10,7 @@ path = "src/lib.rs"

 [dev-dependencies]
 tokio = { workspace = true }
+itertools = { workspace = true }

 [dependencies]
 tree_hash = { workspace = true }
@@ -30,7 +31,6 @@ directory = { workspace = true }
 lockfile = { workspace = true }
 environment = { workspace = true }
 parking_lot = { workspace = true }
-exit-future = { workspace = true }
 filesystem = { workspace = true }
 hex = { workspace = true }
 deposit_contract = { workspace = true }
@@ -51,7 +51,6 @@ ring = { workspace = true }
 rand = { workspace = true, features = ["small_rng"] }
 lighthouse_metrics = { workspace = true }
 lazy_static = { workspace = true }
-itertools = { workspace = true }
 monitoring_api = { workspace = true }
 sensitive_url = { workspace = true }
 task_executor = { workspace = true }
diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs
index 43b9d60e2..1c6b60add 100644
--- a/validator_client/src/attestation_service.rs
+++ b/validator_client/src/attestation_service.rs
@@ -430,6 +430,11 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
             .flatten()
             .unzip();

+        if attestations.is_empty() {
+            warn!(log, "No attestations were published");
+            return Ok(None);
+        }
+
         // Post the attestations to the BN.
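// Editorial sketch (assumption, not from this diff): the early `Ok(None)`
// above implies the surrounding function returns `Result<Option<_>, _>`, so a
// hypothetical caller would treat `None` as "nothing to aggregate this slot":
//
//     if let Some(attestation_data) = produce_attestations(/* ... */).await? {
//         // proceed to produce and publish aggregates for `attestation_data`
//     } else {
//         // no attestations were published; skip aggregation for this slot
//     }
//
// (`produce_attestations` is a placeholder name for illustration only.)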
         match self
             .beacon_nodes
diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs
index e6d19bc89..16a265212 100644
--- a/validator_client/src/cli.rs
+++ b/validator_client/src/cli.rs
@@ -145,6 +145,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 future.")
                 .takes_value(false)
         )
+        .arg(
+            Arg::with_name("distributed")
+                .long("distributed")
+                .help("Enables functionality required for running the validator in a distributed validator cluster.")
+                .takes_value(false)
+        )
         /* REST API related arguments */
         .arg(
             Arg::with_name("http")
@@ -367,4 +373,35 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 constructed by builders, regardless of payload value.")
                 .takes_value(false),
         )
+        .arg(
+            Arg::with_name("disable-slashing-protection-web3signer")
+                .long("disable-slashing-protection-web3signer")
+                .help("Disable Lighthouse's slashing protection for all web3signer keys. This can \
+                    reduce the I/O burden on the VC but is only safe if slashing protection \
+                    is enabled on the remote signer and is implemented correctly. DO NOT ENABLE \
+                    THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON \
+                    THE REMOTE SIGNER. YOU WILL GET SLASHED IF YOU USE THIS FLAG WITHOUT \
+                    ENABLING WEB3SIGNER'S SLASHING PROTECTION.")
+                .takes_value(false)
+        )
+        /*
+         * Experimental/development options.
+         */
+        .arg(
+            Arg::with_name("web3-signer-keep-alive-timeout")
+                .long("web3-signer-keep-alive-timeout")
+                .value_name("MILLIS")
+                .default_value("90000")
+                .help("Keep-alive timeout for each web3signer connection. Set to 'null' to never \
+                    timeout")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("web3-signer-max-idle-connections")
+                .long("web3-signer-max-idle-connections")
+                .value_name("COUNT")
+                .help("Maximum number of idle connections to maintain per web3signer host. Default \
+                    is unlimited.")
+                .takes_value(true),
+        )
 }
diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs
index a919afb01..ae59829a3 100644
--- a/validator_client/src/config.rs
+++ b/validator_client/src/config.rs
@@ -14,6 +14,7 @@ use slog::{info, warn, Logger};
 use std::fs;
 use std::net::IpAddr;
 use std::path::PathBuf;
+use std::time::Duration;
 use types::{Address, GRAFFITI_BYTES_LEN};

 pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/";
@@ -75,12 +76,18 @@ pub struct Config {
     pub enable_latency_measurement_service: bool,
     /// Defines the number of validators per `validator/register_validator` request sent to the BN.
     pub validator_registration_batch_size: usize,
+    /// Enable slashing protection even while using web3signer keys.
+    pub enable_web3signer_slashing_protection: bool,
     /// Enables block production via the block v3 endpoint. This configuration option can be removed post deneb.
     pub produce_block_v3: bool,
     /// Specifies the boost factor, a percentage multiplier to apply to the builder's payload value.
     pub builder_boost_factor: Option<u64>,
     /// If true, Lighthouse will prefer builder proposals, if available.
     pub prefer_builder_proposals: bool,
+    /// Whether we are running with distributed network support.
+    pub distributed: bool,
+    pub web3_signer_keep_alive_timeout: Option<Duration>,
+    pub web3_signer_max_idle_connections: Option<usize>,
 }

 impl Default for Config {
@@ -121,9 +128,13 @@ impl Default for Config {
             broadcast_topics: vec![ApiTopic::Subscriptions],
             enable_latency_measurement_service: true,
             validator_registration_batch_size: 500,
+            enable_web3signer_slashing_protection: true,
             produce_block_v3: false,
             builder_boost_factor: None,
             prefer_builder_proposals: false,
+            distributed: false,
+            web3_signer_keep_alive_timeout: Some(Duration::from_secs(90)),
+            web3_signer_max_idle_connections: None,
         }
     }
 }
@@ -225,6 +236,10 @@ impl Config {
             config.beacon_nodes_tls_certs = Some(tls_certs.split(',').map(PathBuf::from).collect());
         }

+        if cli_args.is_present("distributed") {
+            config.distributed = true;
+        }
+
         if cli_args.is_present("disable-run-on-all") {
             warn!(
                 log,
@@ -245,6 +260,22 @@ impl Config {
                 .collect::<Result<_, _>>()?;
         }

+        /*
+         * Web3 signer
+         */
+        if let Some(s) = parse_optional::<String>(cli_args, "web3-signer-keep-alive-timeout")? {
+            config.web3_signer_keep_alive_timeout = if s == "null" {
+                None
+            } else {
+                Some(Duration::from_millis(
+                    s.parse().map_err(|_| "invalid timeout value".to_string())?,
+                ))
+            }
+        }
+        if let Some(n) = parse_optional::<usize>(cli_args, "web3-signer-max-idle-connections")? {
+            config.web3_signer_max_idle_connections = Some(n);
+        }
+
         /*
          * Http API server
          */
@@ -386,6 +417,19 @@ impl Config {
             return Err("validator-registration-batch-size cannot be 0".to_string());
         }

+        config.enable_web3signer_slashing_protection =
+            if cli_args.is_present("disable-slashing-protection-web3signer") {
+                warn!(
+                    log,
+                    "Slashing protection for remote keys disabled";
+                    "info" => "ensure slashing protection on web3signer is enabled or you WILL \
+                        get slashed"
+                );
+                false
+            } else {
+                true
+            };
+
         Ok(config)
     }
 }
diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs
index 26747f811..b5b56943c 100644
--- a/validator_client/src/duties_service.rs
+++ b/validator_client/src/duties_service.rs
@@ -6,7 +6,7 @@
 //! The `DutiesService` is also responsible for sending events to the `BlockService` which trigger
 //! block production.

-mod sync;
+pub mod sync;

 use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, OfflineOnFailure, RequireSynced};
 use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY};
@@ -42,6 +42,9 @@ const HISTORICAL_DUTIES_EPOCHS: u64 = 2;
 /// At start-up selection proofs will be computed with less lookahead out of necessity.
 const SELECTION_PROOF_SLOT_LOOKAHEAD: u64 = 8;

+/// The attestation selection proof lookahead for those running with the --distributed flag.
+const SELECTION_PROOF_SLOT_LOOKAHEAD_DVT: u64 = 1;
+
 /// Fraction of a slot at which selection proof signing should happen (2 means half way).
 const SELECTION_PROOF_SCHEDULE_DENOM: u32 = 2;

@@ -119,43 +122,37 @@ pub struct SubscriptionSlots {
     slots: Vec<(Slot, AtomicBool)>,
 }

-impl DutyAndProof {
-    /// Instantiate `Self`, computing the selection proof as well.
-    pub async fn new_with_selection_proof(
-        duty: AttesterData,
-        validator_store: &ValidatorStore<T, E>,
-        spec: &ChainSpec,
-    ) -> Result<Self, Error> {
-        let selection_proof = validator_store
-            .produce_selection_proof(duty.pubkey, duty.slot)
-            .await
-            .map_err(Error::FailedToProduceSelectionProof)?;
+/// Create a selection proof for `duty`.
+///
+/// Return `Ok(None)` if the attesting validator is not an aggregator.
+async fn make_selection_proof<T: SlotClock + 'static, E: EthSpec>(
+    duty: &AttesterData,
+    validator_store: &ValidatorStore<T, E>,
+    spec: &ChainSpec,
+) -> Result<Option<SelectionProof>, Error> {
+    let selection_proof = validator_store
+        .produce_selection_proof(duty.pubkey, duty.slot)
+        .await
+        .map_err(Error::FailedToProduceSelectionProof)?;

-        let selection_proof = selection_proof
-            .is_aggregator(duty.committee_length as usize, spec)
-            .map_err(Error::InvalidModulo)
-            .map(|is_aggregator| {
-                if is_aggregator {
-                    Some(selection_proof)
-                } else {
-                    // Don't bother storing the selection proof if the validator isn't an
-                    // aggregator, we won't need it.
-                    None
-                }
-            })?;
-
-        let subscription_slots = SubscriptionSlots::new(duty.slot);
-
-        Ok(Self {
-            duty,
-            selection_proof,
-            subscription_slots,
+    selection_proof
+        .is_aggregator(duty.committee_length as usize, spec)
+        .map_err(Error::InvalidModulo)
+        .map(|is_aggregator| {
+            if is_aggregator {
+                Some(selection_proof)
+            } else {
+                // Don't bother storing the selection proof if the validator isn't an
+                // aggregator, we won't need it.
+                None
+            }
         })
-    }
+}

+impl DutyAndProof {
     /// Create a new `DutyAndProof` with the selection proof waiting to be filled in.
-    pub fn new_without_selection_proof(duty: AttesterData) -> Self {
-        let subscription_slots = SubscriptionSlots::new(duty.slot);
+    pub fn new_without_selection_proof(duty: AttesterData, current_slot: Slot) -> Self {
+        let subscription_slots = SubscriptionSlots::new(duty.slot, current_slot);
         Self {
             duty,
             selection_proof: None,
@@ -165,10 +162,13 @@ impl DutyAndProof {
 }

 impl SubscriptionSlots {
-    fn new(duty_slot: Slot) -> Arc<Self> {
+    fn new(duty_slot: Slot, current_slot: Slot) -> Arc<Self> {
         let slots = ATTESTATION_SUBSCRIPTION_OFFSETS
             .into_iter()
             .filter_map(|offset| duty_slot.safe_sub(offset).ok())
+            // Keep only scheduled slots that haven't happened yet. This avoids sending expired
+            // subscriptions.
+            .filter(|scheduled_slot| *scheduled_slot > current_slot)
             .map(|scheduled_slot| (scheduled_slot, AtomicBool::new(false)))
             .collect();
         Arc::new(Self { slots })
@@ -211,16 +211,21 @@ pub struct DutiesService<T, E: EthSpec> {
     /// proposals for any validators which are not registered locally.
     pub proposers: RwLock<ProposerMap>,
     /// Map from validator index to sync committee duties.
-    pub sync_duties: SyncDutiesMap,
+    pub sync_duties: SyncDutiesMap<E>,
     /// Provides the canonical list of locally-managed validators.
     pub validator_store: Arc<ValidatorStore<T, E>>,
     /// Tracks the current slot.
     pub slot_clock: T,
     /// Provides HTTP access to remote beacon nodes.
     pub beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
-    pub enable_high_validator_count_metrics: bool,
+    /// The runtime for spawning tasks.
     pub context: RuntimeContext<E>,
+    /// The current chain spec.
     pub spec: ChainSpec,
+    /// Whether we permit large validator counts in the metrics.
+    pub enable_high_validator_count_metrics: bool,
+    /// If this validator is running in distributed mode.
+    pub distributed: bool,
 }

 impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
@@ -779,14 +784,14 @@ async fn poll_beacon_attesters_for_epoch(
     // request for extra data unless necessary in order to save on network bandwidth.
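// Editorial worked example for the expiry filter added to
// `SubscriptionSlots::new` above (offset values assumed for illustration):
// with ATTESTATION_SUBSCRIPTION_OFFSETS containing e.g. [2, 3, 8], a duty at
// slot 100 seen at current_slot = 97 keeps scheduled slot 98 (100 - 2,
// since 98 > 97) but drops slots 97 (100 - 3) and 92 (100 - 8), so no
// already-expired subscriptions are ever sent to the beacon node.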
     let uninitialized_validators =
         get_uninitialized_validators(duties_service, &epoch, local_pubkeys);
-    let indices_to_request = if !uninitialized_validators.is_empty() {
+    let initial_indices_to_request = if !uninitialized_validators.is_empty() {
         uninitialized_validators.as_slice()
     } else {
         &local_indices[0..min(INITIAL_DUTIES_QUERY_SIZE, local_indices.len())]
     };

     let response =
-        post_validator_duties_attester(duties_service, epoch, indices_to_request).await?;
+        post_validator_duties_attester(duties_service, epoch, initial_indices_to_request).await?;
     let dependent_root = response.dependent_root;

     // Find any validators which have conflicting (epoch, dependent_root) values or missing duties for the epoch.
@@ -810,24 +815,29 @@ async fn poll_beacon_attesters_for_epoch(
         return Ok(());
     }

-    // Filter out validators which have already been requested.
-    let initial_duties = &response.data;
+    // Make a request for all indices that require updating which we have not already made a request
+    // for.
     let indices_to_request = validators_to_update
         .iter()
-        .filter(|&&&pubkey| !initial_duties.iter().any(|duty| duty.pubkey == pubkey))
        .filter_map(|pubkey| duties_service.validator_store.validator_index(pubkey))
+        .filter(|validator_index| !initial_indices_to_request.contains(validator_index))
        .collect::<Vec<_>>();

-    let new_duties = if !indices_to_request.is_empty() {
+    // Filter the initial duties by their relevance so that we don't hit the warning below about
+    // overwriting duties. There was previously a bug here.
+    let new_initial_duties = response
+        .data
+        .into_iter()
+        .filter(|duty| validators_to_update.contains(&&duty.pubkey));
+
+    let mut new_duties = if !indices_to_request.is_empty() {
         post_validator_duties_attester(duties_service, epoch, indices_to_request.as_slice())
             .await?
             .data
-            .into_iter()
-            .chain(response.data)
-            .collect::<Vec<_>>()
     } else {
-        response.data
+        vec![]
     };
+    new_duties.extend(new_initial_duties);

     drop(fetch_timer);

@@ -846,26 +856,53 @@ async fn poll_beacon_attesters_for_epoch(
     // Update the duties service with the new `DutyAndProof` messages.
     let mut attesters = duties_service.attesters.write();
     let mut already_warned = Some(());
+    let current_slot = duties_service
+        .slot_clock
+        .now_or_genesis()
+        .unwrap_or_default();
     for duty in &new_duties {
         let attester_map = attesters.entry(duty.pubkey).or_default();

         // Create initial entries in the map without selection proofs. We'll compute them in the
         // background later to avoid creating a thundering herd of signing threads whenever new
         // duties are computed.
-        let duty_and_proof = DutyAndProof::new_without_selection_proof(duty.clone());
+        let duty_and_proof = DutyAndProof::new_without_selection_proof(duty.clone(), current_slot);

-        if let Some((prior_dependent_root, _)) =
-            attester_map.insert(epoch, (dependent_root, duty_and_proof))
-        {
-            // Using `already_warned` avoids excessive logs.
-            if dependent_root != prior_dependent_root && already_warned.take().is_some() {
-                warn!(
-                    log,
-                    "Attester duties re-org";
-                    "prior_dependent_root" => %prior_dependent_root,
-                    "dependent_root" => %dependent_root,
-                    "msg" => "this may happen from time to time"
-                )
+        match attester_map.entry(epoch) {
+            hash_map::Entry::Occupied(mut occupied) => {
+                let mut_value = occupied.get_mut();
+                let (prior_dependent_root, prior_duty_and_proof) = &mut_value;
+
+                // Guard against overwriting an existing value for the same duty. If we did
+                // overwrite we could lose a selection proof or information from
+                // `subscription_slots`.
+                // Hitting this branch should be prevented by our logic for
+                // fetching duties only for unknown indices.
+                if dependent_root == *prior_dependent_root
+                    && prior_duty_and_proof.duty == duty_and_proof.duty
+                {
+                    warn!(
+                        log,
+                        "Redundant attester duty update";
+                        "dependent_root" => %dependent_root,
+                        "validator_index" => duty.validator_index,
+                    );
+                    continue;
+                }
+
+                // Using `already_warned` avoids excessive logs.
+                if dependent_root != *prior_dependent_root && already_warned.take().is_some() {
+                    warn!(
+                        log,
+                        "Attester duties re-org";
+                        "prior_dependent_root" => %prior_dependent_root,
+                        "dependent_root" => %dependent_root,
+                        "msg" => "this may happen from time to time"
+                    )
+                }
+                *mut_value = (dependent_root, duty_and_proof);
+            }
+            hash_map::Entry::Vacant(vacant) => {
+                vacant.insert((dependent_root, duty_and_proof));
             }
         }
     }
@@ -997,7 +1034,13 @@ async fn fill_in_selection_proofs(
             continue;
         };

-        let lookahead_slot = current_slot + SELECTION_PROOF_SLOT_LOOKAHEAD;
+        let selection_lookahead = if duties_service.distributed {
+            SELECTION_PROOF_SLOT_LOOKAHEAD_DVT
+        } else {
+            SELECTION_PROOF_SLOT_LOOKAHEAD
+        };
+
+        let lookahead_slot = current_slot + selection_lookahead;

         let mut relevant_duties = duties_by_slot.split_off(&lookahead_slot);
         std::mem::swap(&mut relevant_duties, &mut duties_by_slot);
@@ -1016,12 +1059,13 @@ async fn fill_in_selection_proofs(
         // Sign selection proofs (serially).
         let duty_and_proof_results = stream::iter(relevant_duties.into_values().flatten())
             .then(|duty| async {
-                DutyAndProof::new_with_selection_proof(
-                    duty,
+                let opt_selection_proof = make_selection_proof(
+                    &duty,
                     &duties_service.validator_store,
                     &duties_service.spec,
                 )
-                .await
+                .await?;
+                Ok((duty, opt_selection_proof))
             })
             .collect::<Vec<_>>()
             .await;
@@ -1029,7 +1073,7 @@ async fn fill_in_selection_proofs(
         // Add to attesters store.
         let mut attesters = duties_service.attesters.write();
         for result in duty_and_proof_results {
-            let duty_and_proof = match result {
+            let (duty, selection_proof) = match result {
                 Ok(duty_and_proof) => duty_and_proof,
                 Err(Error::FailedToProduceSelectionProof(
                     ValidatorStoreError::UnknownPubkey(pubkey),
@@ -1057,12 +1101,12 @@ async fn fill_in_selection_proofs(
                 }
             };

-            let attester_map = attesters.entry(duty_and_proof.duty.pubkey).or_default();
-            let epoch = duty_and_proof.duty.slot.epoch(E::slots_per_epoch());
+            let attester_map = attesters.entry(duty.pubkey).or_default();
+            let epoch = duty.slot.epoch(E::slots_per_epoch());
             match attester_map.entry(epoch) {
                 hash_map::Entry::Occupied(mut entry) => {
                     // No need to update duties for which no proof was computed.
-                    let Some(selection_proof) = duty_and_proof.selection_proof else {
+                    let Some(selection_proof) = selection_proof else {
                         continue;
                     };

@@ -1083,6 +1127,14 @@ async fn fill_in_selection_proofs(
                     }
                 }
                 hash_map::Entry::Vacant(entry) => {
+                    // This probably shouldn't happen, but we have enough info to fill in the
+                    // entry so we may as well.
+                    let subscription_slots = SubscriptionSlots::new(duty.slot, current_slot);
+                    let duty_and_proof = DutyAndProof {
+                        duty,
+                        selection_proof,
+                        subscription_slots,
+                    };
                     entry.insert((dependent_root, duty_and_proof));
                 }
             }
@@ -1306,13 +1358,15 @@ mod test {

     #[test]
     fn subscription_slots_exact() {
+        // Set current slot in the past so no duties are considered expired.
+        let current_slot = Slot::new(0);
         for duty_slot in [
-            Slot::new(32),
+            Slot::new(33),
             Slot::new(47),
             Slot::new(99),
             Slot::new(1002003),
         ] {
-            let subscription_slots = SubscriptionSlots::new(duty_slot);
+            let subscription_slots = SubscriptionSlots::new(duty_slot, current_slot);

             // Run twice to check idempotence (subscription slots shouldn't be marked as done until
             // we mark them manually).
@@ -1346,8 +1400,9 @@ mod test {
     #[test]
     fn subscription_slots_mark_multiple() {
         for (i, offset) in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter().enumerate() {
+            let current_slot = Slot::new(0);
             let duty_slot = Slot::new(64);
-            let subscription_slots = SubscriptionSlots::new(duty_slot);
+            let subscription_slots = SubscriptionSlots::new(duty_slot, current_slot);

             subscription_slots.record_successful_subscription_at(duty_slot - offset);

@@ -1362,4 +1417,22 @@ mod test {
             }
         }
     }
+
+    /// Test the boundary condition where all subscription slots are *just* expired.
+    #[test]
+    fn subscription_slots_expired() {
+        let current_slot = Slot::new(100);
+        let duty_slot = current_slot + ATTESTATION_SUBSCRIPTION_OFFSETS[0];
+        let subscription_slots = SubscriptionSlots::new(duty_slot, current_slot);
+        for offset in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter() {
+            let slot = duty_slot - offset;
+            assert!(!subscription_slots.should_send_subscription_at(slot));
+        }
+        assert!(subscription_slots.slots.is_empty());
+
+        // If the duty slot is 1 later, we get a non-empty set of duties.
+        let subscription_slots = SubscriptionSlots::new(duty_slot + 1, current_slot);
+        assert_eq!(subscription_slots.slots.len(), 1);
+        assert!(subscription_slots.should_send_subscription_at(current_slot + 1));
+    }
 }
diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs
index de42fa587..3618b4714 100644
--- a/validator_client/src/duties_service/sync.rs
+++ b/validator_client/src/duties_service/sync.rs
@@ -7,18 +7,18 @@ use crate::{
 };

 use futures::future::join_all;
-use itertools::Itertools;
 use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
 use slog::{crit, debug, info, warn};
 use slot_clock::SlotClock;
 use std::collections::{HashMap, HashSet};
+use std::marker::PhantomData;
 use std::sync::Arc;
-use types::{
-    ChainSpec, Epoch, EthSpec, PublicKeyBytes, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId,
-};
+use types::{ChainSpec, EthSpec, PublicKeyBytes, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId};

-/// Number of epochs in advance to compute selection proofs.
+/// Number of epochs in advance to compute selection proofs when not in `distributed` mode.
 pub const AGGREGATION_PRE_COMPUTE_EPOCHS: u64 = 2;
+/// Number of slots in advance to compute selection proofs when in `distributed` mode.
+pub const AGGREGATION_PRE_COMPUTE_SLOTS_DISTRIBUTED: u64 = 1;

 /// Top-level data-structure containing sync duty information.
 ///
@@ -32,9 +32,12 @@ pub const AGGREGATION_PRE_COMPUTE_EPOCHS: u64 = 2;
 /// 2. One-at-a-time locking. For the innermost locks on the aggregator duties, all of the functions
 ///    in this file take care to only lock one validator at a time. We never hold a lock while
 ///    trying to obtain another one (hence no lock ordering issues).
-pub struct SyncDutiesMap {
+pub struct SyncDutiesMap<E: EthSpec> {
     /// Map from sync committee period to duties for members of that sync committee.
     committees: RwLock<HashMap<u64, CommitteeDuties>>,
+    /// Whether we are in `distributed` mode and using reduced lookahead for aggregate pre-compute.
+    distributed: bool,
+    _phantom: PhantomData<E>,
 }

 /// Duties for a single sync committee period.
@@ -59,8 +62,8 @@ pub struct ValidatorDuties {

 /// Aggregator duties for a single validator.
 pub struct AggregatorDuties {
-    /// The epoch up to which aggregation proofs have already been computed (inclusive).
-    pre_compute_epoch: RwLock<Option<Epoch>>,
+    /// The slot up to which aggregation proofs have already been computed (inclusive).
+    pre_compute_slot: RwLock<Option<Slot>>,
     /// Map from slot & subnet ID to proof that this validator is an aggregator.
     ///
     /// The slot is the slot at which the signed contribution and proof should be broadcast,
@@ -82,15 +85,15 @@ pub struct SlotDuties {
     pub aggregators: HashMap<SyncSubnetId, Vec<(u64, PublicKeyBytes, SyncSelectionProof)>>,
 }

-impl Default for SyncDutiesMap {
-    fn default() -> Self {
+impl<E: EthSpec> SyncDutiesMap<E> {
+    pub fn new(distributed: bool) -> Self {
         Self {
             committees: RwLock::new(HashMap::new()),
+            distributed,
+            _phantom: PhantomData,
         }
     }
-}

-impl SyncDutiesMap {
     /// Check if duties are already known for all of the given validators for `committee_period`.
     fn all_duties_known(&self, committee_period: u64, validator_indices: &[u64]) -> bool {
         self.committees
@@ -104,22 +107,34 @@ impl SyncDutiesMap {
             })
     }

+    /// Number of slots in advance to compute selection proofs.
+    fn aggregation_pre_compute_slots(&self) -> u64 {
+        if self.distributed {
+            AGGREGATION_PRE_COMPUTE_SLOTS_DISTRIBUTED
+        } else {
+            E::slots_per_epoch() * AGGREGATION_PRE_COMPUTE_EPOCHS
+        }
+    }
+
     /// Prepare for pre-computation of selection proofs for `committee_period`.
     ///
-    /// Return the epoch up to which proofs should be pre-computed, as well as a vec of
-    /// `(previous_pre_compute_epoch, sync_duty)` pairs for all validators which need to have proofs
+    /// Return the slot up to which proofs should be pre-computed, as well as a vec of
+    /// `(previous_pre_compute_slot, sync_duty)` pairs for all validators which need to have proofs
     /// computed. See `fill_in_aggregation_proofs` for the actual calculation.
     fn prepare_for_aggregator_pre_compute(
         &self,
         committee_period: u64,
-        current_epoch: Epoch,
+        current_slot: Slot,
         spec: &ChainSpec,
-    ) -> (Epoch, Vec<(Epoch, SyncDuty)>) {
-        let default_start_epoch =
-            std::cmp::max(current_epoch, first_epoch_of_period(committee_period, spec));
-        let pre_compute_epoch = std::cmp::min(
-            current_epoch + AGGREGATION_PRE_COMPUTE_EPOCHS,
-            last_epoch_of_period(committee_period, spec),
+    ) -> (Slot, Vec<(Slot, SyncDuty)>) {
+        let default_start_slot = std::cmp::max(
+            current_slot,
+            first_slot_of_period::<E>(committee_period, spec),
+        );
+        let pre_compute_lookahead_slots = self.aggregation_pre_compute_slots();
+        let pre_compute_slot = std::cmp::min(
+            current_slot + pre_compute_lookahead_slots,
+            last_slot_of_period::<E>(committee_period, spec),
         );

         let pre_compute_duties = self.committees.read().get(&committee_period).map_or_else(
@@ -130,18 +145,18 @@ impl SyncDutiesMap {
                     .values()
                     .filter_map(|maybe_duty| {
                         let duty = maybe_duty.as_ref()?;
-                        let old_pre_compute_epoch = duty
+                        let old_pre_compute_slot = duty
                             .aggregation_duties
-                            .pre_compute_epoch
+                            .pre_compute_slot
                             .write()
-                            .replace(pre_compute_epoch);
+                            .replace(pre_compute_slot);

-                        match old_pre_compute_epoch {
+                        match old_pre_compute_slot {
                             // No proofs pre-computed previously, compute all from the start of
-                            // the period or the current epoch (whichever is later).
-                            None => Some((default_start_epoch, duty.duty.clone())),
+                            // the period or the current slot (whichever is later).
+                            None => Some((default_start_slot, duty.duty.clone())),
                             // Proofs computed up to `prev`, start from the subsequent epoch.
-                            Some(prev) if prev < pre_compute_epoch => {
+                            Some(prev) if prev < pre_compute_slot => {
                                 Some((prev + 1, duty.duty.clone()))
                             }
                             // Proofs already known, no need to compute.
@@ -151,7 +166,7 @@ impl SyncDutiesMap {
                     .collect()
             },
         );
-        (pre_compute_epoch, pre_compute_duties)
+        (pre_compute_slot, pre_compute_duties)
     }

     fn get_or_create_committee_duties<'a, 'b>(
@@ -176,7 +191,7 @@ impl SyncDutiesMap {
    /// Get duties for all validators for the given `wall_clock_slot`.
    ///
    /// This is the entry-point for the sync committee service.
-    pub fn get_duties_for_slot<E: EthSpec>(
+    pub fn get_duties_for_slot(
         &self,
         wall_clock_slot: Slot,
         spec: &ChainSpec,
@@ -253,7 +268,7 @@ impl ValidatorDuties {
         Self {
             duty,
             aggregation_duties: AggregatorDuties {
-                pre_compute_epoch: RwLock::new(None),
+                pre_compute_slot: RwLock::new(None),
                 proofs: RwLock::new(HashMap::new()),
             },
         }
@@ -265,12 +280,12 @@ fn epoch_offset(spec: &ChainSpec) -> u64 {
     spec.epochs_per_sync_committee_period.as_u64() / 2
 }

-fn first_epoch_of_period(sync_committee_period: u64, spec: &ChainSpec) -> Epoch {
-    spec.epochs_per_sync_committee_period * sync_committee_period
+fn first_slot_of_period<E: EthSpec>(sync_committee_period: u64, spec: &ChainSpec) -> Slot {
+    (spec.epochs_per_sync_committee_period * sync_committee_period).start_slot(E::slots_per_epoch())
 }

-fn last_epoch_of_period(sync_committee_period: u64, spec: &ChainSpec) -> Epoch {
-    first_epoch_of_period(sync_committee_period + 1, spec) - 1
+fn last_slot_of_period<E: EthSpec>(sync_committee_period: u64, spec: &ChainSpec) -> Slot {
+    first_slot_of_period::<E>(sync_committee_period + 1, spec) - 1
 }

 pub async fn poll_sync_committee_duties<T: SlotClock + 'static, E: EthSpec>(
@@ -278,11 +293,11 @@ pub async fn poll_sync_committee_duties<T: SlotClock + 'static, E: EthSpec>(
 ) -> Result<(), Error> {
     let sync_duties = &duties_service.sync_duties;
     let spec = &duties_service.spec;
-    let current_epoch = duties_service
+    let current_slot = duties_service
         .slot_clock
         .now()
-        .ok_or(Error::UnableToReadSlotClock)?
-        .epoch(E::slots_per_epoch());
+        .ok_or(Error::UnableToReadSlotClock)?;
+    let current_epoch = current_slot.epoch(E::slots_per_epoch());

     // If the Altair fork is yet to be activated, do not attempt to poll for duties.
     if spec
@@ -330,8 +345,8 @@ pub async fn poll_sync_committee_duties<T: SlotClock + 'static, E: EthSpec>(
     }

     // Pre-compute aggregator selection proofs for the current period.
-    let (current_pre_compute_epoch, new_pre_compute_duties) = sync_duties
-        .prepare_for_aggregator_pre_compute(current_sync_committee_period, current_epoch, spec);
+    let (current_pre_compute_slot, new_pre_compute_duties) = sync_duties
+        .prepare_for_aggregator_pre_compute(current_sync_committee_period, current_slot, spec);

     if !new_pre_compute_duties.is_empty() {
         let sub_duties_service = duties_service.clone();
@@ -341,8 +356,8 @@ pub async fn poll_sync_committee_duties<T: SlotClock + 'static, E: EthSpec>(
                     sub_duties_service,
                     &new_pre_compute_duties,
                     current_sync_committee_period,
-                    current_epoch,
-                    current_pre_compute_epoch,
+                    current_slot,
+                    current_pre_compute_slot,
                 )
                 .await
             },
@@ -368,11 +383,14 @@ pub async fn poll_sync_committee_duties<T: SlotClock + 'static, E: EthSpec>(
     }

     // Pre-compute aggregator selection proofs for the next period.
-    if (current_epoch + AGGREGATION_PRE_COMPUTE_EPOCHS).sync_committee_period(spec)?
+    let aggregate_pre_compute_lookahead_slots = sync_duties.aggregation_pre_compute_slots();
+    if (current_slot + aggregate_pre_compute_lookahead_slots)
+        .epoch(E::slots_per_epoch())
+        .sync_committee_period(spec)?
         == next_sync_committee_period
     {
-        let (pre_compute_epoch, new_pre_compute_duties) = sync_duties
-            .prepare_for_aggregator_pre_compute(next_sync_committee_period, current_epoch, spec);
+        let (pre_compute_slot, new_pre_compute_duties) = sync_duties
+            .prepare_for_aggregator_pre_compute(next_sync_committee_period, current_slot, spec);

         if !new_pre_compute_duties.is_empty() {
             let sub_duties_service = duties_service.clone();
@@ -382,8 +400,8 @@ pub async fn poll_sync_committee_duties<T: SlotClock + 'static, E: EthSpec>(
                         sub_duties_service,
                         &new_pre_compute_duties,
                         next_sync_committee_period,
-                        current_epoch,
-                        pre_compute_epoch,
+                        current_slot,
+                        pre_compute_slot,
                     )
                     .await
                 },
@@ -495,10 +513,10 @@ pub async fn poll_sync_committee_duties_for_period<T: SlotClock + 'static, E: EthSpec>(
     duties_service: Arc<DutiesService<T, E>>,
-    pre_compute_duties: &[(Epoch, SyncDuty)],
+    pre_compute_duties: &[(Slot, SyncDuty)],
     sync_committee_period: u64,
-    current_epoch: Epoch,
-    pre_compute_epoch: Epoch,
+    current_slot: Slot,
+    pre_compute_slot: Slot,
 ) {
     let log = duties_service.context.log();

@@ -506,16 +524,16 @@ pub async fn fill_in_aggregation_proofs(
         log,
         "Calculating sync selection proofs";
         "period" => sync_committee_period,
-        "current_epoch" => current_epoch,
-        "pre_compute_epoch" => pre_compute_epoch
+        "current_slot" => current_slot,
+        "pre_compute_slot" => pre_compute_slot
     );

-    // Generate selection proofs for each validator at each slot, one epoch at a time.
-    for epoch in (current_epoch.as_u64()..=pre_compute_epoch.as_u64()).map(Epoch::new) {
+    // Generate selection proofs for each validator at each slot, one slot at a time.
+    for slot in (current_slot.as_u64()..=pre_compute_slot.as_u64()).map(Slot::new) {
         let mut validator_proofs = vec![];
-        for (validator_start_epoch, duty) in pre_compute_duties {
-            // Proofs are already known at this epoch for this validator.
-            if epoch < *validator_start_epoch {
+        for (validator_start_slot, duty) in pre_compute_duties {
+            // Proofs are already known at this slot for this validator.
+            if slot < *validator_start_slot {
                 continue;
             }

@@ -533,67 +551,64 @@ pub async fn fill_in_aggregation_proofs(
             // Create futures to produce proofs.
             let duties_service_ref = &duties_service;
-            let futures = epoch
-                .slot_iter(E::slots_per_epoch())
-                .cartesian_product(&subnet_ids)
-                .map(|(duty_slot, subnet_id)| async move {
-                    // Construct proof for prior slot.
-                    let slot = duty_slot - 1;
+            let futures = subnet_ids.iter().map(|subnet_id| async move {
+                // Construct proof for prior slot.
+                let proof_slot = slot - 1;
- debug!( - log, - "Missing pubkey for sync selection proof"; - "pubkey" => ?pubkey, - "pubkey" => ?duty.pubkey, - "slot" => slot, - ); - return None; - } - Err(e) => { - warn!( - log, - "Unable to sign selection proof"; - "error" => ?e, - "pubkey" => ?duty.pubkey, - "slot" => slot, - ); - return None; - } - }; - - match proof.is_aggregator::() { - Ok(true) => { - debug!( - log, - "Validator is sync aggregator"; - "validator_index" => duty.validator_index, - "slot" => slot, - "subnet_id" => %subnet_id, - ); - Some(((slot, *subnet_id), proof)) - } - Ok(false) => None, - Err(e) => { - warn!( - log, - "Error determining is_aggregator"; - "pubkey" => ?duty.pubkey, - "slot" => slot, - "error" => ?e, - ); - None - } + let proof = match duties_service_ref + .validator_store + .produce_sync_selection_proof(&duty.pubkey, proof_slot, *subnet_id) + .await + { + Ok(proof) => proof, + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!( + log, + "Missing pubkey for sync selection proof"; + "pubkey" => ?pubkey, + "pubkey" => ?duty.pubkey, + "slot" => proof_slot, + ); + return None; } - }); + Err(e) => { + warn!( + log, + "Unable to sign selection proof"; + "error" => ?e, + "pubkey" => ?duty.pubkey, + "slot" => proof_slot, + ); + return None; + } + }; + + match proof.is_aggregator::() { + Ok(true) => { + debug!( + log, + "Validator is sync aggregator"; + "validator_index" => duty.validator_index, + "slot" => proof_slot, + "subnet_id" => %subnet_id, + ); + Some(((proof_slot, *subnet_id), proof)) + } + Ok(false) => None, + Err(e) => { + warn!( + log, + "Error determining is_aggregator"; + "pubkey" => ?duty.pubkey, + "slot" => proof_slot, + "error" => ?e, + ); + None + } + } + }); // Execute all the futures in parallel, collecting any successful results. let proofs = join_all(futures) @@ -635,7 +650,7 @@ pub async fn fill_in_aggregation_proofs( debug!( log, "Finished computing sync selection proofs"; - "epoch" => epoch, + "slot" => slot, "updated_validators" => num_validators_updated, ); } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index dcf66d2fb..a4480195e 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -685,7 +685,16 @@ pub fn serve( let maybe_graffiti = body.graffiti.clone().map(Into::into); let initialized_validators_rw_lock = validator_store.initialized_validators(); - let mut initialized_validators = initialized_validators_rw_lock.write(); + let initialized_validators = initialized_validators_rw_lock.upgradable_read(); + + // Do not make any changes if all fields are identical or unchanged. + fn equal_or_none( + current_value: Option, + new_value: Option, + ) -> bool { + new_value.is_none() || current_value == new_value + } + match ( initialized_validators.is_enabled(&validator_pubkey), initialized_validators.validator(&validator_pubkey.compress()), @@ -694,32 +703,65 @@ pub fn serve( "no validator for {:?}", validator_pubkey ))), + // If all specified parameters match their existing settings, then this + // change is a no-op. 
                         (Some(is_enabled), Some(initialized_validator))
-                            if Some(is_enabled) == body.enabled
-                                && initialized_validator.get_gas_limit() == body.gas_limit
-                                && initialized_validator.get_builder_boost_factor()
-                                    == body.builder_boost_factor
-                                && initialized_validator.get_builder_proposals()
-                                    == body.builder_proposals
-                                && initialized_validator.get_prefer_builder_proposals()
-                                    == body.prefer_builder_proposals
-                                && initialized_validator.get_graffiti() == maybe_graffiti =>
+                            if equal_or_none(Some(is_enabled), body.enabled)
+                                && equal_or_none(
+                                    initialized_validator.get_gas_limit(),
+                                    body.gas_limit,
+                                )
+                                && equal_or_none(
+                                    initialized_validator.get_builder_boost_factor(),
+                                    body.builder_boost_factor,
+                                )
+                                && equal_or_none(
+                                    initialized_validator.get_builder_proposals(),
+                                    body.builder_proposals,
+                                )
+                                && equal_or_none(
+                                    initialized_validator.get_prefer_builder_proposals(),
+                                    body.prefer_builder_proposals,
+                                )
+                                && equal_or_none(
+                                    initialized_validator.get_graffiti(),
+                                    maybe_graffiti,
+                                ) =>
+                        {
+                            Ok(())
+                        }
+                        // Disabling an already disabled validator *with no other changes* is a
+                        // no-op.
+                        (Some(false), None)
+                            if body.enabled.map_or(true, |enabled| !enabled)
+                                && body.gas_limit.is_none()
+                                && body.builder_boost_factor.is_none()
+                                && body.builder_proposals.is_none()
+                                && body.prefer_builder_proposals.is_none()
+                                && maybe_graffiti.is_none() =>
                         {
                             Ok(())
                         }
                         (Some(_), _) => {
+                            // Upgrade read lock only in the case where a write is actually
+                            // required.
+                            let mut initialized_validators_write =
+                                parking_lot::RwLockUpgradableReadGuard::upgrade(
+                                    initialized_validators,
+                                );
                             if let Some(handle) = task_executor.handle() {
                                 handle
                                     .block_on(
-                                        initialized_validators.set_validator_definition_fields(
-                                            &validator_pubkey,
-                                            body.enabled,
-                                            body.gas_limit,
-                                            body.builder_proposals,
-                                            body.builder_boost_factor,
-                                            body.prefer_builder_proposals,
-                                            body.graffiti,
-                                        ),
+                                        initialized_validators_write
+                                            .set_validator_definition_fields(
+                                                &validator_pubkey,
+                                                body.enabled,
+                                                body.gas_limit,
+                                                body.builder_proposals,
+                                                body.builder_boost_factor,
+                                                body.prefer_builder_proposals,
+                                                body.graffiti,
+                                            ),
                                     )
                                     .map_err(|e| {
                                         warp_utils::reject::custom_server_error(format!(
diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/src/http_api/test_utils.rs
index 7b0cb51ec..49ea4ef5b 100644
--- a/validator_client/src/http_api/test_utils.rs
+++ b/validator_client/src/http_api/test_utils.rs
@@ -80,6 +80,7 @@ impl ApiTester {
         let initialized_validators = InitializedValidators::from_definitions(
             validator_defs,
             validator_dir.path().into(),
+            Default::default(),
             log.clone(),
         )
         .await
diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs
index f7db76e4a..ba46ea63b 100644
--- a/validator_client/src/http_api/tests.rs
+++ b/validator_client/src/http_api/tests.rs
@@ -68,6 +68,7 @@ impl ApiTester {
         let initialized_validators = InitializedValidators::from_definitions(
             validator_defs,
             validator_dir.path().into(),
+            Config::default(),
             log.clone(),
         )
         .await
@@ -1177,6 +1178,58 @@ async fn validator_derived_builder_boost_factor_with_process_defaults() {
         .await;
 }

+#[tokio::test]
+async fn validator_builder_boost_factor_global_builder_proposals_true() {
+    let config = Config {
+        builder_proposals: true,
+        prefer_builder_proposals: false,
+        builder_boost_factor: None,
+        ..Config::default()
+    };
+    ApiTester::new_with_config(config)
+        .await
+        .assert_default_builder_boost_factor(None);
+}
+
+#[tokio::test]
+async fn validator_builder_boost_factor_global_builder_proposals_false() {
+    let config = Config {
+        builder_proposals: false,
+        prefer_builder_proposals: false,
+        builder_boost_factor: None,
+        ..Config::default()
+    };
+    ApiTester::new_with_config(config)
+        .await
+        .assert_default_builder_boost_factor(Some(0));
+}
+
+#[tokio::test]
+async fn validator_builder_boost_factor_global_prefer_builder_proposals_true() {
+    let config = Config {
+        builder_proposals: true,
+        prefer_builder_proposals: true,
+        builder_boost_factor: None,
+        ..Config::default()
+    };
+    ApiTester::new_with_config(config)
+        .await
+        .assert_default_builder_boost_factor(Some(u64::MAX));
+}
+
+#[tokio::test]
+async fn validator_builder_boost_factor_global_prefer_builder_proposals_true_override() {
+    let config = Config {
+        builder_proposals: false,
+        prefer_builder_proposals: true,
+        builder_boost_factor: None,
+        ..Config::default()
+    };
+    ApiTester::new_with_config(config)
+        .await
+        .assert_default_builder_boost_factor(Some(u64::MAX));
+}
+
 #[tokio::test]
 async fn prefer_builder_proposals_validator() {
     ApiTester::new()
diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs
index 7e4331dc8..c94115e5e 100644
--- a/validator_client/src/initialized_validators.rs
+++ b/validator_client/src/initialized_validators.rs
@@ -34,6 +34,7 @@ use validator_dir::Builder as ValidatorDirBuilder;

 use crate::key_cache;
 use crate::key_cache::KeyCache;
+use crate::Config;

 /// Default timeout for a request to a remote signer for a signature.
 ///
@@ -208,6 +209,7 @@ impl InitializedValidator {
         key_cache: &mut KeyCache,
         key_stores: &mut HashMap<PathBuf, Keystore>,
         web3_signer_client_map: &mut Option<HashMap<Web3SignerDefinition, Client>>,
+        config: &Config,
     ) -> Result<Self, Error> {
         if !def.enabled {
             return Err(Error::UnableToInitializeDisabledValidator);
@@ -311,6 +313,8 @@ impl InitializedValidator {
                         web3_signer.client_identity_path.clone(),
                         web3_signer.client_identity_password.clone(),
                         request_timeout,
+                        config.web3_signer_keep_alive_timeout,
+                        config.web3_signer_max_idle_connections,
                     )?;
                     client_map.insert(web3_signer, client.clone());
                     client
@@ -325,6 +329,8 @@ impl InitializedValidator {
                         web3_signer.client_identity_path.clone(),
                         web3_signer.client_identity_password.clone(),
                         request_timeout,
+                        config.web3_signer_keep_alive_timeout,
+                        config.web3_signer_max_idle_connections,
                     )?;
                     new_web3_signer_client_map.insert(web3_signer, client.clone());
                     *web3_signer_client_map = Some(new_web3_signer_client_map);
@@ -393,8 +399,13 @@ fn build_web3_signer_client(
     client_identity_path: Option<PathBuf>,
     client_identity_password: Option<String>,
     request_timeout: Duration,
+    keep_alive_timeout: Option<Duration>,
+    max_idle_connections: Option<usize>,
 ) -> Result<Client, Error> {
-    let builder = Client::builder().timeout(request_timeout);
+    let builder = Client::builder()
+        .timeout(request_timeout)
+        .pool_idle_timeout(keep_alive_timeout)
+        .pool_max_idle_per_host(max_idle_connections.unwrap_or(usize::MAX));

     let builder = if let Some(path) = root_certificate_path {
         let certificate = load_pem_certificate(path)?;
@@ -475,6 +486,7 @@ pub struct InitializedValidators {
     web3_signer_client_map: Option<HashMap<Web3SignerDefinition, Client>>,
     /// For logging via `slog`.
     log: Logger,
+    config: Config,
 }

 impl InitializedValidators {
@@ -482,6 +494,7 @@ impl InitializedValidators {
     pub async fn from_definitions(
         definitions: ValidatorDefinitions,
         validators_dir: PathBuf,
+        config: Config,
         log: Logger,
     ) -> Result<Self, Error> {
         let mut this = Self {
@@ -489,6 +502,7 @@ impl InitializedValidators {
             definitions,
             validators: HashMap::default(),
             web3_signer_client_map: None,
+            config,
             log,
         };
         this.update_validators().await?;
@@ -1234,6 +1248,7 @@ impl InitializedValidators {
                         &mut key_cache,
                         &mut key_stores,
                         &mut None,
+                        &self.config,
                     )
                     .await
                     {
@@ -1284,6 +1299,7 @@ impl InitializedValidators {
                         &mut key_cache,
                         &mut key_stores,
                         &mut self.web3_signer_client_map,
+                        &self.config,
                     )
                     .await
                     {
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs
index 89fc03762..52de95a37 100644
--- a/validator_client/src/lib.rs
+++ b/validator_client/src/lib.rs
@@ -39,7 +39,7 @@ use account_utils::validator_definitions::ValidatorDefinitions;
 use attestation_service::{AttestationService, AttestationServiceBuilder};
 use block_service::{BlockService, BlockServiceBuilder};
 use clap::ArgMatches;
-use duties_service::DutiesService;
+use duties_service::{sync::SyncDutiesMap, DutiesService};
 use environment::RuntimeContext;
 use eth2::{reqwest::ClientBuilder, types::Graffiti, BeaconNodeHttpClient, StatusCode, Timeouts};
 use http_api::ApiSecret;
@@ -192,6 +192,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
         let validators = InitializedValidators::from_definitions(
             validator_defs,
             config.validator_dir.clone(),
+            config.clone(),
             log.clone(),
         )
         .await
@@ -450,13 +451,14 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
         let duties_service = Arc::new(DutiesService {
             attesters: <_>::default(),
             proposers: <_>::default(),
-            sync_duties: <_>::default(),
+            sync_duties: SyncDutiesMap::new(config.distributed),
             slot_clock: slot_clock.clone(),
             beacon_nodes: beacon_nodes.clone(),
             validator_store: validator_store.clone(),
             spec: context.eth2_config.spec.clone(),
             context: duties_context,
             enable_high_validator_count_metrics: config.enable_high_validator_count_metrics,
+            distributed: config.distributed,
         });

         // Update the metrics server.
diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs
index 0de2f2f54..4ead0ed3c 100644
--- a/validator_client/src/signing_method.rs
+++ b/validator_client/src/signing_method.rs
@@ -117,6 +117,20 @@ impl<E: EthSpec> SigningContext<E> {
 }

 impl SigningMethod {
+    /// Return whether this signing method requires local slashing protection.
+    pub fn requires_local_slashing_protection(
+        &self,
+        enable_web3signer_slashing_protection: bool,
+    ) -> bool {
+        match self {
+            // Slashing protection is ALWAYS required for local keys. DO NOT TURN THIS OFF.
+            SigningMethod::LocalKeystore { .. } => true,
+            // Slashing protection is only required for remote signer keys when the configuration
+            // dictates that it is desired.
+            SigningMethod::Web3Signer { .. } => enable_web3signer_slashing_protection,
+        }
+    }
+
     /// Return the signature of `signable_message`, with respect to the `signing_context`.
     pub async fn get_signature<E: EthSpec, Payload: AbstractExecPayload<E>>(
         &self,
diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs
index 90b62cd3b..f7abb3855 100644
--- a/validator_client/src/sync_committee_service.rs
+++ b/validator_client/src/sync_committee_service.rs
@@ -161,7 +161,7 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> {
         let Some(slot_duties) = self
             .duties_service
             .sync_duties
-            .get_duties_for_slot::<E>(slot, &self.duties_service.spec)
+            .get_duties_for_slot(slot, &self.duties_service.spec)
         else {
             debug!(log, "No duties known for slot {}", slot);
             return Ok(());
@@ -548,7 +548,7 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> {
             match self
                 .duties_service
                 .sync_duties
-                .get_duties_for_slot::<E>(duty_slot, spec)
+                .get_duties_for_slot(duty_slot, spec)
             {
                 Some(duties) => subscriptions.extend(subscriptions_from_sync_duties(
                     duties.duties,
diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs
index c913b9906..b8c11a79b 100644
--- a/validator_client/src/validator_store.rs
+++ b/validator_client/src/validator_store.rs
@@ -97,6 +97,7 @@ pub struct ValidatorStore<T, E: EthSpec> {
     fee_recipient_process: Option<Address>,
     gas_limit: Option<u64>,
     builder_proposals: bool,
+    enable_web3signer_slashing_protection: bool,
     produce_block_v3: bool,
     prefer_builder_proposals: bool,
     builder_boost_factor: Option<u64>,
@@ -131,6 +132,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
             fee_recipient_process: config.fee_recipient,
             gas_limit: config.gas_limit,
             builder_proposals: config.builder_proposals,
+            enable_web3signer_slashing_protection: config.enable_web3signer_slashing_protection,
             produce_block_v3: config.produce_block_v3,
             prefer_builder_proposals: config.prefer_builder_proposals,
             builder_boost_factor: config.builder_boost_factor,
@@ -572,7 +574,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
             return Some(u64::MAX);
         }
         self.builder_boost_factor.or({
-            if self.builder_proposals {
+            if !self.builder_proposals {
                 Some(0)
             } else {
                 None
@@ -604,19 +606,26 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         let signing_context = self.signing_context(Domain::BeaconProposer, signing_epoch);
         let domain_hash = signing_context.domain_hash(&self.spec);

+        let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?;
+
         // Check for slashing conditions.
-        let slashing_status = self.slashing_protection.check_and_insert_block_proposal(
-            &validator_pubkey,
-            &block.block_header(),
-            domain_hash,
-        );
+        let slashing_status = if signing_method
+            .requires_local_slashing_protection(self.enable_web3signer_slashing_protection)
+        {
+            self.slashing_protection.check_and_insert_block_proposal(
+                &validator_pubkey,
+                &block.block_header(),
+                domain_hash,
+            )
+        } else {
+            Ok(Safe::Valid)
+        };

         match slashing_status {
             // We can safely sign this block without slashing.
             Ok(Safe::Valid) => {
                 metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::SUCCESS]);
-                let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?;
                 let signature = signing_method
                     .get_signature::<E, Payload>(
                         SignableMessage::BeaconBlock(&block),
@@ -672,20 +681,28 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
             });
         }

+        // Get the signing method and check doppelganger protection.
+        let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?;
+
         // Checking for slashing conditions.
         let signing_epoch = attestation.data.target.epoch;
         let signing_context = self.signing_context(Domain::BeaconAttester, signing_epoch);
         let domain_hash = signing_context.domain_hash(&self.spec);

-        let slashing_status = self.slashing_protection.check_and_insert_attestation(
-            &validator_pubkey,
-            &attestation.data,
-            domain_hash,
-        );
+        let slashing_status = if signing_method
+            .requires_local_slashing_protection(self.enable_web3signer_slashing_protection)
+        {
+            self.slashing_protection.check_and_insert_attestation(
+                &validator_pubkey,
+                &attestation.data,
+                domain_hash,
+            )
+        } else {
+            Ok(Safe::Valid)
+        };

         match slashing_status {
             // We can safely sign this attestation.
             Ok(Safe::Valid) => {
-                let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?;
                 let signature = signing_method
                     .get_signature::<E, BlindedPayload<E>>(
                         SignableMessage::AttestationData(&attestation.data),
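// Editorial recap (derived from the `!self.builder_proposals` hunk above and
// the tests added in http_api/tests.rs): the effective default builder boost
// factor resolves as follows:
//
//   prefer_builder_proposals | builder_boost_factor | builder_proposals | result
//   -------------------------+----------------------+-------------------+----------------
//   true                     | any                  | any               | Some(u64::MAX)
//   false                    | Some(f)              | any               | Some(f)
//   false                    | None                 | true              | None
//   false                    | None                 | false             | Some(0)
//
// Negating the condition is the actual bug fix here: previously, enabling
// builder proposals forced the factor to Some(0), which is the opposite of
// the intended behaviour exercised by the new tests.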