From b1d2510d1bb4d791c7c8e3da0a023fad408d307e Mon Sep 17 00:00:00 2001
From: Divma
Date: Thu, 29 Sep 2022 01:50:11 +0000
Subject: [PATCH] Libp2p v0.48.0 upgrade (#3547)

## Issue Addressed

Upgrades libp2p to v0.48.0. This is the compilation of:

- [x] #3495
- [x] #3497
- [x] #3491
- [x] #3546
- [x] #3553

Co-authored-by: Age Manning
---
 .github/workflows/local-testnet.yml | 3 +-
 .github/workflows/test-suite.yml | 92 +-
 Cargo.lock | 159 +--
 Cross.toml | 4 +-
 Dockerfile | 2 +-
 Makefile | 2 +-
 beacon_node/http_api/tests/common.rs | 4 +-
 beacon_node/lighthouse_network/Cargo.toml | 4 +-
 .../src/discovery/enr_ext.rs | 2 -
 .../lighthouse_network/src/discovery/mod.rs | 43 +-
 beacon_node/lighthouse_network/src/lib.rs | 9 +-
 .../src/peer_manager/mod.rs | 16 +-
 .../src/peer_manager/network_behaviour.rs | 14 +-
 .../src/peer_manager/peerdb/score.rs | 2 +-
 beacon_node/lighthouse_network/src/rpc/mod.rs | 1 +
 beacon_node/lighthouse_network/src/service.rs | 573 --------
 .../src/service/api_types.rs | 101 ++
 .../src/service/behaviour.rs | 34 +
 .../{behaviour => service}/gossip_cache.rs | 0
 .../gossipsub_scoring_parameters.rs | 0
 .../src/{behaviour => service}/mod.rs | 1263 +++++++++--------
 .../lighthouse_network/src/service/utils.rs | 288 ++
 .../tests/common/behaviour.rs | 3 +-
 .../lighthouse_network/tests/common/mod.rs | 23 +-
 .../lighthouse_network/tests/pm_tests.rs | 4 +-
 .../lighthouse_network/tests/rpc_tests.rs | 184 +--
 beacon_node/network/src/service.rs | 246 ++--
 book/src/installation-source.md | 14 +-
 book/src/setup.md | 2 +
 .../aarch64-unknown-linux-gnu.dockerfile | 14 +
 .../cross/x86_64-unknown-linux-gnu.dockerfile | 14 +
 31 files changed, 1506 insertions(+), 1614 deletions(-)
 delete mode 100644 beacon_node/lighthouse_network/src/service.rs
 create mode 100644 beacon_node/lighthouse_network/src/service/api_types.rs
 create mode 100644 beacon_node/lighthouse_network/src/service/behaviour.rs
 rename beacon_node/lighthouse_network/src/{behaviour => service}/gossip_cache.rs (100%)
 rename beacon_node/lighthouse_network/src/{behaviour => service}/gossipsub_scoring_parameters.rs (100%)
 rename beacon_node/lighthouse_network/src/{behaviour => service}/mod.rs (56%)
 create mode 100644 beacon_node/lighthouse_network/src/service/utils.rs
 create mode 100644 scripts/cross/aarch64-unknown-linux-gnu.dockerfile
 create mode 100644 scripts/cross/x86_64-unknown-linux-gnu.dockerfile

diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml
index 35032a093..c688c0df3 100644
--- a/.github/workflows/local-testnet.yml
+++ b/.github/workflows/local-testnet.yml
@@ -20,7 +20,8 @@ jobs:
 
       - name: Get latest version of stable Rust
         run: rustup update stable
-
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
       - name: Install ganache
         run: npm install ganache@latest --global
 
diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index 1a7d78f61..a3e9625b5 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -50,6 +50,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: sudo npm install -g ganache
     - name: Run tests in release
@@ -68,7 +70,7 @@ jobs:
         node-version: '14'
     - name: Install windows build tools
       run: |
-        choco install python visualstudio2019-workload-vctools -y
+        choco install python protoc visualstudio2019-workload-vctools -y
         npm config set msvs_version 2019
     - name: Install ganache
       run: npm
install -g ganache --loglevel verbose @@ -90,6 +92,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run beacon_chain tests for all known forks run: make test-beacon-chain op-pool-tests: @@ -100,6 +104,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run operation_pool tests for all known forks run: make test-op-pool slasher-tests: @@ -120,6 +126,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run tests in debug @@ -132,6 +140,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run state_transition_vectors in release. run: make run-state-transition-tests ef-tests-ubuntu: @@ -142,6 +152,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run consensus-spec-tests with blst, milagro and fake_crypto run: make test-ef dockerfile-ubuntu: @@ -164,6 +176,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim that starts from an eth1 contract @@ -176,6 +190,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim and go through the merge transition @@ -188,6 +204,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim without an eth1 connection @@ -197,35 +215,39 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Run the syncing simulator - run: cargo run --release --bin simulator syncing-sim + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the syncing simulator + run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: - name: doppelganger-protection-test - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Install lighthouse and lcli - run: | - make - make install-lcli - - name: Run the doppelganger protection success test script - run: | - cd scripts/tests - ./doppelganger_protection.sh success - - name: Run the doppelganger protection failure test script - run: | - cd scripts/tests - 
./doppelganger_protection.sh failure + name: doppelganger-protection-test + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Install lighthouse and lcli + run: | + make + make install-lcli + - name: Run the doppelganger protection success test script + run: | + cd scripts/tests + ./doppelganger_protection.sh success + - name: Run the doppelganger protection failure test script + run: | + cd scripts/tests + ./doppelganger_protection.sh failure execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu runs-on: ubuntu-latest @@ -240,6 +262,8 @@ jobs: dotnet-version: '6.0.201' - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run exec engine integration tests in release run: make test-exec-engine check-benchmarks: @@ -250,6 +274,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Typecheck benchmark code without running it run: make check-benches check-consensus: @@ -270,6 +296,8 @@ jobs: - uses: actions/checkout@v1 - name: Get latest version of stable Rust run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness @@ -289,6 +317,8 @@ jobs: git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a cargo build --release --bin cargo-clippy --bin clippy-driver cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run Clippy with the disallowed-from-async lint run: make nightly-lint check-msrv: @@ -299,6 +329,8 @@ jobs: - uses: actions/checkout@v1 - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Run cargo check run: cargo check --workspace arbitrary-check: @@ -339,6 +371,8 @@ jobs: run: rustup toolchain install $PINNED_NIGHTLY # NOTE: cargo-udeps version is pinned until this issue is resolved: # https://github.com/est31/cargo-udeps/issues/135 + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install cargo-udeps run: cargo install cargo-udeps --locked --force --version 0.1.30 - name: Create Cargo config dir diff --git a/Cargo.lock b/Cargo.lock index 64eab1442..cfefa6c11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3226,9 +3226,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.45.1" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" +checksum = "94c996fe5bfdba47f5a5af71d48ecbe8cec900b7b97391cc1d3ba1afb0e2d3b6" dependencies = [ "bytes", "futures", @@ -3236,7 +3236,7 @@ dependencies = [ "getrandom 0.2.7", "instant", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3293,9 +3293,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" +checksum = "b1fff5bd889c82a0aec668f2045edd066f559d4e5c40354e5a4c77ac00caac38" dependencies = [ "asn1_der", "bs58", @@ -3313,10 +3313,9 @@ dependencies = [ "multistream-select 0.11.0", "parking_lot 0.12.1", "pin-project 1.0.11", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "rand 0.8.5", - "ring", "rw-stream-sink 0.3.0", "sha2 0.10.2", "smallvec", @@ -3328,12 +3327,12 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" +checksum = "6cb3c16e3bb2f76c751ae12f0f26e788c89d353babdded40411e7923f01fc978" dependencies = [ "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "parking_lot 0.12.1", "smallvec", @@ -3342,9 +3341,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.38.1" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e064ba4d7832e01c738626c6b274ae100baba05f5ffcc7b265c2a3ed398108" +checksum = "2185aac44b162c95180ae4ddd1f4dfb705217ea1cb8e16bdfc70d31496fd80fa" dependencies = [ "asynchronous-codec", "base64", @@ -3354,12 +3353,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "libp2p-swarm", "log", "prometheus-client", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3370,19 +3369,19 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.36.1" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" +checksum = "f19440c84b509d69b13f0c9c28caa9bd3a059d25478527e937e86761f25c821e" dependencies = [ "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "libp2p-swarm", "log", "lru", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "prost-codec", "smallvec", "thiserror", @@ -3391,11 +3390,11 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "564a7e5284d7d9b3140fdfc3cb6567bc32555e86a21de5604c2ec85da05cf384" +checksum = "a74ab339e8b5d989e8c1000a78adb5c064a6319245bb22d1e70b415ec18c39b8" dependencies = [ - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3404,14 +3403,14 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" +checksum = "ce53169351226ee0eb18ee7bef8d38f308fa8ad7244f986ae776390c0ae8a44d" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "nohash-hasher", "parking_lot 0.12.1", @@ -3422,18 +3421,18 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.36.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" +checksum = "7cb0f939a444b06779ce551b3d78ebf13970ac27906ada452fd70abd160b09b8" dependencies = [ "bytes", "curve25519-dalek 3.2.0", "futures", 
"lazy_static", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3444,33 +3443,33 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" +checksum = "328e8c654a55ac7f093eb96dfd0386244dd337f2bd2822dc019522b743ea8add" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-swarm" -version = "0.36.1" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" +checksum = "70ad2db60c06603606b54b58e4247e32efec87a93cb4387be24bf32926c600f2" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "pin-project 1.0.11", "rand 0.7.3", @@ -3481,26 +3480,27 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.27.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f693c8c68213034d472cbb93a379c63f4f307d97c06f1c41e4985de481687a5" +checksum = "1f02622b9dd150011b4eeec387f8bd013189a2f27da08ba363e7c6e606d77a48" dependencies = [ + "heck 0.4.0", "quote", "syn", ] [[package]] name = "libp2p-tcp" -version = "0.33.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" +checksum = "9675432b4c94b3960f3d2c7e57427b81aea92aab67fd0eebef09e2ae0ff54895" dependencies = [ "futures", "futures-timer", "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "socket2", "tokio", @@ -3508,14 +3508,14 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.35.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" +checksum = "de8a9e825cc03f2fc194d2e1622113d7fe18e1c7f4458a582b83140c9b9aea27" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "log", "parking_lot 0.12.1", "quicksink", @@ -3527,12 +3527,12 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.37.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" +checksum = "b74ec8dc042b583f0b2b93d52917f3b374c1e4b1cfa79ee74c7672c41257694c" dependencies = [ "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.36.0", "parking_lot 0.12.1", "thiserror", "yamux", @@ -4440,15 +4440,6 @@ dependencies = [ "types", ] -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -4876,21 +4867,21 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.16.0" +version = "0.18.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" +checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" dependencies = [ "dtoa", "itoa 1.0.2", - "owning_ref", + "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] [[package]] name = "prometheus-client-derive-text-encode" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652" +checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", @@ -4909,12 +4900,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.10.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" dependencies = [ "bytes", - "prost-derive 0.10.1", + "prost-derive 0.11.0", ] [[package]] @@ -4939,21 +4930,19 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.10.4" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" dependencies = [ "bytes", - "cfg-if", - "cmake", "heck 0.4.0", "itertools", "lazy_static", "log", "multimap", "petgraph", - "prost 0.10.4", - "prost-types 0.10.1", + "prost 0.11.0", + "prost-types 0.11.1", "regex", "tempfile", "which", @@ -4961,13 +4950,13 @@ dependencies = [ [[package]] name = "prost-codec" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.10.4", + "prost 0.11.0", "thiserror", "unsigned-varint 0.7.1", ] @@ -4987,9 +4976,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" dependencies = [ "anyhow", "itertools", @@ -5010,12 +4999,12 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" dependencies = [ "bytes", - "prost 0.10.4", + "prost 0.11.0", ] [[package]] @@ -6272,12 +6261,6 @@ dependencies = [ "syn", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "state_processing" version = "0.2.0" diff --git a/Cross.toml b/Cross.toml index d5f7a5d50..963e22d0e 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -pre-build = ["apt-get install -y cmake clang-3.9"] +dockerfile = './scripts/cross/x86_64-unknown-linux-gnu.dockerfile' 
[target.aarch64-unknown-linux-gnu] -pre-build = ["apt-get install -y cmake clang-3.9"] +dockerfile = './scripts/cross/aarch64-unknown-linux-gnu.dockerfile' diff --git a/Dockerfile b/Dockerfile index 86a69c653..72423b17c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES ENV FEATURES $FEATURES diff --git a/Makefile b/Makefile index 6119f0dc0..3bf23a4ce 100644 --- a/Makefile +++ b/Makefile @@ -179,7 +179,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 --ignore RUSTSEC-2022-0040 + cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 032e1346f..a0dbf40b2 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -118,9 +118,7 @@ pub async fn create_api_server_on_port( // Only a peer manager can add peers, so we create a dummy manager. let config = lighthouse_network::peer_manager::config::Config::default(); - let mut pm = PeerManager::new(config, network_globals.clone(), &log) - .await - .unwrap(); + let mut pm = PeerManager::new(config, network_globals.clone(), &log).unwrap(); // add a peer let peer_id = PeerId::random(); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index c6ba53050..e5af0a749 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -37,12 +37,12 @@ directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" -prometheus-client = "0.16.0" +prometheus-client = "0.18.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.1.1" [dependencies.libp2p] -version = "0.45.1" +version = "0.48.0" default-features = false features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 81eaaaf1b..1001efe23 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -232,7 +232,6 @@ impl CombinedKeyExt for CombinedKey { .expect("libp2p key must be valid"); Ok(CombinedKey::from(ed_keypair)) } - _ => Err("ENR: Unsupported libp2p key type"), } } } @@ -266,7 +265,6 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result Err("Unsupported public key".into()), } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 23b8895cf..d766fd23a 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -7,8 +7,8 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -use crate::behaviour::TARGET_SUBNET_PEERS; use crate::metrics; +use 
crate::service::TARGET_SUBNET_PEERS; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ @@ -21,6 +21,8 @@ pub use libp2p::core::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; +use libp2p::multiaddr::Protocol; +use libp2p::swarm::AddressScore; pub use libp2p::{ core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, swarm::{ @@ -67,13 +69,11 @@ pub const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16; /// The threshold for updating `min_ttl` on a connected peer. const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); -/// The events emitted by polling discovery. -pub enum DiscoveryEvent { - /// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl` - /// of the peer if it is specified. - QueryResult(HashMap>), - /// This indicates that our local UDP socketaddr has been updated and we should inform libp2p. - SocketUpdated(SocketAddr), +/// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl` +/// of the peer if it is specified. +#[derive(Debug)] +pub struct DiscoveredPeers { + pub peers: HashMap>, } #[derive(Clone, PartialEq)] @@ -362,7 +362,7 @@ impl Discovery { } /// Returns an iterator over all enr entries in the DHT. - pub fn table_entries_enr(&mut self) -> Vec { + pub fn table_entries_enr(&self) -> Vec { self.discv5.table_entries_enr() } @@ -909,7 +909,7 @@ impl Discovery { impl NetworkBehaviour for Discovery { // Discovery is not a real NetworkBehaviour... type ConnectionHandler = libp2p::swarm::handler::DummyConnectionHandler; - type OutEvent = DiscoveryEvent; + type OutEvent = DiscoveredPeers; fn new_handler(&mut self) -> Self::ConnectionHandler { libp2p::swarm::handler::DummyConnectionHandler::default() @@ -976,11 +976,9 @@ impl NetworkBehaviour for Discovery { self.process_queue(); // Drive the queries and return any results from completed queries - if let Some(results) = self.poll_queries(cx) { + if let Some(peers) = self.poll_queries(cx) { // return the result to the peer manager - return Poll::Ready(NBAction::GenerateEvent(DiscoveryEvent::QueryResult( - results, - ))); + return Poll::Ready(NBAction::GenerateEvent(DiscoveredPeers { peers })); } // Process the server event stream @@ -1019,8 +1017,8 @@ impl NetworkBehaviour for Discovery { } */ } - Discv5Event::SocketUpdated(socket) => { - info!(self.log, "Address updated"; "ip" => %socket.ip(), "udp_port" => %socket.port()); + Discv5Event::SocketUpdated(socket_addr) => { + info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port()); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); metrics::check_nat(); // Discv5 will have updated our local ENR. We save the updated version @@ -1029,9 +1027,16 @@ impl NetworkBehaviour for Discovery { enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log); // update network globals *self.network_globals.local_enr.write() = enr; - return Poll::Ready(NBAction::GenerateEvent( - DiscoveryEvent::SocketUpdated(socket), - )); + // A new UDP socket has been detected. + // Build a multiaddr to report to libp2p + let mut address = Multiaddr::from(socket_addr.ip()); + // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling + // should handle this. 
+ address.push(Protocol::Tcp(self.network_globals.listen_port_tcp())); + return Poll::Ready(NBAction::ReportObservedAddr { + address, + score: AddressScore::Finite(1), + }); } Discv5Event::EnrAdded { .. } | Discv5Event::TalkRequest(_) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index f679b7e65..be4da809c 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -5,15 +5,14 @@ #[macro_use] extern crate lazy_static; -pub mod behaviour; mod config; +pub mod service; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; pub mod metrics; pub mod peer_manager; pub mod rpc; -mod service; pub mod types; pub use config::gossip_max_size; @@ -69,7 +68,6 @@ pub use crate::types::{ pub use prometheus_client; -pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; @@ -85,4 +83,7 @@ pub use peer_manager::{ peerdb::PeerDB, ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; -pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +// pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +pub use service::api_types::{PeerRequestId, Request, Response}; +pub use service::utils::*; +pub use service::{Gossipsub, NetworkEvent}; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 63d081660..0f2913595 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,7 +1,7 @@ //! Implementation of Lighthouse's peer management system. -use crate::behaviour::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; +use crate::service::TARGET_SUBNET_PEERS; use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; use crate::{Subnet, SubnetDiscovery}; @@ -12,6 +12,7 @@ use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; use smallvec::SmallVec; +use std::collections::VecDeque; use std::{ sync::Arc, time::{Duration, Instant}, @@ -71,6 +72,8 @@ pub struct PeerManager { status_peers: HashSetDelay, /// The target number of peers we would like to connect to. target_peers: usize, + /// Peers queued to be dialed. + peers_to_dial: VecDeque<(PeerId, Option)>, /// A collection of sync committee subnets that we need to stay subscribed to. /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run /// discovery queries for subnet peers if we disconnect from existing sync @@ -115,7 +118,7 @@ pub enum PeerManagerEvent { impl PeerManager { // NOTE: Must be run inside a tokio executor. 
- pub async fn new( + pub fn new( cfg: config::Config, network_globals: Arc>, log: &slog::Logger, @@ -135,6 +138,7 @@ impl PeerManager { Ok(PeerManager { network_globals, events: SmallVec::new(), + peers_to_dial: Default::default(), inbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_inbound)), outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)), status_peers: HashSetDelay::new(Duration::from_secs(status_interval)), @@ -360,8 +364,8 @@ impl PeerManager { /* Notifications from the Swarm */ // A peer is being dialed. - pub fn inject_dialing(&mut self, peer_id: &PeerId, enr: Option) { - self.inject_peer_connection(peer_id, ConnectingType::Dialing, enr); + pub fn dial_peer(&mut self, peer_id: &PeerId, enr: Option) { + self.peers_to_dial.push_back((*peer_id, enr)); } /// Reports if a peer is banned or not. @@ -1247,9 +1251,7 @@ mod tests { }; let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new_test_globals(&log); - PeerManager::new(config, Arc::new(globals), &log) - .await - .unwrap() + PeerManager::new(config, Arc::new(globals), &log).unwrap() } #[tokio::test] diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 3bda64f0b..a19c6db65 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -3,6 +3,7 @@ use std::task::{Context, Poll}; use futures::StreamExt; use libp2p::core::connection::ConnectionId; use libp2p::core::ConnectedPoint; +use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::handler::DummyConnectionHandler; use libp2p::swarm::{ ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters, @@ -16,7 +17,7 @@ use crate::rpc::GoodbyeReason; use crate::types::SyncState; use super::peerdb::BanResult; -use super::{PeerManager, PeerManagerEvent, ReportSource}; +use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = DummyConnectionHandler; @@ -99,6 +100,17 @@ impl NetworkBehaviour for PeerManager { self.events.shrink_to_fit(); } + if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_front() { + self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); + let handler = self.new_handler(); + return Poll::Ready(NetworkBehaviourAction::Dial { + opts: DialOpts::peer_id(peer_id) + .condition(PeerCondition::Disconnected) + .build(), + handler, + }); + } + Poll::Pending } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index accc0b60c..fca665db9 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -5,7 +5,7 @@ //! As the logic develops this documentation will advance. //! //! The scoring algorithms are currently experimental. 
-use crate::behaviour::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; +use crate::service::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; use serde::Serialize; use std::time::Instant; use strum::AsRefStr; diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 0bedd423b..7b0092ef7 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -90,6 +90,7 @@ impl std::fmt::Display for RPCSend { } /// Messages sent to the user from the RPC protocol. +#[derive(Debug)] pub struct RPCMessage { /// The peer that sent the message. pub peer_id: PeerId, diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs deleted file mode 100644 index bcd546fb0..000000000 --- a/beacon_node/lighthouse_network/src/service.rs +++ /dev/null @@ -1,573 +0,0 @@ -use crate::behaviour::{ - save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response, -}; -use crate::config::NetworkLoad; -use crate::discovery::enr; -use crate::multiaddr::Protocol; -use crate::rpc::{GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, ReqId}; -use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipKind}; -use crate::EnrExt; -use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; -use futures::prelude::*; -use libp2p::core::{ - identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, -}; -use libp2p::{ - bandwidth::{BandwidthLogging, BandwidthSinks}, - core, noise, - swarm::{ConnectionLimits, SwarmBuilder, SwarmEvent}, - PeerId, Swarm, Transport, -}; -use prometheus_client::registry::Registry; -use slog::{crit, debug, info, o, trace, warn, Logger}; -use ssz::Decode; -use std::fs::File; -use std::io::prelude::*; -use std::pin::Pin; -use std::sync::Arc; -use std::time::Duration; -use types::{ChainSpec, EnrForkId, EthSpec, ForkContext}; - -use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; - -pub const NETWORK_KEY_FILENAME: &str = "key"; -/// The maximum simultaneous libp2p connections per peer. -const MAX_CONNECTIONS_PER_PEER: u32 = 1; -/// The filename to store our local metadata. -pub const METADATA_FILENAME: &str = "metadata"; - -/// The types of events than can be obtained from polling the libp2p service. -/// -/// This is a subset of the events that a libp2p swarm emits. -#[derive(Debug)] -pub enum Libp2pEvent { - /// A behaviour event - Behaviour(BehaviourEvent), - /// A new listening address has been established. - NewListenAddr(Multiaddr), - /// We reached zero listening addresses. - ZeroListeners, -} - -/// The configuration and state of the libp2p components for the beacon node. -pub struct Service { - /// The libp2p Swarm handler. - pub swarm: Swarm>, - /// The bandwidth logger for the underlying libp2p transport. - pub bandwidth: Arc, - /// This node's PeerId. - pub local_peer_id: PeerId, - /// The libp2p logger handle. 
- pub log: Logger, -} - -pub struct Context<'a> { - pub config: &'a NetworkConfig, - pub enr_fork_id: EnrForkId, - pub fork_context: Arc, - pub chain_spec: &'a ChainSpec, - pub gossipsub_registry: Option<&'a mut Registry>, -} - -impl Service { - pub async fn new( - executor: task_executor::TaskExecutor, - ctx: Context<'_>, - log: &Logger, - ) -> error::Result<(Arc>, Self)> { - let log = log.new(o!("service"=> "libp2p")); - trace!(log, "Libp2p Service starting"); - - let config = ctx.config; - // initialise the node's ID - let local_keypair = load_private_key(config, &log); - - // Create an ENR or load from disk if appropriate - let enr = - enr::build_or_load_enr::(local_keypair.clone(), config, &ctx.enr_fork_id, &log)?; - - let local_peer_id = enr.peer_id(); - - // Construct the metadata - let meta_data = load_or_build_metadata(&config.network_dir, &log); - - // set up a collection of variables accessible outside of the network crate - let network_globals = Arc::new(NetworkGlobals::new( - enr.clone(), - config.libp2p_port, - config.discovery_port, - meta_data, - config - .trusted_peers - .iter() - .map(|x| PeerId::from(x.clone())) - .collect(), - &log, - )); - - info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); - let discovery_string = if config.disable_discovery { - "None".into() - } else { - config.discovery_port.to_string() - }; - debug!(log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); - - let (mut swarm, bandwidth) = { - // Set up the transport - tcp/ws with noise and mplex - let (transport, bandwidth) = build_transport(local_keypair.clone()) - .map_err(|e| format!("Failed to build transport: {:?}", e))?; - - // Lighthouse network behaviour - let behaviour = - Behaviour::new(&local_keypair, ctx, network_globals.clone(), &log).await?; - - // use the executor for libp2p - struct Executor(task_executor::TaskExecutor); - impl libp2p::core::Executor for Executor { - fn exec(&self, f: Pin + Send>>) { - self.0.spawn(f, "libp2p"); - } - } - - // sets up the libp2p connection limits - let limits = ConnectionLimits::default() - .with_max_pending_incoming(Some(5)) - .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some( - (config.target_peers as f32 - * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) - .ceil() as u32, - )) - .with_max_established_outgoing(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, - )) - .with_max_established(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) - .ceil() as u32, - )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); - - ( - SwarmBuilder::new(transport, behaviour, local_peer_id) - .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .connection_event_buffer_size(64) - .connection_limits(limits) - .executor(Box::new(Executor(executor))) - .build(), - bandwidth, - ) - }; - - // listen on the specified address - let listen_multiaddr = { - let mut m = Multiaddr::from(config.listen_address); - m.push(Protocol::Tcp(config.libp2p_port)); - m - }; - - match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) { - Ok(_) => { - let mut log_address = listen_multiaddr; - log_address.push(Protocol::P2p(local_peer_id.into())); - info!(log, "Listening established"; "address" => %log_address); - } - Err(err) => { - crit!( - 
log, - "Unable to listen on libp2p address"; - "error" => ?err, - "listen_multiaddr" => %listen_multiaddr, - ); - return Err("Libp2p was unable to listen on the given listen address.".into()); - } - }; - - // helper closure for dialing peers - let mut dial = |mut multiaddr: Multiaddr| { - // strip the p2p protocol if it exists - strip_peer_id(&mut multiaddr); - match Swarm::dial(&mut swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => %multiaddr), - Err(err) => debug!( - log, - "Could not connect to peer"; "address" => %multiaddr, "error" => ?err - ), - }; - }; - - // attempt to connect to user-input libp2p nodes - for multiaddr in &config.libp2p_nodes { - dial(multiaddr.clone()); - } - - // attempt to connect to any specified boot-nodes - let mut boot_nodes = config.boot_nodes_enr.clone(); - boot_nodes.dedup(); - - for bootnode_enr in boot_nodes { - for multiaddr in &bootnode_enr.multiaddr() { - // ignore udp multiaddr if it exists - let components = multiaddr.iter().collect::>(); - if let Protocol::Udp(_) = components[1] { - continue; - } - - if !network_globals - .peers - .read() - .is_connected_or_dialing(&bootnode_enr.peer_id()) - { - dial(multiaddr.clone()); - } - } - } - - for multiaddr in &config.boot_nodes_multiaddr { - // check TCP support for dialing - if multiaddr - .iter() - .any(|proto| matches!(proto, Protocol::Tcp(_))) - { - dial(multiaddr.clone()); - } - } - - let mut subscribed_topics: Vec = vec![]; - - for topic_kind in &config.topics { - if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { - subscribed_topics.push(topic_kind.clone()); - } else { - warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); - } - } - - if !subscribed_topics.is_empty() { - info!(log, "Subscribed to topics"; "topics" => ?subscribed_topics); - } - - let service = Service { - swarm, - bandwidth, - local_peer_id, - log, - }; - - Ok((network_globals, service)) - } - - /// Sends a request to a peer, with a given Id. - pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { - self.swarm - .behaviour_mut() - .send_request(peer_id, request_id, request); - } - - /// Informs the peer that their request failed. - pub fn respond_with_error( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - error: RPCResponseErrorCode, - reason: String, - ) { - self.swarm - .behaviour_mut() - .send_error_reponse(peer_id, id, error, reason); - } - - /// Report a peer's action. - pub fn report_peer( - &mut self, - peer_id: &PeerId, - action: PeerAction, - source: ReportSource, - msg: &'static str, - ) { - self.swarm - .behaviour_mut() - .peer_manager_mut() - .report_peer(peer_id, action, source, None, msg); - } - - /// Disconnect and ban a peer, providing a reason. - pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - self.swarm - .behaviour_mut() - .goodbye_peer(peer_id, reason, source); - } - - /// Sends a response to a peer's request. 
- pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { - self.swarm - .behaviour_mut() - .send_successful_response(peer_id, id, response); - } - - pub async fn next_event(&mut self) -> Libp2pEvent { - loop { - match self.swarm.select_next_some().await { - SwarmEvent::Behaviour(behaviour) => { - // Handle banning here - match &behaviour { - BehaviourEvent::PeerBanned(peer_id) => { - self.swarm.ban_peer_id(*peer_id); - } - BehaviourEvent::PeerUnbanned(peer_id) => { - self.swarm.unban_peer_id(*peer_id); - } - _ => {} - } - return Libp2pEvent::Behaviour(behaviour); - } - SwarmEvent::ConnectionEstablished { - peer_id: _, - endpoint: _, - num_established: _, - concurrent_dial_errors: _, - } => {} - SwarmEvent::ConnectionClosed { - peer_id: _, - cause: _, - endpoint: _, - num_established: _, - } => {} - SwarmEvent::NewListenAddr { address, .. } => { - return Libp2pEvent::NewListenAddr(address) - } - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr) - } - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); - } - SwarmEvent::BannedPeer { peer_id, .. } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); - } - SwarmEvent::ExpiredListenAddr { address, .. } => { - debug!(self.log, "Listen address expired"; "address" => %address) - } - SwarmEvent::ListenerClosed { - addresses, reason, .. - } => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); - if Swarm::listeners(&self.swarm).count() == 0 { - return Libp2pEvent::ZeroListeners; - } - } - SwarmEvent::ListenerError { error, .. } => { - // this is non fatal, but we still check - warn!(self.log, "Listener error"; "error" => ?error); - if Swarm::listeners(&self.swarm).count() == 0 { - return Libp2pEvent::ZeroListeners; - } - } - SwarmEvent::Dialing(_peer_id) => {} - } - } - } -} - -type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; - -/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and -/// mplex as the multiplexing layer. 
-fn build_transport( - local_private_key: Keypair, -) -> std::io::Result<(BoxedTransport, Arc)> { - let tcp = libp2p::tcp::TokioTcpConfig::new().nodelay(true); - let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; - #[cfg(feature = "libp2p-websocket")] - let transport = { - let trans_clone = transport.clone(); - transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) - }; - - let (transport, bandwidth) = BandwidthLogging::new(transport); - - // mplex config - let mut mplex_config = libp2p::mplex::MplexConfig::new(); - mplex_config.set_max_buffer_size(256); - mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); - - // yamux config - let mut yamux_config = libp2p::yamux::YamuxConfig::default(); - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); - - // Authentication - Ok(( - transport - .upgrade(core::upgrade::Version::V1) - .authenticate(generate_noise_config(&local_private_key)) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux_config, - mplex_config, - )) - .timeout(Duration::from_secs(10)) - .boxed(), - bandwidth, - )) -} - -// Useful helper functions for debugging. Currently not used in the client. -#[allow(dead_code)] -fn keypair_from_hex(hex_bytes: &str) -> error::Result { - let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { - stripped.to_string() - } else { - hex_bytes.to_string() - }; - - hex::decode(&hex_bytes) - .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) - .and_then(keypair_from_bytes) -} - -#[allow(dead_code)] -fn keypair_from_bytes(mut bytes: Vec) -> error::Result { - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) - .map(|secret| { - let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); - Keypair::Secp256k1(keypair) - }) - .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) -} - -/// Loads a private key from disk. If this fails, a new key is -/// generated and is then saved to disk. -/// -/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. -pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { - // check for key from disk - let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME); - if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { - let mut key_bytes: Vec = Vec::with_capacity(36); - match network_key_file.read_to_end(&mut key_bytes) { - Err(_) => debug!(log, "Could not read network key file"), - Ok(_) => { - // only accept secp256k1 keys for now - if let Ok(secret_key) = - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) - { - let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); - debug!(log, "Loaded network key from disk."); - return Keypair::Secp256k1(kp); - } else { - debug!(log, "Network key file is not a valid secp256k1 key"); - } - } - } - } - - // if a key could not be loaded from disk, generate a new one and save it - let local_private_key = Keypair::generate_secp256k1(); - if let Keypair::Secp256k1(key) = local_private_key.clone() { - let _ = std::fs::create_dir_all(&config.network_dir); - match File::create(network_key_f.clone()) - .and_then(|mut f| f.write_all(&key.secret().to_bytes())) - { - Ok(_) => { - debug!(log, "New network key generated and written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write node key to file: {:?}. 
error: {}", network_key_f, e - ); - } - } - } - local_private_key -} - -/// Generate authenticated XX Noise config from identity keys -fn generate_noise_config( - identity_keypair: &Keypair, -) -> noise::NoiseAuthenticated { - let static_dh_keys = noise::Keypair::::new() - .into_authentic(identity_keypair) - .expect("signing can fail only once during starting a node"); - noise::NoiseConfig::xx(static_dh_keys).into_authenticated() -} - -/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p -/// only supports dialing to an address without providing the peer id. -fn strip_peer_id(addr: &mut Multiaddr) { - let last = addr.pop(); - match last { - Some(Protocol::P2p(_)) => {} - Some(other) => addr.push(other), - _ => {} - } -} - -/// Load metadata from persisted file. Return default metadata if loading fails. -fn load_or_build_metadata( - network_dir: &std::path::Path, - log: &slog::Logger, -) -> MetaData { - // We load a V2 metadata version by default (regardless of current fork) - // since a V2 metadata can be converted to V1. The RPC encoder is responsible - // for sending the correct metadata version based on the negotiated protocol version. - let mut meta_data = MetaDataV2 { - seq_number: 0, - attnets: EnrAttestationBitfield::::default(), - syncnets: EnrSyncCommitteeBitfield::::default(), - }; - // Read metadata from persisted file if available - let metadata_path = network_dir.join(METADATA_FILENAME); - if let Ok(mut metadata_file) = File::open(metadata_path) { - let mut metadata_ssz = Vec::new(); - if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { - // Attempt to read a MetaDataV2 version from the persisted file, - // if that fails, read MetaDataV1 - match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { - Ok(persisted_metadata) => { - meta_data.seq_number = persisted_metadata.seq_number; - // Increment seq number if persisted attnet is not default - if persisted_metadata.attnets != meta_data.attnets - || persisted_metadata.syncnets != meta_data.syncnets - { - meta_data.seq_number += 1; - } - debug!(log, "Loaded metadata from disk"); - } - Err(_) => { - match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { - Ok(persisted_metadata) => { - let persisted_metadata = MetaData::V1(persisted_metadata); - // Increment seq number as the persisted metadata version is updated - meta_data.seq_number = *persisted_metadata.seq_number() + 1; - debug!(log, "Loaded metadata from disk"); - } - Err(e) => { - debug!( - log, - "Metadata from file could not be decoded"; - "error" => ?e, - ); - } - } - } - } - } - }; - - // Wrap the MetaData - let meta_data = MetaData::V2(meta_data); - - debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); - save_metadata_to_disk(network_dir, meta_data.clone(), log); - meta_data -} diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs new file mode 100644 index 000000000..e5d81737c --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -0,0 +1,101 @@ +use std::sync::Arc; + +use libp2p::core::connection::ConnectionId; +use types::{EthSpec, SignedBeaconBlock}; + +use crate::rpc::{ + methods::{ + BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse, + RPCResponse, ResponseTermination, StatusMessage, + }, + OutboundRequest, SubstreamId, +}; + +/// Identifier of requests sent by a peer. +pub type PeerRequestId = (ConnectionId, SubstreamId); + +/// Identifier of a request. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum RequestId<AppReqId> {
+    Application(AppReqId),
+    Internal,
+}
+
+/// The type of RPC requests the Behaviour informs it has received and allows for sending.
+///
+// NOTE: This is an application-level wrapper over the lower network level requests that can be
+// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't
+// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`.
+#[derive(Debug, Clone, PartialEq)]
+pub enum Request {
+    /// A Status message.
+    Status(StatusMessage),
+    /// A blocks by range request.
+    BlocksByRange(BlocksByRangeRequest),
+    /// A blocks by root request.
+    BlocksByRoot(BlocksByRootRequest),
+}
+
+impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
+    fn from(req: Request) -> OutboundRequest<TSpec> {
+        match req {
+            Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r),
+            Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => {
+                OutboundRequest::BlocksByRange(OldBlocksByRangeRequest {
+                    start_slot,
+                    count,
+                    step: 1,
+                })
+            }
+            Request::Status(s) => OutboundRequest::Status(s),
+        }
+    }
+}
+
+/// The type of RPC responses the Behaviour informs it has received, and allows for sending.
+///
+// NOTE: This is an application-level wrapper over the lower network level responses that can be
+// sent. The main difference is the absence of Pong and Metadata, which don't leave the
+// Behaviour. For all protocol responses managed by RPC see `RPCResponse` and
+// `RPCCodedResponse`.
+#[derive(Debug, Clone, PartialEq)]
+pub enum Response<TSpec: EthSpec> {
+    /// A Status message.
+    Status(StatusMessage),
+    /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch.
+    BlocksByRange(Option<Arc<SignedBeaconBlock<TSpec>>>),
+    /// A response to a get BLOCKS_BY_ROOT request.
+ BlocksByRoot(Option>>), +} + +impl std::convert::From> for RPCCodedResponse { + fn from(resp: Response) -> RPCCodedResponse { + match resp { + Response::BlocksByRoot(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot), + }, + Response::BlocksByRange(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), + }, + Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + } + } +} + +impl slog::Value for RequestId { + fn serialize( + &self, + record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + match self { + RequestId::Internal => slog::Value::serialize("Behaviour", record, key, serializer), + RequestId::Application(ref id) => { + slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer) + } + } + } +} diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs new file mode 100644 index 000000000..8327293a7 --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -0,0 +1,34 @@ +use crate::discovery::Discovery; +use crate::peer_manager::PeerManager; +use crate::rpc::{ReqId, RPC}; +use crate::types::SnappyTransform; + +use libp2p::gossipsub::subscription_filter::{ + MaxCountSubscriptionFilter, WhitelistSubscriptionFilter, +}; +use libp2p::gossipsub::Gossipsub as BaseGossipsub; +use libp2p::identify::Identify; +use libp2p::swarm::NetworkBehaviour; +use libp2p::NetworkBehaviour; +use types::EthSpec; + +use super::api_types::RequestId; + +pub type SubscriptionFilter = MaxCountSubscriptionFilter; +pub type Gossipsub = BaseGossipsub; + +#[derive(NetworkBehaviour)] +pub(crate) struct Behaviour { + /// The routing pub-sub mechanism for eth2. + pub gossipsub: Gossipsub, + /// The Eth2 RPC specified in the wire-0 protocol. + pub eth2_rpc: RPC, TSpec>, + /// Discv5 Discovery protocol. + pub discovery: Discovery, + /// Keep regular connection to peers and disconnect if absent. + // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + pub identify: Identify, + /// The peer manager that keeps track of peer's reputation and status. 
+ pub peer_manager: PeerManager, +} diff --git a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs similarity index 100% rename from beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs rename to beacon_node/lighthouse_network/src/service/gossip_cache.rs diff --git a/beacon_node/lighthouse_network/src/behaviour/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs similarity index 100% rename from beacon_node/lighthouse_network/src/behaviour/gossipsub_scoring_parameters.rs rename to beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs similarity index 56% rename from beacon_node/lighthouse_network/src/behaviour/mod.rs rename to beacon_node/lighthouse_network/src/service/mod.rs index 9c9e094db..53d29ccb2 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,85 +1,63 @@ -use crate::behaviour::gossipsub_scoring_parameters::{ - lighthouse_gossip_thresholds, PeerScoreSettings, -}; -use crate::config::gossipsub_config; +use crate::config::{gossipsub_config, NetworkLoad}; use crate::discovery::{ - subnet_predicate, Discovery, DiscoveryEvent, FIND_NODE_QUERY_CLOSEST_PEERS, + subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, }; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, }; -use crate::rpc::*; -use crate::service::{Context as ServiceContext, METADATA_FILENAME}; +use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; +use crate::service::behaviour::BehaviourEvent; +pub use crate::service::behaviour::Gossipsub; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; +use crate::{rpc::*, EnrExt}; +use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; +use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; +use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::error::PublishError; -use libp2p::{ - core::{ - connection::ConnectionId, identity::Keypair, multiaddr::Protocol as MProtocol, Multiaddr, - }, - gossipsub::{ - metrics::Config as GossipsubMetricsConfig, - subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter}, - Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance, - MessageAuthenticity, MessageId, - }, - identify::{Identify, IdentifyConfig, IdentifyEvent}, - swarm::{ - dial_opts::{DialOpts, PeerCondition}, - AddressScore, NetworkBehaviour, NetworkBehaviourAction as NBAction, - NetworkBehaviourEventProcess, PollParameters, - }, - NetworkBehaviour, PeerId, -}; -use slog::{crit, debug, o, trace, warn}; -use ssz::Encode; -use std::collections::HashSet; -use std::fs::File; -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::{ - collections::VecDeque, - marker::PhantomData, - sync::Arc, - task::{Context, Poll}, -}; -use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, - SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, +use 
libp2p::gossipsub::metrics::Config as GossipsubMetricsConfig; +use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter; +use libp2p::gossipsub::{ + GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, }; +use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; +use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::PeerId; +use slog::{crit, debug, info, o, trace, warn}; +use std::marker::PhantomData; +use std::path::PathBuf; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use types::{ + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, +}; +use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; + +use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; +pub mod api_types; +mod behaviour; mod gossip_cache; pub mod gossipsub_scoring_parameters; - +pub mod utils; /// The number of peers we target per subnet for discovery queries. pub const TARGET_SUBNET_PEERS: usize = 6; const MAX_IDENTIFY_ADDRESSES: usize = 10; -/// Identifier of requests sent by a peer. -pub type PeerRequestId = (ConnectionId, SubstreamId); - -pub type SubscriptionFilter = MaxCountSubscriptionFilter; -pub type Gossipsub = BaseGossipsub; - -/// Identifier of a request. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum RequestId { - Application(AppReqId), - Behaviour, -} - /// The types of events than can be obtained from polling the behaviour. #[derive(Debug)] -pub enum BehaviourEvent { +pub enum NetworkEvent { /// We have successfully dialed and connected to a peer. PeerConnectedOutgoing(PeerId), /// A peer has successfully dialed and connected to us. @@ -125,224 +103,419 @@ pub enum BehaviourEvent { }, /// Inform the network to send a Status to this peer. StatusPeer(PeerId), -} - -/// Internal type to pass messages from sub-behaviours to the poll of the global behaviour to be -/// specified as an NBAction. -enum InternalBehaviourMessage { - /// Dial a Peer. - DialPeer(PeerId), - /// The socket has been updated. - SocketUpdated(Multiaddr), + NewListenAddr(Multiaddr), + ZeroListeners, } /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. -#[derive(NetworkBehaviour)] -#[behaviour( - out_event = "BehaviourEvent", - poll_method = "poll", - event_process = true -)] -pub struct Behaviour { - /* Sub-Behaviours */ - /// The routing pub-sub mechanism for eth2. - gossipsub: Gossipsub, - /// The Eth2 RPC specified in the wire-0 protocol. - eth2_rpc: RPC, TSpec>, - /// Discv5 Discovery protocol. - discovery: Discovery, - /// Keep regular connection to peers and disconnect if absent. - // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. - /// Provides IP addresses and peer information. - identify: Identify, - /// The peer manager that keeps track of peer's reputation and status. - peer_manager: PeerManager, - +pub struct Network { + swarm: libp2p::swarm::Swarm>, /* Auxiliary Fields */ - /// The output events generated by this behaviour to be consumed in the swarm poll. - #[behaviour(ignore)] - events: VecDeque>, - /// Internal behaviour events, the NBAction type is composed of sub-behaviours, so we use a - /// custom type here to avoid having to specify the concrete type. 
- #[behaviour(ignore)] - internal_events: VecDeque, /// A collections of variables accessible outside the network service. - #[behaviour(ignore)] network_globals: Arc>, /// Keeps track of the current EnrForkId for upgrading gossipsub topics. // NOTE: This can be accessed via the network_globals ENR. However we keep it here for quick // lookups for every gossipsub message send. - #[behaviour(ignore)] enr_fork_id: EnrForkId, - /// The waker for the current task. This is used to wake the task when events are added to the - /// queue. - #[behaviour(ignore)] - waker: Option, /// Directory where metadata is stored. - #[behaviour(ignore)] network_dir: PathBuf, - #[behaviour(ignore)] fork_context: Arc, /// Gossipsub score parameters. - #[behaviour(ignore)] score_settings: PeerScoreSettings, /// The interval for updating gossipsub scores - #[behaviour(ignore)] update_gossipsub_scores: tokio::time::Interval, - #[behaviour(ignore)] gossip_cache: GossipCache, + /// The bandwidth logger for the underlying libp2p transport. + pub bandwidth: Arc, + /// This node's PeerId. + pub local_peer_id: PeerId, /// Logger for behaviour actions. - #[behaviour(ignore)] log: slog::Logger, } /// Implements the combined behaviour for the libp2p service. -impl Behaviour { +impl Network { pub async fn new( - local_key: &Keypair, + executor: task_executor::TaskExecutor, ctx: ServiceContext<'_>, - network_globals: Arc>, log: &slog::Logger, - ) -> error::Result { - let behaviour_log = log.new(o!()); - + ) -> error::Result<(Self, Arc>)> { + let log = log.new(o!("service"=> "libp2p")); let mut config = ctx.config.clone(); + trace!(log, "Libp2p Service starting"); + // initialise the node's ID + let local_keypair = utils::load_private_key(&config, &log); - // Set up the Identify Behaviour - let identify_config = if config.private { - IdentifyConfig::new( - "".into(), - local_key.public(), // Still send legitimate public key - ) - .with_cache_size(0) - } else { - IdentifyConfig::new("eth2/1.0.0".into(), local_key.public()) - .with_agent_version(lighthouse_version::version_with_platform()) - .with_cache_size(0) + // set up a collection of variables accessible outside of the network crate + let network_globals = { + // Create an ENR or load from disk if appropriate + let enr = crate::discovery::enr::build_or_load_enr::( + local_keypair.clone(), + &config, + &ctx.enr_fork_id, + &log, + )?; + // Construct the metadata + let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); + let globals = NetworkGlobals::new( + enr, + config.libp2p_port, + config.discovery_port, + meta_data, + config + .trusted_peers + .iter() + .map(|x| PeerId::from(x.clone())) + .collect(), + &log, + ); + Arc::new(globals) }; - // Build and start the discovery sub-behaviour - let mut discovery = - Discovery::new(local_key, &config, network_globals.clone(), log).await?; - // start searching for peers - discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); - // Grab our local ENR FORK ID let enr_fork_id = network_globals .local_enr() .eth2() .expect("Local ENR must have a fork id"); - let possible_fork_digests = ctx.fork_context.all_fork_digests(); - let filter = MaxCountSubscriptionFilter { - filter: Self::create_whitelist_filter( - possible_fork_digests, - ctx.chain_spec.attestation_subnet_count, - SYNC_COMMITTEE_SUBNET_COUNT, - ), - max_subscribed_topics: 200, - max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 - }; - - config.gs_config = gossipsub_config(config.network_load, 
ctx.fork_context.clone()); - - // If metrics are enabled for gossipsub build the configuration - let gossipsub_metrics = ctx - .gossipsub_registry - .map(|registry| (registry, GossipsubMetricsConfig::default())); - - let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); - let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( - MessageAuthenticity::Anonymous, - config.gs_config.clone(), - gossipsub_metrics, - filter, - snappy_transform, - ) - .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; - - // Construct a set of gossipsub peer scoring parameters - // We don't know the number of active validators and the current slot yet - let active_validators = TSpec::minimum_validator_count(); - let current_slot = Slot::new(0); - - let thresholds = lighthouse_gossip_thresholds(); - let score_settings = PeerScoreSettings::new(ctx.chain_spec, &config.gs_config); - // Prepare scoring parameters - let params = score_settings.get_peer_score_params( - active_validators, - &thresholds, - &enr_fork_id, - current_slot, - )?; + let gossip_cache = { + let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); + let half_epoch = std::time::Duration::from_secs( + ctx.chain_spec.seconds_per_slot * TSpec::slots_per_epoch() / 2, + ); - trace!(behaviour_log, "Using peer score params"; "params" => ?params); - - // Set up a scoring update interval - let update_gossipsub_scores = tokio::time::interval(params.decay_interval); - - gossipsub - .with_peer_score(params, thresholds) - .expect("Valid score params and thresholds"); - - let peer_manager_cfg = PeerManagerCfg { - discovery_enabled: !config.disable_discovery, - metrics_enabled: config.metrics_enabled, - target_peer_count: config.target_peers, - ..Default::default() + GossipCache::builder() + .beacon_block_timeout(slot_duration) + .aggregates_timeout(half_epoch) + .attestation_timeout(half_epoch) + .voluntary_exit_timeout(half_epoch * 2) + .proposer_slashing_timeout(half_epoch * 2) + .attester_slashing_timeout(half_epoch * 2) + // .signed_contribution_and_proof_timeout(timeout) // Do not retry + // .sync_committee_message_timeout(timeout) // Do not retry + .build() }; - let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); - let half_epoch = std::time::Duration::from_secs( - ctx.chain_spec.seconds_per_slot * TSpec::slots_per_epoch() / 2, - ); - let gossip_cache = GossipCache::builder() - .beacon_block_timeout(slot_duration) - .aggregates_timeout(half_epoch) - .attestation_timeout(half_epoch) - .voluntary_exit_timeout(half_epoch * 2) - .proposer_slashing_timeout(half_epoch * 2) - .attester_slashing_timeout(half_epoch * 2) - // .signed_contribution_and_proof_timeout(timeout) // Do not retry - // .sync_committee_message_timeout(timeout) // Do not retry - .build(); + let local_peer_id = network_globals.local_peer_id(); - Ok(Behaviour { - // Sub-behaviours - gossipsub, - eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), - discovery, - identify: Identify::new(identify_config), - // Auxiliary fields - peer_manager: PeerManager::new(peer_manager_cfg, network_globals.clone(), log).await?, - events: VecDeque::new(), - internal_events: VecDeque::new(), + let (gossipsub, update_gossipsub_scores) = { + let thresholds = lighthouse_gossip_thresholds(); + + // Prepare scoring parameters + let params = { + // Construct a set of gossipsub peer scoring parameters + // We don't know the number of active validators and the current slot yet + let 
active_validators = TSpec::minimum_validator_count(); + let current_slot = Slot::new(0); + score_settings.get_peer_score_params( + active_validators, + &thresholds, + &enr_fork_id, + current_slot, + )? + }; + + trace!(log, "Using peer score params"; "params" => ?params); + + // Set up a scoring update interval + let update_gossipsub_scores = tokio::time::interval(params.decay_interval); + + let possible_fork_digests = ctx.fork_context.all_fork_digests(); + let filter = MaxCountSubscriptionFilter { + filter: utils::create_whitelist_filter( + possible_fork_digests, + ctx.chain_spec.attestation_subnet_count, + SYNC_COMMITTEE_SUBNET_COUNT, + ), + max_subscribed_topics: 200, + max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 + }; + + config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); + + // If metrics are enabled for gossipsub build the configuration + let gossipsub_metrics = ctx + .gossipsub_registry + .map(|registry| (registry, GossipsubMetricsConfig::default())); + + let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); + let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( + MessageAuthenticity::Anonymous, + config.gs_config.clone(), + gossipsub_metrics, + filter, + snappy_transform, + ) + .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; + + gossipsub + .with_peer_score(params, thresholds) + .expect("Valid score params and thresholds"); + + (gossipsub, update_gossipsub_scores) + }; + + let eth2_rpc = RPC::new(ctx.fork_context.clone(), log.clone()); + + let discovery = { + // Build and start the discovery sub-behaviour + let mut discovery = + Discovery::new(&local_keypair, &config, network_globals.clone(), &log).await?; + // start searching for peers + discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); + discovery + }; + + let identify = { + let identify_config = if config.private { + IdentifyConfig::new( + "".into(), + local_keypair.public(), // Still send legitimate public key + ) + .with_cache_size(0) + } else { + IdentifyConfig::new("eth2/1.0.0".into(), local_keypair.public()) + .with_agent_version(lighthouse_version::version_with_platform()) + .with_cache_size(0) + }; + Identify::new(identify_config) + }; + + let peer_manager = { + let peer_manager_cfg = PeerManagerCfg { + discovery_enabled: !config.disable_discovery, + metrics_enabled: config.metrics_enabled, + target_peer_count: config.target_peers, + ..Default::default() + }; + PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)? 
+ }; + + let behaviour = { + Behaviour { + gossipsub, + eth2_rpc, + discovery, + identify, + peer_manager, + } + }; + + let (swarm, bandwidth) = { + // Set up the transport - tcp/ws with noise and mplex + let (transport, bandwidth) = build_transport(local_keypair.clone()) + .map_err(|e| format!("Failed to build transport: {:?}", e))?; + + // use the executor for libp2p + struct Executor(task_executor::TaskExecutor); + impl libp2p::core::Executor for Executor { + fn exec(&self, f: Pin + Send>>) { + self.0.spawn(f, "libp2p"); + } + } + + // sets up the libp2p connection limits + let limits = ConnectionLimits::default() + .with_max_pending_incoming(Some(5)) + .with_max_pending_outgoing(Some(16)) + .with_max_established_incoming(Some( + (config.target_peers as f32 + * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + .ceil() as u32, + )) + .with_max_established_outgoing(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, + )) + .with_max_established(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) + .ceil() as u32, + )) + .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + + ( + SwarmBuilder::new(transport, behaviour, local_peer_id) + .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) + .connection_event_buffer_size(64) + .connection_limits(limits) + .executor(Box::new(Executor(executor))) + .build(), + bandwidth, + ) + }; + + let mut network = Network { + swarm, network_globals, enr_fork_id, - waker: None, network_dir: config.network_dir.clone(), - log: behaviour_log, - score_settings, fork_context: ctx.fork_context, - gossip_cache, + score_settings, update_gossipsub_scores, - }) + gossip_cache, + bandwidth, + local_peer_id, + log, + }; + + network.start(&config).await?; + + let network_globals = network.network_globals.clone(); + + Ok((network, network_globals)) + } + + /// Starts the network: + /// + /// - Starts listening in the given ports. + /// - Dials boot-nodes and libp2p peers. + /// - Subscribes to starting gossipsub topics. 
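The `ConnectionLimits` above are all multiples of `target_peers`. A quick sketch of that arithmetic; the factor values below are assumptions chosen for illustration, not necessarily the constants exported by `peer_manager`:

```rust
// Assumed values for illustration only; the real constants live in the peer_manager module.
const PEER_EXCESS_FACTOR: f32 = 0.1;
const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.2;
const PRIORITY_PEER_EXCESS: f32 = 0.05;

/// Mirrors how the swarm's connection limits are derived from the target peer count:
/// incoming connections are capped below outgoing ones, and the overall cap leaves
/// headroom for priority peers.
fn limits(target_peers: u32) -> (u32, u32, u32) {
    let t = target_peers as f32;
    let max_incoming = (t * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)).ceil() as u32;
    let max_outgoing = (t * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32;
    let max_total = (t * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)).ceil() as u32;
    (max_incoming, max_outgoing, max_total)
}

fn main() {
    let (incoming, outgoing, total) = limits(50);
    println!("max incoming: {incoming}, max outgoing: {outgoing}, max total: {total}");
}
```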
+ async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { + let enr = self.network_globals.local_enr(); + info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); + let discovery_string = if config.disable_discovery { + "None".into() + } else { + config.discovery_port.to_string() + }; + + debug!(self.log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); + + let listen_multiaddr = { + let mut m = Multiaddr::from(config.listen_address); + m.push(MProtocol::Tcp(config.libp2p_port)); + m + }; + + match self.swarm.listen_on(listen_multiaddr.clone()) { + Ok(_) => { + let mut log_address = listen_multiaddr; + log_address.push(MProtocol::P2p(enr.peer_id().into())); + info!(self.log, "Listening established"; "address" => %log_address); + } + Err(err) => { + crit!( + self.log, + "Unable to listen on libp2p address"; + "error" => ?err, + "listen_multiaddr" => %listen_multiaddr, + ); + return Err("Libp2p was unable to listen on the given listen address.".into()); + } + }; + + // helper closure for dialing peers + let mut dial = |mut multiaddr: Multiaddr| { + // strip the p2p protocol if it exists + strip_peer_id(&mut multiaddr); + match self.swarm.dial(multiaddr.clone()) { + Ok(()) => debug!(self.log, "Dialing libp2p peer"; "address" => %multiaddr), + Err(err) => { + debug!(self.log, "Could not connect to peer"; "address" => %multiaddr, "error" => ?err) + } + }; + }; + + // attempt to connect to user-input libp2p nodes + for multiaddr in &config.libp2p_nodes { + dial(multiaddr.clone()); + } + + // attempt to connect to any specified boot-nodes + let mut boot_nodes = config.boot_nodes_enr.clone(); + boot_nodes.dedup(); + + for bootnode_enr in boot_nodes { + for multiaddr in &bootnode_enr.multiaddr() { + // ignore udp multiaddr if it exists + let components = multiaddr.iter().collect::>(); + if let MProtocol::Udp(_) = components[1] { + continue; + } + + if !self + .network_globals + .peers + .read() + .is_connected_or_dialing(&bootnode_enr.peer_id()) + { + dial(multiaddr.clone()); + } + } + } + + for multiaddr in &config.boot_nodes_multiaddr { + // check TCP support for dialing + if multiaddr + .iter() + .any(|proto| matches!(proto, MProtocol::Tcp(_))) + { + dial(multiaddr.clone()); + } + } + + let mut subscribed_topics: Vec = vec![]; + + for topic_kind in &config.topics { + if self.subscribe_kind(topic_kind.clone()) { + subscribed_topics.push(topic_kind.clone()); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic_kind); + } + } + + if !subscribed_topics.is_empty() { + info!(self.log, "Subscribed to topics"; "topics" => ?subscribed_topics); + } + + Ok(()) } /* Public Accessible Functions to interact with the behaviour */ - /// Get a mutable reference to the underlying discovery sub-behaviour. + /// The routing pub-sub mechanism for eth2. + pub fn gossipsub_mut(&mut self) -> &mut Gossipsub { + &mut self.swarm.behaviour_mut().gossipsub + } + /// The Eth2 RPC specified in the wire-0 protocol. + pub fn eth2_rpc_mut(&mut self) -> &mut RPC, TSpec> { + &mut self.swarm.behaviour_mut().eth2_rpc + } + /// Discv5 Discovery protocol. pub fn discovery_mut(&mut self) -> &mut Discovery { - &mut self.discovery + &mut self.swarm.behaviour_mut().discovery + } + /// Provides IP addresses and peer information. 
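One detail of `start` worth calling out: ENR-derived UDP multiaddrs are skipped, and explicitly configured boot-node multiaddrs are only dialed when they advertise TCP. A small sketch of that check, assuming a `libp2p` dependency for the `Multiaddr`/`Protocol` types (the addresses are arbitrary examples):

```rust
use libp2p::multiaddr::{Multiaddr, Protocol};

/// Mirrors the `boot_nodes_multiaddr` filter in `Network::start`: an address is only
/// dialed if at least one of its components is a TCP port.
fn is_tcp_dialable(addr: &Multiaddr) -> bool {
    addr.iter().any(|proto| matches!(proto, Protocol::Tcp(_)))
}

fn main() {
    let tcp: Multiaddr = "/ip4/10.0.0.1/tcp/9000".parse().unwrap();
    let udp: Multiaddr = "/ip4/10.0.0.1/udp/9000".parse().unwrap();
    assert!(is_tcp_dialable(&tcp));
    assert!(!is_tcp_dialable(&udp)); // discovery-only address, not dialable over libp2p TCP
}
```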
+ pub fn identify_mut(&mut self) -> &mut Identify { + &mut self.swarm.behaviour_mut().identify + } + /// The peer manager that keeps track of peer's reputation and status. + pub fn peer_manager_mut(&mut self) -> &mut PeerManager { + &mut self.swarm.behaviour_mut().peer_manager } - /// Get a mutable reference to the peer manager. - pub fn peer_manager_mut(&mut self) -> &mut PeerManager { - &mut self.peer_manager + /// The routing pub-sub mechanism for eth2. + pub fn gossipsub(&self) -> &Gossipsub { + &self.swarm.behaviour().gossipsub + } + /// The Eth2 RPC specified in the wire-0 protocol. + pub fn eth2_rpc(&self) -> &RPC, TSpec> { + &self.swarm.behaviour().eth2_rpc + } + /// Discv5 Discovery protocol. + pub fn discovery(&self) -> &Discovery { + &self.swarm.behaviour().discovery + } + /// Provides IP addresses and peer information. + pub fn identify(&self) -> &Identify { + &self.swarm.behaviour().identify + } + /// The peer manager that keeps track of peer's reputation and status. + pub fn peer_manager(&self) -> &PeerManager { + &self.swarm.behaviour().peer_manager } /// Returns the local ENR of the node. @@ -350,11 +523,6 @@ impl Behaviour { self.network_globals.local_enr() } - /// Obtain a reference to the gossipsub protocol. - pub fn gs(&self) -> &Gossipsub { - &self.gossipsub - } - /* Pubsub behaviour functions */ /// Subscribes to a gossipsub topic kind, letting the network service determine the @@ -413,7 +581,7 @@ impl Behaviour { let topic: Topic = topic.into(); - match self.gossipsub.subscribe(&topic) { + match self.gossipsub_mut().subscribe(&topic) { Err(e) => { warn!(self.log, "Failed to subscribe to topic"; "topic" => %topic, "error" => ?e); false @@ -436,7 +604,7 @@ impl Behaviour { // unsubscribe from the topic let libp2p_topic: Topic = topic.clone().into(); - match self.gossipsub.unsubscribe(&libp2p_topic) { + match self.gossipsub_mut().unsubscribe(&libp2p_topic) { Err(_) => { warn!(self.log, "Failed to unsubscribe from topic"; "topic" => %libp2p_topic); false @@ -455,8 +623,8 @@ impl Behaviour { for topic in message.topics(GossipEncoding::default(), self.enr_fork_id.fork_digest) { let message_data = message.encode(GossipEncoding::default()); if let Err(e) = self - .gossipsub - .publish(topic.clone().into(), message_data.clone()) + .gossipsub_mut() + .publish(Topic::from(topic.clone()), message_data.clone()) { slog::warn!(self.log, "Could not publish message"; "error" => ?e); @@ -515,7 +683,7 @@ impl Behaviour { } } - if let Err(e) = self.gossipsub.report_message_validation_result( + if let Err(e) = self.gossipsub_mut().report_message_validation_result( &message_id, propagation_source, validation_result, @@ -548,16 +716,16 @@ impl Behaviour { "beacon_attestation_subnet_params" => ?beacon_attestation_subnet_params, ); - self.gossipsub + self.gossipsub_mut() .set_topic_params(get_topic(GossipKind::BeaconBlock), beacon_block_params)?; - self.gossipsub.set_topic_params( + self.gossipsub_mut().set_topic_params( get_topic(GossipKind::BeaconAggregateAndProof), beacon_aggregate_proof_params, )?; for i in 0..self.score_settings.attestation_subnet_count() { - self.gossipsub.set_topic_params( + self.gossipsub_mut().set_topic_params( get_topic(GossipKind::Attestation(SubnetId::new(i))), beacon_attestation_subnet_params.clone(), )?; @@ -570,18 +738,17 @@ impl Behaviour { /// Send a request to a peer over RPC. 
pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { - self.eth2_rpc - .send_request(peer_id, RequestId::Application(request_id), request.into()) + self.eth2_rpc_mut().send_request( + peer_id, + RequestId::Application(request_id), + request.into(), + ) } /// Send a successful response to a peer over RPC. - pub fn send_successful_response( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - response: Response, - ) { - self.eth2_rpc.send_response(peer_id, id, response.into()) + pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { + self.eth2_rpc_mut() + .send_response(peer_id, id, response.into()) } /// Inform the peer that their request produced an error. @@ -592,35 +759,54 @@ impl Behaviour { error: RPCResponseErrorCode, reason: String, ) { - self.eth2_rpc - .send_response(peer_id, id, RPCCodedResponse::Error(error, reason.into())) + self.eth2_rpc_mut().send_response( + peer_id, + id, + RPCCodedResponse::Error(error, reason.into()), + ) } /* Peer management functions */ + pub fn testing_dial(&mut self, addr: Multiaddr) -> Result<(), libp2p::swarm::DialError> { + self.swarm.dial(addr) + } + + pub fn report_peer( + &mut self, + peer_id: &PeerId, + action: PeerAction, + source: ReportSource, + msg: &'static str, + ) { + self.peer_manager_mut() + .report_peer(peer_id, action, source, None, msg); + } + /// Disconnects from a peer providing a reason. /// /// This will send a goodbye, disconnect and then ban the peer. /// This is fatal for a peer, and should be used in unrecoverable circumstances. pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - self.peer_manager.goodbye_peer(peer_id, reason, source); + self.peer_manager_mut() + .goodbye_peer(peer_id, reason, source); } /// Returns an iterator over all enr entries in the DHT. - pub fn enr_entries(&mut self) -> Vec { - self.discovery.table_entries_enr() + pub fn enr_entries(&self) -> Vec { + self.discovery().table_entries_enr() } /// Add an ENR to the routing table of the discovery mechanism. pub fn add_enr(&mut self, enr: Enr) { - self.discovery.add_enr(enr); + self.discovery_mut().add_enr(enr); } /// Updates a subnet value to the ENR attnets/syncnets bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. pub fn update_enr_subnet(&mut self, subnet_id: Subnet, value: bool) { - if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) { + if let Err(e) = self.discovery_mut().update_enr_bitfield(subnet_id, value) { crit!(self.log, "Could not update ENR bitfield"; "error" => e); } // update the local meta data which informs our peers of the update during PINGS @@ -631,7 +817,7 @@ impl Behaviour { /// would like to retain the peers for. pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { // If discovery is not started or disabled, ignore the request - if !self.discovery.started { + if !self.discovery().started { return; } @@ -678,13 +864,13 @@ impl Behaviour { // request the subnet query from discovery if !filtered.is_empty() { - self.discovery.discover_subnet_peers(filtered); + self.discovery_mut().discover_subnet_peers(filtered); } } /// Updates the local ENR's "eth2" field with the latest EnrForkId. 
pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { - self.discovery.update_eth2_enr(enr_fork_id.clone()); + self.discovery_mut().update_eth2_enr(enr_fork_id.clone()); // update the local reference self.enr_fork_id = enr_fork_id; @@ -695,13 +881,13 @@ impl Behaviour { /// Updates the current meta data of the node to match the local ENR. fn update_metadata_bitfields(&mut self) { let local_attnets = self - .discovery + .discovery_mut() .local_enr() .attestation_bitfield::() .expect("Local discovery must have attestation bitfield"); let local_syncnets = self - .discovery + .discovery_mut() .local_enr() .sync_committee_bitfield::() .expect("Local discovery must have sync committee bitfield"); @@ -717,7 +903,7 @@ impl Behaviour { } } // Save the updated metadata to disk - save_metadata_to_disk( + utils::save_metadata_to_disk( &self.network_dir, self.network_globals.local_metadata.read().clone(), &self.log, @@ -730,8 +916,8 @@ impl Behaviour { data: *self.network_globals.local_metadata.read().seq_number(), }; trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); - let id = RequestId::Behaviour; - self.eth2_rpc + let id = RequestId::Internal; + self.eth2_rpc_mut() .send_request(peer_id, id, OutboundRequest::Ping(ping)); } @@ -742,14 +928,14 @@ impl Behaviour { }; trace!(self.log, "Sending Pong"; "request_id" => id.1, "peer_id" => %peer_id); let event = RPCCodedResponse::Success(RPCResponse::Pong(ping)); - self.eth2_rpc.send_response(peer_id, id, event); + self.eth2_rpc_mut().send_response(peer_id, id, event); } /// Sends a METADATA request to a peer. fn send_meta_data_request(&mut self, peer_id: PeerId) { let event = OutboundRequest::MetaData(PhantomData); - self.eth2_rpc - .send_request(peer_id, RequestId::Behaviour, event); + self.eth2_rpc_mut() + .send_request(peer_id, RequestId::Internal, event); } /// Sends a METADATA response to a peer. @@ -757,35 +943,36 @@ impl Behaviour { let event = RPCCodedResponse::Success(RPCResponse::MetaData( self.network_globals.local_metadata.read().clone(), )); - self.eth2_rpc.send_response(peer_id, id, event); - } - - /// Returns a reference to the peer manager to allow the swarm to notify the manager of peer - /// status - pub fn peer_manager(&mut self) -> &mut PeerManager { - &mut self.peer_manager + self.eth2_rpc_mut().send_response(peer_id, id, event); } // RPC Propagation methods /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. - fn propagate_response( + #[must_use = "return the response"] + fn build_response( &mut self, id: RequestId, peer_id: PeerId, response: Response, - ) { + ) -> Option> { match id { - RequestId::Application(id) => self.add_event(BehaviourEvent::ResponseReceived { + RequestId::Application(id) => Some(NetworkEvent::ResponseReceived { peer_id, id, response, }), - RequestId::Behaviour => {} + RequestId::Internal => None, } } /// Convenience function to propagate a request. 
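`build_response` above is where internal RPC traffic is separated from application traffic: only ids tagged `RequestId::Application` produce an event, while `Internal` ids (Ping/MetaData issued by the networking layer itself) are dropped. A self-contained sketch of that filtering with simplified stand-in types:

```rust
/// Stand-in for the crate's request id: either tagged by the application or issued
/// internally by the networking layer (Ping / MetaData).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ReqId<A> {
    Application(A),
    Internal,
}

/// Stand-in for the event surfaced to the application.
#[derive(Debug)]
enum Event<A> {
    ResponseReceived { id: A, response: String },
}

/// Mirrors `build_response`: only application-initiated requests yield an event.
fn build_response<A>(id: ReqId<A>, response: String) -> Option<Event<A>> {
    match id {
        ReqId::Application(id) => Some(Event::ResponseReceived { id, response }),
        // Responses to internally issued requests never reach the application.
        ReqId::Internal => None,
    }
}

fn main() {
    assert!(build_response(ReqId::<u32>::Internal, "pong".into()).is_none());
    assert!(build_response(ReqId::Application(7u32), "status".into()).is_some());
}
```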
- fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { + #[must_use = "actually return the event"] + fn build_request( + &mut self, + id: PeerRequestId, + peer_id: PeerId, + request: Request, + ) -> NetworkEvent { // Increment metrics match &request { Request::Status(_) => { @@ -798,18 +985,10 @@ impl Behaviour { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } } - self.add_event(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }); - } - - /// Adds an event to the queue waking the current task to process it. - fn add_event(&mut self, event: BehaviourEvent) { - self.events.push_back(event); - if let Some(waker) = &self.waker { - waker.wake_by_ref(); } } @@ -818,7 +997,7 @@ impl Behaviour { fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet) { let predicate = subnet_predicate::(vec![subnet], &self.log); let peers_to_dial: Vec = self - .discovery + .discovery() .cached_enrs() .filter_map(|(peer_id, enr)| { let peers = self.network_globals.peers.read(); @@ -833,61 +1012,17 @@ impl Behaviour { debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); // Remove the ENR from the cache to prevent continual re-dialing on disconnects - self.discovery.remove_cached_enr(&peer_id); + self.discovery_mut().remove_cached_enr(&peer_id); // For any dial event, inform the peer manager let enr = self.discovery_mut().enr_of_peer(&peer_id); - self.peer_manager.inject_dialing(&peer_id, enr); - self.internal_events - .push_back(InternalBehaviourMessage::DialPeer(peer_id)); + self.peer_manager_mut().dial_peer(&peer_id, enr); } } - /// Creates a whitelist topic filter that covers all possible topics using the given set of - /// possible fork digests. - fn create_whitelist_filter( - possible_fork_digests: Vec<[u8; 4]>, - attestation_subnet_count: u64, - sync_committee_subnet_count: u64, - ) -> WhitelistSubscriptionFilter { - let mut possible_hashes = HashSet::new(); - for fork_digest in possible_fork_digests { - let mut add = |kind| { - let topic: Topic = - GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); - possible_hashes.insert(topic.hash()); - }; + /* Sub-behaviour event handling functions */ - use GossipKind::*; - add(BeaconBlock); - add(BeaconAggregateAndProof); - add(VoluntaryExit); - add(ProposerSlashing); - add(AttesterSlashing); - add(SignedContributionAndProof); - for id in 0..attestation_subnet_count { - add(Attestation(SubnetId::new(id))); - } - for id in 0..sync_committee_subnet_count { - add(SyncCommitteeMessage(SyncSubnetId::new(id))); - } - } - WhitelistSubscriptionFilter(possible_hashes) - } -} - -/* Behaviour Event Process Implementations - * - * These implementations dictate how to process each event that is emitted from each - * sub-behaviour. - */ - -// Gossipsub -impl NetworkBehaviourEventProcess for Behaviour -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: GossipsubEvent) { + /// Handle a gossipsub event. 
+ fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option> { match event { GossipsubEvent::Message { propagation_source, @@ -900,7 +1035,7 @@ where Err(e) => { debug!(self.log, "Could not decode gossipsub message"; "topic" => ?gs_msg.topic,"error" => e); //reject the message - if let Err(e) = self.gossipsub.report_message_validation_result( + if let Err(e) = self.gossipsub_mut().report_message_validation_result( &id, &propagation_source, MessageAcceptance::Reject, @@ -910,7 +1045,7 @@ where } Ok(msg) => { // Notify the network - self.add_event(BehaviourEvent::PubsubMessage { + return Some(NetworkEvent::PubsubMessage { id, source: propagation_source, topic: gs_msg.topic, @@ -931,7 +1066,12 @@ where if let Some(msgs) = self.gossip_cache.retrieve(&topic) { for data in msgs { let topic_str: &str = topic.kind().as_ref(); - match self.gossipsub.publish(topic.clone().into(), data) { + match self + .swarm + .behaviour_mut() + .gossipsub + .publish(Topic::from(topic.clone()), data) + { Ok(_) => { warn!(self.log, "Gossip message published on retry"; "topic" => topic_str); if let Some(v) = metrics::get_int_counter( @@ -965,7 +1105,7 @@ where } GossipsubEvent::GossipsubNotSupported { peer_id } => { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); - self.peer_manager.report_peer( + self.peer_manager_mut().report_peer( &peer_id, PeerAction::LowToleranceError, ReportSource::Gossipsub, @@ -974,26 +1114,23 @@ where ); } } + None } -} -// RPC -impl NetworkBehaviourEventProcess, TSpec>> - for Behaviour -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: RPCMessage, TSpec>) { + /// Handle an RPC event. + fn inject_rpc_event( + &mut self, + event: RPCMessage, TSpec>, + ) -> Option> { let peer_id = event.peer_id; - if !self.peer_manager.is_connected(&peer_id) { + if !self.peer_manager().is_connected(&peer_id) { debug!( self.log, "Ignoring rpc message of disconnecting peer"; event ); - return; + return None; } let handler_id = event.conn_id; @@ -1009,16 +1146,17 @@ where // Inform the peer manager of the error. // An inbound error here means we sent an error to the peer, or the stream // timed out. - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, proto, &error, ConnectionDirection::Incoming, ); + None } HandlerErr::Outbound { id, proto, error } => { // Inform the peer manager that a request we sent to the peer failed - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, proto, &error, @@ -1026,7 +1164,9 @@ where ); // inform failures of requests comming outside the behaviour if let RequestId::Application(id) = id { - self.add_event(BehaviourEvent::RPCFailed { peer_id, id }); + Some(NetworkEvent::RPCFailed { peer_id, id }) + } else { + None } } } @@ -1037,13 +1177,15 @@ where /* Behaviour managed protocols: Ping and Metadata */ InboundRequest::Ping(ping) => { // inform the peer manager and send the response - self.peer_manager.ping_request(&peer_id, ping.data); + self.peer_manager_mut().ping_request(&peer_id, ping.data); // send a ping response self.pong(peer_request_id, peer_id); + None } InboundRequest::MetaData(_) => { // send the requested meta-data self.send_meta_data_response((handler_id, id), peer_id); + None } InboundRequest::Goodbye(reason) => { // queue for disconnection without a goodbye message @@ -1057,13 +1199,16 @@ where // disconnecting here. The RPC handler will automatically // disconnect for us. 
// The actual disconnection event will be relayed to the application. + None } /* Protocols propagated to the Network */ InboundRequest::Status(msg) => { // inform the peer manager that we have received a status from a peer - self.peer_manager.peer_statusd(&peer_id); + self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards - self.propagate_request(peer_request_id, peer_id, Request::Status(msg)) + let event = + self.build_request(peer_request_id, peer_id, Request::Status(msg)); + Some(event) } InboundRequest::BlocksByRange(req) => { let methods::OldBlocksByRangeRequest { @@ -1073,7 +1218,7 @@ where } = req; // Still disconnect the peer if the request is naughty. if step == 0 { - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, Protocol::BlocksByRange, &RPCError::InvalidData( @@ -1081,41 +1226,53 @@ where ), ConnectionDirection::Incoming, ); + return None; } // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 if step > 1 { count = 1; } - self.propagate_request( + let event = self.build_request( peer_request_id, peer_id, Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }), ); + Some(event) } InboundRequest::BlocksByRoot(req) => { - self.propagate_request(peer_request_id, peer_id, Request::BlocksByRoot(req)) + let event = self.build_request( + peer_request_id, + peer_id, + Request::BlocksByRoot(req), + ); + Some(event) } } } Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ - RPCResponse::Pong(ping) => self.peer_manager.pong_response(&peer_id, ping.data), + RPCResponse::Pong(ping) => { + self.peer_manager_mut().pong_response(&peer_id, ping.data); + None + } RPCResponse::MetaData(meta_data) => { - self.peer_manager.meta_data_response(&peer_id, meta_data) + self.peer_manager_mut() + .meta_data_response(&peer_id, meta_data); + None } /* Network propagated protocols */ RPCResponse::Status(msg) => { // inform the peer manager that we have received a status from a peer - self.peer_manager.peer_statusd(&peer_id); + self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards - self.propagate_response(id, peer_id, Response::Status(msg)); + self.build_response(id, peer_id, Response::Status(msg)) } RPCResponse::BlocksByRange(resp) => { - self.propagate_response(id, peer_id, Response::BlocksByRange(Some(resp))) + self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } RPCResponse::BlocksByRoot(resp) => { - self.propagate_response(id, peer_id, Response::BlocksByRoot(Some(resp))) + self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } } } @@ -1124,52 +1281,32 @@ where ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), }; - self.propagate_response(id, peer_id, response); + self.build_response(id, peer_id, response) } } } -} -// Discovery -impl NetworkBehaviourEventProcess for Behaviour -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: DiscoveryEvent) { - match event { - DiscoveryEvent::SocketUpdated(socket_addr) => { - // A new UDP socket has been detected. - // Build a multiaddr to report to libp2p - let mut multiaddr = Multiaddr::from(socket_addr.ip()); - // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling - // should handle this. 
- multiaddr.push(MProtocol::Tcp(self.network_globals.listen_port_tcp())); - self.internal_events - .push_back(InternalBehaviourMessage::SocketUpdated(multiaddr)); - } - DiscoveryEvent::QueryResult(results) => { - let to_dial_peers = self.peer_manager.peers_discovered(results); - for peer_id in to_dial_peers { - debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); - // For any dial event, inform the peer manager - let enr = self.discovery_mut().enr_of_peer(&peer_id); - self.peer_manager.inject_dialing(&peer_id, enr); - self.internal_events - .push_back(InternalBehaviourMessage::DialPeer(peer_id)); - } - } + /// Handle a discovery event. + fn inject_discovery_event( + &mut self, + event: DiscoveredPeers, + ) -> Option> { + let DiscoveredPeers { peers } = event; + let to_dial_peers = self.peer_manager_mut().peers_discovered(peers); + for peer_id in to_dial_peers { + debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); + // For any dial event, inform the peer manager + let enr = self.discovery_mut().enr_of_peer(&peer_id); + self.peer_manager_mut().dial_peer(&peer_id, enr); } + None } -} -// Identify -impl NetworkBehaviourEventProcess for Behaviour -where - TSpec: EthSpec, - AppReqId: ReqId, -{ - fn inject_event(&mut self, event: IdentifyEvent) { + /// Handle an identify event. + fn inject_identify_event( + &mut self, + event: IdentifyEvent, + ) -> Option> { match event { IdentifyEvent::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { @@ -1180,66 +1317,155 @@ where info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); } // send peer info to the peer manager. - self.peer_manager.identify(&peer_id, &info); + self.peer_manager_mut().identify(&peer_id, &info); } IdentifyEvent::Sent { .. } => {} IdentifyEvent::Error { .. } => {} IdentifyEvent::Pushed { .. } => {} } + None } -} -type BehaviourHandler = - as NetworkBehaviour>::ConnectionHandler; - -impl Behaviour -where - TSpec: EthSpec, - AppReqId: ReqId, -{ - /// Consumes the events list and drives the Lighthouse global NetworkBehaviour. - fn poll( + /// Handle a peer manager event. 
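Returning to the inbound BlocksByRange handling a little further up: a `step` of 0 is treated as an invalid request (and penalised), and any `step` greater than 1 is served as a single block, per the consensus-specs change linked in the code. A standalone sketch of that normalization using simplified types:

```rust
/// Simplified stand-in for the legacy wire request, which still carries `step`.
struct OldBlocksByRangeRequest {
    start_slot: u64,
    count: u64,
    step: u64,
}

/// Simplified stand-in for the application-level request (no `step`).
#[derive(Debug, PartialEq)]
struct BlocksByRangeRequest {
    start_slot: u64,
    count: u64,
}

/// Mirrors the inbound handling: reject step == 0, serve a single block when step > 1.
fn normalize(req: OldBlocksByRangeRequest) -> Result<BlocksByRangeRequest, &'static str> {
    if req.step == 0 {
        return Err("sent a BlocksByRange request with a step of 0");
    }
    let count = if req.step > 1 { 1 } else { req.count };
    Ok(BlocksByRangeRequest { start_slot: req.start_slot, count })
}

fn main() {
    assert!(normalize(OldBlocksByRangeRequest { start_slot: 0, count: 10, step: 0 }).is_err());
    assert_eq!(
        normalize(OldBlocksByRangeRequest { start_slot: 5, count: 10, step: 4 }).unwrap(),
        BlocksByRangeRequest { start_slot: 5, count: 1 }
    );
}
```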
+ fn inject_pm_event( &mut self, - cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll, BehaviourHandler>> { - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); + event: PeerManagerEvent, + ) -> Option> { + match event { + PeerManagerEvent::PeerConnectedIncoming(peer_id) => { + Some(NetworkEvent::PeerConnectedIncoming(peer_id)) } - } else { - self.waker = Some(cx.waker().clone()); - } - - // Handle internal events first - if let Some(event) = self.internal_events.pop_front() { - match event { - InternalBehaviourMessage::DialPeer(peer_id) => { - // Submit the event - let handler = self.new_handler(); - return Poll::Ready(NBAction::Dial { - opts: DialOpts::peer_id(peer_id) - .condition(PeerCondition::Disconnected) - .build(), - handler, - }); - } - InternalBehaviourMessage::SocketUpdated(address) => { - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); - } + PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { + Some(NetworkEvent::PeerConnectedOutgoing(peer_id)) + } + PeerManagerEvent::PeerDisconnected(peer_id) => { + Some(NetworkEvent::PeerDisconnected(peer_id)) + } + PeerManagerEvent::Banned(peer_id, associated_ips) => { + self.discovery_mut().ban_peer(&peer_id, associated_ips); + Some(NetworkEvent::PeerBanned(peer_id)) + } + PeerManagerEvent::UnBanned(peer_id, associated_ips) => { + self.discovery_mut().unban_peer(&peer_id, associated_ips); + Some(NetworkEvent::PeerUnbanned(peer_id)) + } + PeerManagerEvent::Status(peer_id) => { + // it's time to status. We don't keep a beacon chain reference here, so we inform + // the network to send a status to this peer + Some(NetworkEvent::StatusPeer(peer_id)) + } + PeerManagerEvent::DiscoverPeers(peers_to_find) => { + // Peer manager has requested a discovery query for more peers. + self.discovery_mut().discover_peers(peers_to_find); + None + } + PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { + // Peer manager has requested a subnet discovery query for more peers. + self.discover_subnet_peers(subnets_to_discover); + None + } + PeerManagerEvent::Ping(peer_id) => { + // send a ping request to this peer + self.ping(peer_id); + None + } + PeerManagerEvent::MetaData(peer_id) => { + self.send_meta_data_request(peer_id); + None + } + PeerManagerEvent::DisconnectPeer(peer_id, reason) => { + debug!(self.log, "Peer Manager disconnecting peer"; + "peer_id" => %peer_id, "reason" => %reason); + // send one goodbye + self.eth2_rpc_mut() + .shutdown(peer_id, RequestId::Internal, reason); + None } } + } - if let Some(event) = self.events.pop_front() { - return Poll::Ready(NBAction::GenerateEvent(event)); + /* Networking polling */ + + /// Poll the p2p networking stack. + /// + /// This will poll the swarm and do maintenance routines. + pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { + while let Poll::Ready(Some(swarm_event)) = self.swarm.poll_next_unpin(cx) { + let maybe_event = match swarm_event { + SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { + // Handle sub-behaviour events. + BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), + BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), + BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), + BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), + BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + }, + SwarmEvent::ConnectionEstablished { .. } => None, + SwarmEvent::ConnectionClosed { .. 
} => None, + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + } => { + trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); + None + } + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + } => { + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); + None + } + SwarmEvent::OutgoingConnectionError { peer_id, error } => { + debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); + None + } + SwarmEvent::BannedPeer { + peer_id, + endpoint: _, + } => { + debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + None + } + SwarmEvent::NewListenAddr { address, .. } => { + Some(NetworkEvent::NewListenAddr(address)) + } + SwarmEvent::ExpiredListenAddr { address, .. } => { + debug!(self.log, "Listen address expired"; "address" => %address); + None + } + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { + crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::ListenerError { error, .. } => { + // this is non fatal, but we still check + warn!(self.log, "Listener error"; "error" => ?error); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::Dialing(_) => None, + }; + + if let Some(ev) = maybe_event { + return Poll::Ready(ev); + } } // perform gossipsub score updates when necessary while self.update_gossipsub_scores.poll_tick(cx).is_ready() { - self.peer_manager.update_gossipsub_scores(&self.gossipsub); + let this = self.swarm.behaviour_mut(); + this.peer_manager.update_gossipsub_scores(&this.gossipsub); } // poll the gossipsub cache to clear expired messages @@ -1256,161 +1482,10 @@ where } } } - Poll::Pending } -} -impl NetworkBehaviourEventProcess - for Behaviour -{ - fn inject_event(&mut self, event: PeerManagerEvent) { - match event { - PeerManagerEvent::PeerConnectedIncoming(peer_id) => { - self.add_event(BehaviourEvent::PeerConnectedIncoming(peer_id)); - } - PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { - self.add_event(BehaviourEvent::PeerConnectedOutgoing(peer_id)); - } - PeerManagerEvent::PeerDisconnected(peer_id) => { - self.add_event(BehaviourEvent::PeerDisconnected(peer_id)); - } - PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.discovery.ban_peer(&peer_id, associated_ips); - self.add_event(BehaviourEvent::PeerBanned(peer_id)); - } - PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.discovery.unban_peer(&peer_id, associated_ips); - self.add_event(BehaviourEvent::PeerUnbanned(peer_id)); - } - PeerManagerEvent::Status(peer_id) => { - // it's time to status. We don't keep a beacon chain reference here, so we inform - // the network to send a status to this peer - self.add_event(BehaviourEvent::StatusPeer(peer_id)); - } - PeerManagerEvent::DiscoverPeers(peers_to_find) => { - // Peer manager has requested a discovery query for more peers. - self.discovery.discover_peers(peers_to_find); - } - PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { - // Peer manager has requested a subnet discovery query for more peers. 
- self.discover_subnet_peers(subnets_to_discover); - } - PeerManagerEvent::Ping(peer_id) => { - // send a ping request to this peer - self.ping(peer_id); - } - PeerManagerEvent::MetaData(peer_id) => { - self.send_meta_data_request(peer_id); - } - PeerManagerEvent::DisconnectPeer(peer_id, reason) => { - debug!(self.log, "Peer Manager disconnecting peer"; - "peer_id" => %peer_id, "reason" => %reason); - // send one goodbye - self.eth2_rpc - .shutdown(peer_id, RequestId::Behaviour, reason); - } - } - } -} - -/* Public API types */ - -/// The type of RPC requests the Behaviour informs it has received and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level requests that can be -// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't -// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`. -#[derive(Debug, Clone, PartialEq)] -pub enum Request { - /// A Status message. - Status(StatusMessage), - /// A blocks by range request. - BlocksByRange(BlocksByRangeRequest), - /// A request blocks root request. - BlocksByRoot(BlocksByRootRequest), -} - -impl std::convert::From for OutboundRequest { - fn from(req: Request) -> OutboundRequest { - match req { - Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => { - OutboundRequest::BlocksByRange(methods::OldBlocksByRangeRequest { - start_slot, - count, - step: 1, - }) - } - Request::Status(s) => OutboundRequest::Status(s), - } - } -} - -/// The type of RPC responses the Behaviour informs it has received, and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level responses that can be -// sent. The main difference is the absense of Pong and Metadata, which don't leave the -// Behaviour. For all protocol reponses managed by RPC see `RPCResponse` and -// `RPCCodedResponse`. -#[derive(Debug, Clone, PartialEq)] -pub enum Response { - /// A Status message. - Status(StatusMessage), - /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. - BlocksByRange(Option>>), - /// A response to a get BLOCKS_BY_ROOT request. 
- BlocksByRoot(Option>>), -} - -impl std::convert::From> for RPCCodedResponse { - fn from(resp: Response) -> RPCCodedResponse { - match resp { - Response::BlocksByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot), - }, - Response::BlocksByRange(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), - }, - Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), - } - } -} - -/// Persist metadata to disk -pub fn save_metadata_to_disk(dir: &Path, metadata: MetaData, log: &slog::Logger) { - let _ = std::fs::create_dir_all(&dir); - match File::create(dir.join(METADATA_FILENAME)) - .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) - { - Ok(_) => { - debug!(log, "Metadata written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write metadata to disk"; - "file" => format!("{:?}{:?}", dir, METADATA_FILENAME), - "error" => %e - ); - } - } -} - -impl slog::Value for RequestId { - fn serialize( - &self, - record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - match self { - RequestId::Behaviour => slog::Value::serialize("Behaviour", record, key, serializer), - RequestId::Application(ref id) => { - slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer) - } - } + pub async fn next_event(&mut self) -> NetworkEvent { + futures::future::poll_fn(|cx| self.poll_network(cx)).await } } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs new file mode 100644 index 000000000..2aaa46fe8 --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -0,0 +1,288 @@ +use crate::multiaddr::Protocol; +use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; +use crate::types::{ + error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, +}; +use crate::{GossipTopic, NetworkConfig}; +use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks}; +use libp2p::core::{ + identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, +}; +use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter; +use libp2p::gossipsub::IdentTopic as Topic; +use libp2p::{core, noise, PeerId, Transport}; +use prometheus_client::registry::Registry; +use slog::{debug, warn}; +use ssz::Decode; +use ssz::Encode; +use std::collections::HashSet; +use std::fs::File; +use std::io::prelude::*; +use std::path::Path; +use std::sync::Arc; +use std::time::Duration; +use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId}; + +pub const NETWORK_KEY_FILENAME: &str = "key"; +/// The maximum simultaneous libp2p connections per peer. +pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; +/// The filename to store our local metadata. +pub const METADATA_FILENAME: &str = "metadata"; + +pub struct Context<'a> { + pub config: &'a NetworkConfig, + pub enr_fork_id: EnrForkId, + pub fork_context: Arc, + pub chain_spec: &'a ChainSpec, + pub gossipsub_registry: Option<&'a mut Registry>, +} + +type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; + +/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and +/// mplex as the multiplexing layer. 
+pub fn build_transport( + local_private_key: Keypair, +) -> std::io::Result<(BoxedTransport, Arc)> { + let tcp = + libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true)); + let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; + #[cfg(feature = "libp2p-websocket")] + let transport = { + let trans_clone = transport.clone(); + transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) + }; + + let (transport, bandwidth) = BandwidthLogging::new(transport); + + // mplex config + let mut mplex_config = libp2p::mplex::MplexConfig::new(); + mplex_config.set_max_buffer_size(256); + mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); + + // yamux config + let mut yamux_config = libp2p::yamux::YamuxConfig::default(); + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + + // Authentication + Ok(( + transport + .upgrade(core::upgrade::Version::V1) + .authenticate(generate_noise_config(&local_private_key)) + .multiplex(core::upgrade::SelectUpgrade::new( + yamux_config, + mplex_config, + )) + .timeout(Duration::from_secs(10)) + .boxed(), + bandwidth, + )) +} + +// Useful helper functions for debugging. Currently not used in the client. +#[allow(dead_code)] +fn keypair_from_hex(hex_bytes: &str) -> error::Result { + let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { + stripped.to_string() + } else { + hex_bytes.to_string() + }; + + hex::decode(&hex_bytes) + .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) + .and_then(keypair_from_bytes) +} + +#[allow(dead_code)] +fn keypair_from_bytes(mut bytes: Vec) -> error::Result { + libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) + .map(|secret| { + let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); + Keypair::Secp256k1(keypair) + }) + .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) +} + +/// Loads a private key from disk. If this fails, a new key is +/// generated and is then saved to disk. +/// +/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. +pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { + // check for key from disk + let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME); + if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { + let mut key_bytes: Vec = Vec::with_capacity(36); + match network_key_file.read_to_end(&mut key_bytes) { + Err(_) => debug!(log, "Could not read network key file"), + Ok(_) => { + // only accept secp256k1 keys for now + if let Ok(secret_key) = + libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) + { + let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); + debug!(log, "Loaded network key from disk."); + return Keypair::Secp256k1(kp); + } else { + debug!(log, "Network key file is not a valid secp256k1 key"); + } + } + } + } + + // if a key could not be loaded from disk, generate a new one and save it + let local_private_key = Keypair::generate_secp256k1(); + if let Keypair::Secp256k1(key) = local_private_key.clone() { + let _ = std::fs::create_dir_all(&config.network_dir); + match File::create(network_key_f.clone()) + .and_then(|mut f| f.write_all(&key.secret().to_bytes())) + { + Ok(_) => { + debug!(log, "New network key generated and written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write node key to file: {:?}. 
error: {}", network_key_f, e + ); + } + } + } + local_private_key +} + +/// Generate authenticated XX Noise config from identity keys +fn generate_noise_config( + identity_keypair: &Keypair, +) -> noise::NoiseAuthenticated { + let static_dh_keys = noise::Keypair::::new() + .into_authentic(identity_keypair) + .expect("signing can fail only once during starting a node"); + noise::NoiseConfig::xx(static_dh_keys).into_authenticated() +} + +/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p +/// only supports dialing to an address without providing the peer id. +pub fn strip_peer_id(addr: &mut Multiaddr) { + let last = addr.pop(); + match last { + Some(Protocol::P2p(_)) => {} + Some(other) => addr.push(other), + _ => {} + } +} + +/// Load metadata from persisted file. Return default metadata if loading fails. +pub fn load_or_build_metadata( + network_dir: &std::path::Path, + log: &slog::Logger, +) -> MetaData { + // We load a V2 metadata version by default (regardless of current fork) + // since a V2 metadata can be converted to V1. The RPC encoder is responsible + // for sending the correct metadata version based on the negotiated protocol version. + let mut meta_data = MetaDataV2 { + seq_number: 0, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }; + // Read metadata from persisted file if available + let metadata_path = network_dir.join(METADATA_FILENAME); + if let Ok(mut metadata_file) = File::open(metadata_path) { + let mut metadata_ssz = Vec::new(); + if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { + // Attempt to read a MetaDataV2 version from the persisted file, + // if that fails, read MetaDataV1 + match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + meta_data.seq_number = persisted_metadata.seq_number; + // Increment seq number if persisted attnet is not default + if persisted_metadata.attnets != meta_data.attnets + || persisted_metadata.syncnets != meta_data.syncnets + { + meta_data.seq_number += 1; + } + debug!(log, "Loaded metadata from disk"); + } + Err(_) => { + match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + let persisted_metadata = MetaData::V1(persisted_metadata); + // Increment seq number as the persisted metadata version is updated + meta_data.seq_number = *persisted_metadata.seq_number() + 1; + debug!(log, "Loaded metadata from disk"); + } + Err(e) => { + debug!( + log, + "Metadata from file could not be decoded"; + "error" => ?e, + ); + } + } + } + } + } + }; + + // Wrap the MetaData + let meta_data = MetaData::V2(meta_data); + + debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); + save_metadata_to_disk(network_dir, meta_data.clone(), log); + meta_data +} + +/// Creates a whitelist topic filter that covers all possible topics using the given set of +/// possible fork digests. 
+pub(crate) fn create_whitelist_filter( + possible_fork_digests: Vec<[u8; 4]>, + attestation_subnet_count: u64, + sync_committee_subnet_count: u64, +) -> WhitelistSubscriptionFilter { + let mut possible_hashes = HashSet::new(); + for fork_digest in possible_fork_digests { + let mut add = |kind| { + let topic: Topic = + GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); + possible_hashes.insert(topic.hash()); + }; + + use GossipKind::*; + add(BeaconBlock); + add(BeaconAggregateAndProof); + add(VoluntaryExit); + add(ProposerSlashing); + add(AttesterSlashing); + add(SignedContributionAndProof); + for id in 0..attestation_subnet_count { + add(Attestation(SubnetId::new(id))); + } + for id in 0..sync_committee_subnet_count { + add(SyncCommitteeMessage(SyncSubnetId::new(id))); + } + } + WhitelistSubscriptionFilter(possible_hashes) +} + +/// Persist metadata to disk +pub(crate) fn save_metadata_to_disk( + dir: &Path, + metadata: MetaData, + log: &slog::Logger, +) { + let _ = std::fs::create_dir_all(&dir); + match File::create(dir.join(METADATA_FILENAME)) + .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) + { + Ok(_) => { + debug!(log, "Metadata written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write metadata to disk"; + "file" => format!("{:?}{:?}", dir, METADATA_FILENAME), + "error" => %e + ); + } + } +} diff --git a/beacon_node/lighthouse_network/tests/common/behaviour.rs b/beacon_node/lighthouse_network/tests/common/behaviour.rs index 76eecfcbc..50fe6941d 100644 --- a/beacon_node/lighthouse_network/tests/common/behaviour.rs +++ b/beacon_node/lighthouse_network/tests/common/behaviour.rs @@ -23,7 +23,8 @@ use std::collections::HashMap; use std::task::{Context, Poll}; -use libp2p::core::connection::{ConnectedPoint, ConnectionId, ListenerId}; +use libp2p::core::connection::{ConnectedPoint, ConnectionId}; +use libp2p::core::transport::ListenerId; use libp2p::swarm::handler::{ConnectionHandler, DummyConnectionHandler, IntoConnectionHandler}; use libp2p::swarm::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use libp2p::{Multiaddr, PeerId}; diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index ea770de6c..a3c32d0fb 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -1,10 +1,10 @@ #![cfg(test)] use libp2p::gossipsub::GossipsubConfigBuilder; +use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::Enr; use lighthouse_network::EnrExt; use lighthouse_network::Multiaddr; -use lighthouse_network::Service as LibP2PService; -use lighthouse_network::{Libp2pEvent, NetworkConfig}; +use lighthouse_network::{NetworkConfig, NetworkEvent}; use slog::{debug, error, o, Drain}; use std::sync::Arc; use std::sync::Weak; @@ -119,18 +119,19 @@ pub async fn build_libp2p_instance( LibP2PService::new(executor, libp2p_context, &log) .await .expect("should build libp2p instance") - .1, + .0, signal, ) } #[allow(dead_code)] pub fn get_enr(node: &LibP2PService) -> Enr { - node.swarm.behaviour().local_enr() + node.local_enr() } // Returns `n` libp2p peers in fully connected topology. #[allow(dead_code)] +/* pub async fn build_full_mesh( rt: Weak, log: slog::Logger, @@ -157,8 +158,7 @@ pub async fn build_full_mesh( } } nodes -} - +}*/ // Constructs a pair of nodes with separate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. 
#[allow(dead_code)] @@ -173,19 +173,19 @@ pub async fn build_node_pair( let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name).await; let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name).await; - let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone(); + let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone(); // let the two nodes set up listeners let sender_fut = async { loop { - if let Libp2pEvent::NewListenAddr(_) = sender.next_event().await { + if let NetworkEvent::NewListenAddr(_) = sender.next_event().await { return; } } }; let receiver_fut = async { loop { - if let Libp2pEvent::NewListenAddr(_) = receiver.next_event().await { + if let NetworkEvent::NewListenAddr(_) = receiver.next_event().await { return; } } @@ -199,7 +199,8 @@ pub async fn build_node_pair( _ = joined => {} } - match libp2p::Swarm::dial(&mut sender.swarm, receiver_multiaddr.clone()) { + // sender.dial_peer(peer_id); + match sender.testing_dial(receiver_multiaddr.clone()) { Ok(()) => { debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr)) } @@ -226,7 +227,7 @@ pub async fn build_linear( .map(|x| get_enr(x).multiaddr()[1].clone()) .collect(); for i in 0..n - 1 { - match libp2p::Swarm::dial(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) { + match nodes[i].testing_dial(multiaddrs[i + 1].clone()) { Ok(()) => debug!(log, "Connected"), Err(_) => error!(log, "Failed to connect"), }; diff --git a/beacon_node/lighthouse_network/tests/pm_tests.rs b/beacon_node/lighthouse_network/tests/pm_tests.rs index 96767204d..17a044ced 100644 --- a/beacon_node/lighthouse_network/tests/pm_tests.rs +++ b/beacon_node/lighthouse_network/tests/pm_tests.rs @@ -98,9 +98,7 @@ async fn banned_peers_consistency() { discovery_enabled: false, ..Default::default() }; - let pm = PeerManager::new(pm_config, globals.clone(), &pm_log) - .await - .unwrap(); + let pm = PeerManager::new(pm_config, globals.clone(), &pm_log).unwrap(); let mut pm_swarm = swarm::new_test_swarm(Behaviour::new(pm)); let pm_addr = swarm::bind_listener(&mut pm_swarm).await; let service = Service { swarm: pm_swarm }; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 90052859b..918345349 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,8 +1,6 @@ #![cfg(test)] use lighthouse_network::rpc::methods::*; -use lighthouse_network::{ - rpc::max_rpc_size, BehaviourEvent, Libp2pEvent, ReportSource, Request, Response, -}; +use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; use slog::{debug, warn, Level}; use ssz::Encode; use ssz_types::VariableList; @@ -86,19 +84,16 @@ fn test_status_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => { + } => { // Should receive the RPC response debug!(log, "Sender Received"); assert_eq!(response, rpc_response.clone()); @@ -114,19 +109,15 @@ fn test_status_rpc() { 
let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response debug!(log, "Receiver Received"); - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } } _ => {} // Ignore other events @@ -191,20 +182,16 @@ fn test_blocks_by_range_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: _, response, - }) => { + } => { warn!(log, "Sender received a response"); match response { Response::BlocksByRange(Some(_)) => { @@ -236,11 +223,11 @@ fn test_blocks_by_range_chunked_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); @@ -254,18 +241,10 @@ fn test_blocks_by_range_chunked_rpc() { } else { rpc_response_merge_small.clone() }; - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -318,17 +297,13 @@ fn test_blocks_by_range_over_limit() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE - Libp2pEvent::Behaviour(BehaviourEvent::RPCFailed { id, .. }) => { + NetworkEvent::RPCFailed { id, .. 
} => { assert_eq!(id, request_id); return; } @@ -341,28 +316,20 @@ fn test_blocks_by_range_over_limit() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_merge_large.clone(); - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -418,20 +385,16 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: _, response, - }) => + } => // Should receive the RPC response { debug!(log, "Sender received a response"); @@ -469,11 +432,11 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { .await { futures::future::Either::Left(( - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }), + }, _, )) => { if request == rpc_request { @@ -490,11 +453,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.behaviour_mut().send_successful_response( - *peer_id, - *stream_id, - rpc_response.clone(), - ); + receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -550,19 +509,16 @@ fn test_blocks_by_range_single_empty_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => match response { + } => match response { Response::BlocksByRange(Some(_)) => { assert_eq!(response, rpc_response.clone()); messages_received += 1; @@ -585,28 +541,20 @@ fn test_blocks_by_range_single_empty_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 1..=messages_to_send 
{ - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -676,19 +624,16 @@ fn test_blocks_by_root_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 6, rpc_request.clone()); + sender.send_request(peer_id, 6, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 6, response, - }) => match response { + } => match response { Response::BlocksByRoot(Some(_)) => { if messages_received < 2 { assert_eq!(response, rpc_response_base.clone()); @@ -717,11 +662,11 @@ fn test_blocks_by_root_chunked_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response debug!(log, "Receiver got request"); @@ -735,19 +680,11 @@ fn test_blocks_by_root_chunked_rpc() { } else { rpc_response_merge_small.clone() }; - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response, - ); + receiver.send_response(peer_id, id, rpc_response); debug!(log, "Sending message"); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); debug!(log, "Send stream term"); } } @@ -811,19 +748,16 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => { + } => { debug!(log, "Sender received a response"); match response { Response::BlocksByRoot(Some(_)) => { @@ -861,11 +795,11 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { .await { futures::future::Either::Left(( - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }), + }, _, )) => { if request == rpc_request { @@ -882,11 +816,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.behaviour_mut().send_successful_response( - *peer_id, - *stream_id, - rpc_response.clone(), - ); + receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send 
{ // stop sending messages @@ -926,16 +856,16 @@ fn test_goodbye_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a goodbye and disconnect debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().goodbye_peer( + sender.goodbye_peer( &peer_id, GoodbyeReason::IrrelevantNetwork, ReportSource::SyncService, ); } - Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { + NetworkEvent::PeerDisconnected(_) => { return; } _ => {} // Ignore other RPC messages @@ -947,7 +877,7 @@ fn test_goodbye_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { + NetworkEvent::PeerDisconnected(_) => { // Should receive sent RPC request return; } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f5e32dcff..ec8573ea1 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -11,17 +11,16 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; -use lighthouse_network::{ - prometheus_client::registry::Registry, MessageAcceptance, Service as LibP2PService, -}; +use futures::StreamExt; +use lighthouse_network::service::Network; +use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, - Context, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, - Response, Subnet, + Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, - BehaviourEvent, MessageId, NetworkGlobals, PeerId, + MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; @@ -171,7 +170,7 @@ pub struct NetworkService { /// A reference to the underlying beacon chain. beacon_chain: Arc>, /// The underlying libp2p service that drives all the network interactions. - libp2p: LibP2PService, + libp2p: Network, /// An attestation and subnet manager service. attestation_service: AttestationService, /// A sync committeee subnet manager service. @@ -273,8 +272,8 @@ impl NetworkService { }; // launch libp2p service - let (network_globals, mut libp2p) = - LibP2PService::new(executor.clone(), service_context, &network_log).await?; + let (mut libp2p, network_globals) = + Network::new(executor.clone(), service_context, &network_log).await?; // Repopulate the DHT with stored ENR's if discovery is not disabled. 
if !config.disable_discovery { @@ -284,7 +283,7 @@ impl NetworkService { "Loading peers into the routing table"; "peers" => enrs_to_load.len() ); for enr in enrs_to_load { - libp2p.swarm.behaviour_mut().add_enr(enr.clone()); + libp2p.add_enr(enr.clone()); } } @@ -402,7 +401,7 @@ impl NetworkService { _ = self.metrics_update.tick(), if self.metrics_enabled => { // update various network metrics metrics::update_gossip_metrics::( - self.libp2p.swarm.behaviour().gs(), + self.libp2p.gossipsub(), &self.network_globals, ); // update sync metrics @@ -429,7 +428,7 @@ impl NetworkService { Some(_) = &mut self.next_unsubscribe => { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); - self.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + self.libp2p.unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); info!(self.log, "Unsubscribed from old fork topics"); self.next_unsubscribe = Box::pin(None.into()); } @@ -439,7 +438,7 @@ impl NetworkService { let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); info!(self.log, "Subscribing to new fork topics"); - self.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest); + self.libp2p.subscribe_new_fork_topics(fork_digest); self.next_fork_subscriptions = Box::pin(None.into()); } else { @@ -456,92 +455,90 @@ impl NetworkService { /// Handle an event received from the network. async fn on_libp2p_event( &mut self, - ev: Libp2pEvent, + ev: NetworkEvent, shutdown_sender: &mut Sender, ) { match ev { - Libp2pEvent::Behaviour(event) => match event { - BehaviourEvent::PeerConnectedOutgoing(peer_id) => { - self.send_to_router(RouterMessage::PeerDialed(peer_id)); - } - BehaviourEvent::PeerConnectedIncoming(_) - | BehaviourEvent::PeerBanned(_) - | BehaviourEvent::PeerUnbanned(_) => { - // No action required for these events. - } - BehaviourEvent::PeerDisconnected(peer_id) => { - self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); - } - BehaviourEvent::RequestReceived { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + self.send_to_router(RouterMessage::PeerDialed(peer_id)); + } + NetworkEvent::PeerConnectedIncoming(_) + | NetworkEvent::PeerBanned(_) + | NetworkEvent::PeerUnbanned(_) => { + // No action required for these events. + } + NetworkEvent::PeerDisconnected(peer_id) => { + self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); + } + NetworkEvent::RequestReceived { + peer_id, + id, + request, + } => { + self.send_to_router(RouterMessage::RPCRequestReceived { peer_id, id, request, - } => { - self.send_to_router(RouterMessage::RPCRequestReceived { - peer_id, - id, - request, - }); - } - BehaviourEvent::ResponseReceived { + }); + } + NetworkEvent::ResponseReceived { + peer_id, + id, + response, + } => { + self.send_to_router(RouterMessage::RPCResponseReceived { peer_id, - id, + request_id: id, response, - } => { - self.send_to_router(RouterMessage::RPCResponseReceived { - peer_id, - request_id: id, - response, - }); - } - BehaviourEvent::RPCFailed { id, peer_id } => { - self.send_to_router(RouterMessage::RPCFailed { - peer_id, - request_id: id, - }); - } - BehaviourEvent::StatusPeer(peer_id) => { - self.send_to_router(RouterMessage::StatusPeer(peer_id)); - } - BehaviourEvent::PubsubMessage { - id, - source, - message, - .. 
- } => { - match message { - // attestation information gets processed in the attestation service - PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = subnet_and_attestation.0; - let attestation = &subnet_and_attestation.1; - // checks if we have an aggregator for the slot. If so, we should process - // the attestation, else we just just propagate the Attestation. - let should_process = self - .attestation_service - .should_process_attestation(subnet, attestation); - self.send_to_router(RouterMessage::PubsubMessage( - id, - source, - message, - should_process, - )); - } - _ => { - // all else is sent to the router - self.send_to_router(RouterMessage::PubsubMessage( - id, source, message, true, - )); - } + }); + } + NetworkEvent::RPCFailed { id, peer_id } => { + self.send_to_router(RouterMessage::RPCFailed { + peer_id, + request_id: id, + }); + } + NetworkEvent::StatusPeer(peer_id) => { + self.send_to_router(RouterMessage::StatusPeer(peer_id)); + } + NetworkEvent::PubsubMessage { + id, + source, + message, + .. + } => { + match message { + // attestation information gets processed in the attestation service + PubsubMessage::Attestation(ref subnet_and_attestation) => { + let subnet = subnet_and_attestation.0; + let attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. If so, we should process + // the attestation, else we just just propagate the Attestation. + let should_process = self + .attestation_service + .should_process_attestation(subnet, attestation); + self.send_to_router(RouterMessage::PubsubMessage( + id, + source, + message, + should_process, + )); + } + _ => { + // all else is sent to the router + self.send_to_router(RouterMessage::PubsubMessage( + id, source, message, true, + )); } } - }, - Libp2pEvent::NewListenAddr(multiaddr) => { + } + NetworkEvent::NewListenAddr(multiaddr) => { self.network_globals .listen_multiaddrs .write() .push(multiaddr); } - Libp2pEvent::ZeroListeners => { + NetworkEvent::ZeroListeners => { let _ = shutdown_sender .send(ShutdownReason::Failure( "All listeners are closed. 
Unable to listen", @@ -588,7 +585,7 @@ impl NetworkService { id, reason, } => { - self.libp2p.respond_with_error(peer_id, id, error, reason); + self.libp2p.send_error_reponse(peer_id, id, error, reason); } NetworkMessage::UPnPMappingEstablished { tcp_socket, @@ -599,8 +596,6 @@ impl NetworkService { if let Some(tcp_socket) = tcp_socket { if let Err(e) = self .libp2p - .swarm - .behaviour_mut() .discovery_mut() .update_enr_tcp_port(tcp_socket.port()) { @@ -613,8 +608,6 @@ impl NetworkService { if let Some(udp_socket) = udp_socket { if let Err(e) = self .libp2p - .swarm - .behaviour_mut() .discovery_mut() .update_enr_udp_socket(udp_socket) { @@ -633,14 +626,11 @@ impl NetworkService { "message_id" => %message_id, "validation_result" => ?validation_result ); - self.libp2p - .swarm - .behaviour_mut() - .report_message_validation_result( - &propagation_source, - message_id, - validation_result, - ); + self.libp2p.report_message_validation_result( + &propagation_source, + message_id, + validation_result, + ); } NetworkMessage::Publish { messages } => { let mut topic_kinds = Vec::new(); @@ -655,7 +645,7 @@ impl NetworkService { "count" => messages.len(), "topics" => ?topic_kinds ); - self.libp2p.swarm.behaviour_mut().publish(messages); + self.libp2p.publish(messages); } NetworkMessage::ReportPeer { peer_id, @@ -693,7 +683,7 @@ impl NetworkService { GossipEncoding::default(), fork_digest, ); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -706,10 +696,10 @@ impl NetworkService { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); // Update the ENR bitfield - self.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -720,17 +710,14 @@ impl NetworkService { for subnet_id in 0..subnet_max { let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); // Update the ENR bitfield - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( subnet.into(), GossipEncoding::default(), fork_digest, ); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -782,8 +769,6 @@ impl NetworkService { if let Some(active_validators) = active_validators_opt { if self .libp2p - .swarm - .behaviour_mut() .update_gossipsub_parameters(active_validators, slot) .is_err() { @@ -811,33 +796,24 @@ impl NetworkService { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().subscribe(topic); + self.libp2p.subscribe(topic); } } SubnetServiceMessage::Unsubscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() 
{ let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + self.libp2p.unsubscribe(topic); } } SubnetServiceMessage::EnrAdd(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); } SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, false); + self.libp2p.update_enr_subnet(subnet, false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - self.libp2p - .swarm - .behaviour_mut() - .discover_subnet_peers(subnets_to_discover); + self.libp2p.discover_subnet_peers(subnets_to_discover); } } } @@ -848,33 +824,24 @@ impl NetworkService { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().subscribe(topic); + self.libp2p.subscribe(topic); } } SubnetServiceMessage::Unsubscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + self.libp2p.unsubscribe(topic); } } SubnetServiceMessage::EnrAdd(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); } SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, false); + self.libp2p.update_enr_subnet(subnet, false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - self.libp2p - .swarm - .behaviour_mut() - .discover_subnet_peers(subnets_to_discover); + self.libp2p.discover_subnet_peers(subnets_to_discover); } } } @@ -892,10 +859,7 @@ impl NetworkService { ); fork_context.update_current_fork(*new_fork_name); - self.libp2p - .swarm - .behaviour_mut() - .update_fork_version(new_enr_fork_id); + self.libp2p.update_fork_version(new_enr_fork_id); // Reinitialize the next_fork_update self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into()); @@ -944,7 +908,7 @@ fn next_fork_subscriptions_delay( impl Drop for NetworkService { fn drop(&mut self) { // network thread is terminating - let enrs = self.libp2p.swarm.behaviour_mut().enr_entries(); + let enrs = self.libp2p.enr_entries(); debug!( self.log, "Persisting DHT to store"; diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 661035ca5..61eb20642 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -16,7 +16,7 @@ operating system. Install the following packages: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler ``` > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories @@ -32,13 +32,18 @@ sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clan brew install cmake ``` +1. Install protoc using Homebrew: +``` +brew install protobuf +``` + [Homebrew]: https://brew.sh/ #### Windows 1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. -1. Install Make, CMake and LLVM using Chocolatey: +1. 
Install Make, CMake, LLVM and protoc using Chocolatey: ``` choco install make @@ -52,10 +57,13 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' choco install llvm ``` +``` +choco install protoc +``` + These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. - [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about ## Build Lighthouse diff --git a/book/src/setup.md b/book/src/setup.md index e8c56623b..a1febe4a0 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -14,6 +14,8 @@ The additional requirements for developers are: don't have `ganache` available on your `PATH` or if ganache is older than v7. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. +- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for + the networking stack. - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum, used by web3signer_tests. diff --git a/scripts/cross/aarch64-unknown-linux-gnu.dockerfile b/scripts/cross/aarch64-unknown-linux-gnu.dockerfile new file mode 100644 index 000000000..691639cd4 --- /dev/null +++ b/scripts/cross/aarch64-unknown-linux-gnu.dockerfile @@ -0,0 +1,14 @@ +ARG CROSS_BASE_IMAGE +FROM $CROSS_BASE_IMAGE + +RUN apt-get update -y && apt-get upgrade -y + +RUN apt-get install -y unzip && \ + PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \ + curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-aarch_64.zip -o protoc.zip && \ + unzip protoc.zip -d /usr && \ + chmod +x /usr/bin/protoc + +RUN apt-get install -y cmake clang-3.9 + +ENV PROTOC=/usr/bin/protoc diff --git a/scripts/cross/x86_64-unknown-linux-gnu.dockerfile b/scripts/cross/x86_64-unknown-linux-gnu.dockerfile new file mode 100644 index 000000000..5472b980b --- /dev/null +++ b/scripts/cross/x86_64-unknown-linux-gnu.dockerfile @@ -0,0 +1,14 @@ +ARG CROSS_BASE_IMAGE +FROM $CROSS_BASE_IMAGE + +RUN apt-get update -y && apt-get upgrade -y + +RUN apt-get install -y unzip && \ + PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \ + curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip -o protoc.zip && \ + unzip protoc.zip -d /usr && \ + chmod +x /usr/bin/protoc + +RUN apt-get install -y cmake clang-3.9 + +ENV PROTOC=/usr/bin/protoc
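---

The new `protoc` requirement introduced above (book instructions, CI workflows and cross images) can be sanity-checked after installation. This is a minimal check, assuming `protoc` was installed onto `PATH`; inside the cross images the build instead locates the compiler through the `PROTOC` environment variable set in the dockerfiles:

```bash
# Confirm the Protocol Buffers compiler is installed and visible on PATH.
protoc --version

# Inside the scripts/cross/* images the compiler is found via the PROTOC
# environment variable (set to /usr/bin/protoc above) rather than PATH.
echo "$PROTOC"
```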
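The two dockerfiles added under `scripts/cross/` provide cmake, clang and protoc for cross-compiled builds. A minimal sketch of how such an image is typically exercised, assuming the `cross` tool is installed and configured to use these images for the matching targets:

```bash
# Hypothetical invocation: cross mirrors cargo's CLI and runs the build
# inside the target's container image, where ENV PROTOC is already set.
cross build --release --target aarch64-unknown-linux-gnu
cross build --release --target x86_64-unknown-linux-gnu
```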