Libp2p v0.48.0 upgrade (#3547)
## Issue Addressed

Upgrades libp2p to v0.48.0. This is the compilation of:

- [x] #3495
- [x] #3497
- [x] #3491
- [x] #3546
- [x] #3553

Co-authored-by: Age Manning <Age@AgeManning.com>
Parent: 6779912fe4
Commit: b1d2510d1b
.github/workflows/local-testnet.yml (3 changes)

@@ -20,7 +20,8 @@ jobs:
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: npm install ganache@latest --global
 
.github/workflows/test-suite.yml (92 changes)

@@ -50,6 +50,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: sudo npm install -g ganache
     - name: Run tests in release
@@ -68,7 +70,7 @@ jobs:
         node-version: '14'
     - name: Install windows build tools
       run: |
-        choco install python visualstudio2019-workload-vctools -y
+        choco install python protoc visualstudio2019-workload-vctools -y
         npm config set msvs_version 2019
     - name: Install ganache
       run: npm install -g ganache --loglevel verbose
@@ -90,6 +92,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Run beacon_chain tests for all known forks
       run: make test-beacon-chain
   op-pool-tests:
@@ -100,6 +104,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Run operation_pool tests for all known forks
       run: make test-op-pool
   slasher-tests:
@@ -120,6 +126,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: sudo npm install -g ganache
     - name: Run tests in debug
@@ -132,6 +140,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Run state_transition_vectors in release.
       run: make run-state-transition-tests
   ef-tests-ubuntu:
@@ -142,6 +152,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Run consensus-spec-tests with blst, milagro and fake_crypto
       run: make test-ef
   dockerfile-ubuntu:
@@ -164,6 +176,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: sudo npm install -g ganache
     - name: Run the beacon chain sim that starts from an eth1 contract
@@ -176,6 +190,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: sudo npm install -g ganache
     - name: Run the beacon chain sim and go through the merge transition
@@ -188,6 +204,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: sudo npm install -g ganache
     - name: Run the beacon chain sim without an eth1 connection
@@ -197,35 +215,39 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: sudo npm install -g ganache
     - name: Run the syncing simulator
       run: cargo run --release --bin simulator syncing-sim
   doppelganger-protection-test:
     name: doppelganger-protection-test
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install ganache
       run: sudo npm install -g ganache
     - name: Install lighthouse and lcli
       run: |
         make
         make install-lcli
     - name: Run the doppelganger protection success test script
       run: |
         cd scripts/tests
         ./doppelganger_protection.sh success
     - name: Run the doppelganger protection failure test script
       run: |
         cd scripts/tests
         ./doppelganger_protection.sh failure
   execution-engine-integration-ubuntu:
     name: execution-engine-integration-ubuntu
     runs-on: ubuntu-latest
@@ -240,6 +262,8 @@ jobs:
         dotnet-version: '6.0.201'
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Run exec engine integration tests in release
       run: make test-exec-engine
   check-benchmarks:
@@ -250,6 +274,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Typecheck benchmark code without running it
       run: make check-benches
   check-consensus:
@@ -270,6 +296,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Get latest version of stable Rust
       run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Lint code for quality and style with Clippy
       run: make lint
     - name: Certify Cargo.lock freshness
@@ -289,6 +317,8 @@ jobs:
         git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a
         cargo build --release --bin cargo-clippy --bin clippy-driver
         cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Run Clippy with the disallowed-from-async lint
       run: make nightly-lint
   check-msrv:
@@ -299,6 +329,8 @@ jobs:
     - uses: actions/checkout@v1
     - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
       run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Run cargo check
       run: cargo check --workspace
   arbitrary-check:
@@ -339,6 +371,8 @@ jobs:
       run: rustup toolchain install $PINNED_NIGHTLY
     # NOTE: cargo-udeps version is pinned until this issue is resolved:
     # https://github.com/est31/cargo-udeps/issues/135
+    - name: Install Protoc
+      uses: arduino/setup-protoc@v1
     - name: Install cargo-udeps
       run: cargo install cargo-udeps --locked --force --version 0.1.30
     - name: Create Cargo config dir
Cargo.lock (159 changes, generated)

@@ -3226,9 +3226,9 @@ dependencies = [
 
 [[package]]
 name = "libp2p"
-version = "0.45.1"
+version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029"
+checksum = "94c996fe5bfdba47f5a5af71d48ecbe8cec900b7b97391cc1d3ba1afb0e2d3b6"
 dependencies = [
  "bytes",
  "futures",
@@ -3236,7 +3236,7 @@ dependencies = [
  "getrandom 0.2.7",
  "instant",
  "lazy_static",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "libp2p-dns",
  "libp2p-gossipsub",
  "libp2p-identify",
@@ -3293,9 +3293,9 @@ dependencies = [
 
 [[package]]
 name = "libp2p-core"
-version = "0.33.0"
+version = "0.36.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979"
+checksum = "b1fff5bd889c82a0aec668f2045edd066f559d4e5c40354e5a4c77ac00caac38"
 dependencies = [
  "asn1_der",
  "bs58",
@@ -3313,10 +3313,9 @@ dependencies = [
  "multistream-select 0.11.0",
  "parking_lot 0.12.1",
  "pin-project 1.0.11",
- "prost 0.10.4",
- "prost-build 0.10.4",
+ "prost 0.11.0",
+ "prost-build 0.11.1",
  "rand 0.8.5",
- "ring",
  "rw-stream-sink 0.3.0",
  "sha2 0.10.2",
  "smallvec",
@@ -3328,12 +3327,12 @@ dependencies = [
 
 [[package]]
 name = "libp2p-dns"
-version = "0.33.0"
+version = "0.36.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268"
+checksum = "6cb3c16e3bb2f76c751ae12f0f26e788c89d353babdded40411e7923f01fc978"
 dependencies = [
  "futures",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "log",
  "parking_lot 0.12.1",
  "smallvec",
@@ -3342,9 +3341,9 @@ dependencies = [
 
 [[package]]
 name = "libp2p-gossipsub"
-version = "0.38.1"
+version = "0.41.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43e064ba4d7832e01c738626c6b274ae100baba05f5ffcc7b265c2a3ed398108"
+checksum = "2185aac44b162c95180ae4ddd1f4dfb705217ea1cb8e16bdfc70d31496fd80fa"
 dependencies = [
  "asynchronous-codec",
  "base64",
@@ -3354,12 +3353,12 @@ dependencies = [
  "futures",
  "hex_fmt",
  "instant",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "libp2p-swarm",
  "log",
  "prometheus-client",
- "prost 0.10.4",
- "prost-build 0.10.4",
+ "prost 0.11.0",
+ "prost-build 0.11.1",
  "rand 0.7.3",
  "regex",
  "sha2 0.10.2",
@@ -3370,19 +3369,19 @@ dependencies = [
 
 [[package]]
 name = "libp2p-identify"
-version = "0.36.1"
+version = "0.39.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984"
+checksum = "f19440c84b509d69b13f0c9c28caa9bd3a059d25478527e937e86761f25c821e"
 dependencies = [
  "asynchronous-codec",
  "futures",
  "futures-timer",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "libp2p-swarm",
  "log",
  "lru",
- "prost 0.10.4",
- "prost-build 0.10.4",
+ "prost 0.11.0",
+ "prost-build 0.11.1",
  "prost-codec",
  "smallvec",
  "thiserror",
@@ -3391,11 +3390,11 @@ dependencies = [
 
 [[package]]
 name = "libp2p-metrics"
-version = "0.6.1"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "564a7e5284d7d9b3140fdfc3cb6567bc32555e86a21de5604c2ec85da05cf384"
+checksum = "a74ab339e8b5d989e8c1000a78adb5c064a6319245bb22d1e70b415ec18c39b8"
 dependencies = [
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "libp2p-gossipsub",
  "libp2p-identify",
  "libp2p-swarm",
@@ -3404,14 +3403,14 @@ dependencies = [
 
 [[package]]
 name = "libp2p-mplex"
-version = "0.33.0"
+version = "0.36.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640"
+checksum = "ce53169351226ee0eb18ee7bef8d38f308fa8ad7244f986ae776390c0ae8a44d"
 dependencies = [
  "asynchronous-codec",
  "bytes",
  "futures",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "log",
  "nohash-hasher",
  "parking_lot 0.12.1",
@@ -3422,18 +3421,18 @@ dependencies = [
 
 [[package]]
 name = "libp2p-noise"
-version = "0.36.0"
+version = "0.39.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad"
+checksum = "7cb0f939a444b06779ce551b3d78ebf13970ac27906ada452fd70abd160b09b8"
 dependencies = [
  "bytes",
  "curve25519-dalek 3.2.0",
  "futures",
  "lazy_static",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "log",
- "prost 0.10.4",
- "prost-build 0.10.4",
+ "prost 0.11.0",
+ "prost-build 0.11.1",
  "rand 0.8.5",
  "sha2 0.10.2",
  "snow",
@@ -3444,33 +3443,33 @@ dependencies = [
 
 [[package]]
 name = "libp2p-plaintext"
-version = "0.33.0"
+version = "0.36.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f"
+checksum = "328e8c654a55ac7f093eb96dfd0386244dd337f2bd2822dc019522b743ea8add"
 dependencies = [
  "asynchronous-codec",
  "bytes",
  "futures",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "log",
- "prost 0.10.4",
- "prost-build 0.10.4",
+ "prost 0.11.0",
+ "prost-build 0.11.1",
  "unsigned-varint 0.7.1",
  "void",
 ]
 
 [[package]]
 name = "libp2p-swarm"
-version = "0.36.1"
+version = "0.39.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346"
+checksum = "70ad2db60c06603606b54b58e4247e32efec87a93cb4387be24bf32926c600f2"
 dependencies = [
  "either",
  "fnv",
  "futures",
  "futures-timer",
  "instant",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "log",
  "pin-project 1.0.11",
  "rand 0.7.3",
@@ -3481,26 +3480,27 @@ dependencies = [
 
 [[package]]
 name = "libp2p-swarm-derive"
-version = "0.27.2"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f693c8c68213034d472cbb93a379c63f4f307d97c06f1c41e4985de481687a5"
+checksum = "1f02622b9dd150011b4eeec387f8bd013189a2f27da08ba363e7c6e606d77a48"
 dependencies = [
+ "heck 0.4.0",
  "quote",
  "syn",
 ]
 
 [[package]]
 name = "libp2p-tcp"
-version = "0.33.0"
+version = "0.36.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892"
+checksum = "9675432b4c94b3960f3d2c7e57427b81aea92aab67fd0eebef09e2ae0ff54895"
 dependencies = [
  "futures",
  "futures-timer",
  "if-addrs 0.7.0",
  "ipnet",
  "libc",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "log",
  "socket2",
  "tokio",
@@ -3508,14 +3508,14 @@ dependencies = [
 
 [[package]]
 name = "libp2p-websocket"
-version = "0.35.0"
+version = "0.38.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5"
+checksum = "de8a9e825cc03f2fc194d2e1622113d7fe18e1c7f4458a582b83140c9b9aea27"
 dependencies = [
  "either",
  "futures",
  "futures-rustls",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "log",
  "parking_lot 0.12.1",
  "quicksink",
@@ -3527,12 +3527,12 @@ dependencies = [
 
 [[package]]
 name = "libp2p-yamux"
-version = "0.37.0"
+version = "0.40.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f"
+checksum = "b74ec8dc042b583f0b2b93d52917f3b374c1e4b1cfa79ee74c7672c41257694c"
 dependencies = [
  "futures",
- "libp2p-core 0.33.0",
+ "libp2p-core 0.36.0",
  "parking_lot 0.12.1",
  "thiserror",
  "yamux",
@@ -4440,15 +4440,6 @@ dependencies = [
  "types",
 ]
 
-[[package]]
-name = "owning_ref"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce"
-dependencies = [
- "stable_deref_trait",
-]
-
 [[package]]
 name = "parity-scale-codec"
 version = "2.3.1"
@@ -4876,21 +4867,21 @@ dependencies = [
 
 [[package]]
 name = "prometheus-client"
-version = "0.16.0"
+version = "0.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825"
+checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603"
 dependencies = [
  "dtoa",
  "itoa 1.0.2",
- "owning_ref",
+ "parking_lot 0.12.1",
  "prometheus-client-derive-text-encode",
 ]
 
 [[package]]
 name = "prometheus-client-derive-text-encode"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652"
+checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -4909,12 +4900,12 @@ dependencies = [
 
 [[package]]
 name = "prost"
-version = "0.10.4"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e"
+checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7"
 dependencies = [
  "bytes",
- "prost-derive 0.10.1",
+ "prost-derive 0.11.0",
 ]
 
 [[package]]
@@ -4939,21 +4930,19 @@ dependencies = [
 
 [[package]]
 name = "prost-build"
-version = "0.10.4"
+version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab"
+checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb"
 dependencies = [
  "bytes",
- "cfg-if",
- "cmake",
  "heck 0.4.0",
  "itertools",
  "lazy_static",
  "log",
  "multimap",
  "petgraph",
- "prost 0.10.4",
- "prost-types 0.10.1",
+ "prost 0.11.0",
+ "prost-types 0.11.1",
  "regex",
  "tempfile",
  "which",
@@ -4961,13 +4950,13 @@ dependencies = [
 
 [[package]]
 name = "prost-codec"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007"
+checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987"
 dependencies = [
  "asynchronous-codec",
  "bytes",
- "prost 0.10.4",
+ "prost 0.11.0",
  "thiserror",
  "unsigned-varint 0.7.1",
 ]
@@ -4987,9 +4976,9 @@ dependencies = [
 
 [[package]]
 name = "prost-derive"
-version = "0.10.1"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc"
+checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364"
 dependencies = [
  "anyhow",
  "itertools",
@@ -5010,12 +4999,12 @@ dependencies = [
 
 [[package]]
 name = "prost-types"
-version = "0.10.1"
+version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68"
+checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e"
 dependencies = [
  "bytes",
- "prost 0.10.4",
+ "prost 0.11.0",
 ]
 
 [[package]]
@@ -6272,12 +6261,6 @@ dependencies = [
  "syn",
 ]
 
-[[package]]
-name = "stable_deref_trait"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
-
 [[package]]
 name = "state_processing"
 version = "0.2.0"

@@ -1,5 +1,5 @@
 [target.x86_64-unknown-linux-gnu]
-pre-build = ["apt-get install -y cmake clang-3.9"]
+dockerfile = './scripts/cross/x86_64-unknown-linux-gnu.dockerfile'
 
 [target.aarch64-unknown-linux-gnu]
-pre-build = ["apt-get install -y cmake clang-3.9"]
+dockerfile = './scripts/cross/aarch64-unknown-linux-gnu.dockerfile'

@@ -1,5 +1,5 @@
 FROM rust:1.62.1-bullseye AS builder
-RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
+RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
 COPY . lighthouse
 ARG FEATURES
 ENV FEATURES $FEATURES
Makefile (2 changes)

@@ -179,7 +179,7 @@ arbitrary-fuzz:
 # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
 audit:
 	cargo install --force cargo-audit
-	cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 --ignore RUSTSEC-2022-0040
+	cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159
 
 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
 vendor:

@@ -118,9 +118,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
 
     // Only a peer manager can add peers, so we create a dummy manager.
     let config = lighthouse_network::peer_manager::config::Config::default();
-    let mut pm = PeerManager::new(config, network_globals.clone(), &log)
-        .await
-        .unwrap();
+    let mut pm = PeerManager::new(config, network_globals.clone(), &log).unwrap();
 
     // add a peer
     let peer_id = PeerId::random();
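The hunk above updates a call site for the `PeerManager` constructor, which (as the later peer-manager hunks show) is no longer `async`. A minimal sketch of what that means for callers, using stand-in types rather than the real Lighthouse ones:

```rust
// Stand-in for illustration only; the real constructor takes a config,
// network globals and a logger, as shown in the hunk above.
struct PeerManager;

impl PeerManager {
    // Previously `pub async fn new(...)`; now a plain synchronous constructor.
    fn new() -> Result<Self, String> {
        Ok(PeerManager)
    }
}

fn main() {
    // Old call shape: `PeerManager::new().await.unwrap()` inside an async fn.
    let _pm = PeerManager::new().unwrap();
}
```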
@@ -37,12 +37,12 @@ directory = { path = "../../common/directory" }
 regex = "1.5.5"
 strum = { version = "0.24.0", features = ["derive"] }
 superstruct = "0.5.0"
-prometheus-client = "0.16.0"
+prometheus-client = "0.18.0"
 unused_port = { path = "../../common/unused_port" }
 delay_map = "0.1.1"
 
 [dependencies.libp2p]
-version = "0.45.1"
+version = "0.48.0"
 default-features = false
 features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"]

@@ -232,7 +232,6 @@ impl CombinedKeyExt for CombinedKey {
                 .expect("libp2p key must be valid");
             Ok(CombinedKey::from(ed_keypair))
         }
-        _ => Err("ENR: Unsupported libp2p key type"),
     }
 }
 
@@ -266,7 +265,6 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, String>
             hasher.finalize(&mut output);
             Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
         }
-        _ => Err("Unsupported public key".into()),
     }
 }
 
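The two removals above drop `_ =>` fallback arms, presumably because with the key types compiled into this build the remaining arms already cover every variant, making the fallback unreachable. A self-contained sketch of that pattern, with stand-in types rather than the real libp2p `Keypair`/`PublicKey`:

```rust
// Stand-in enum: assume only the key types actually enabled are compiled in.
enum Keypair {
    Ed25519([u8; 32]),
    Secp256k1([u8; 32]),
}

fn describe(key: &Keypair) -> Result<&'static str, &'static str> {
    match key {
        Keypair::Ed25519(_) => Ok("ed25519 key"),
        Keypair::Secp256k1(_) => Ok("secp256k1 key"),
        // No `_ => Err("unsupported key type")` arm: every variant above is
        // already handled, so a fallback would be unreachable.
    }
}

fn main() {
    assert_eq!(describe(&Keypair::Ed25519([0; 32])), Ok("ed25519 key"));
}
```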

@@ -7,8 +7,8 @@ pub(crate) mod enr;
 pub mod enr_ext;
 
 // Allow external use of the lighthouse ENR builder
-use crate::behaviour::TARGET_SUBNET_PEERS;
 use crate::metrics;
+use crate::service::TARGET_SUBNET_PEERS;
 use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery};
 use discv5::{enr::NodeId, Discv5, Discv5Event};
 pub use enr::{
@@ -21,6 +21,8 @@ pub use libp2p::core::identity::{Keypair, PublicKey};
 use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY};
 use futures::prelude::*;
 use futures::stream::FuturesUnordered;
+use libp2p::multiaddr::Protocol;
+use libp2p::swarm::AddressScore;
 pub use libp2p::{
     core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId},
     swarm::{
@@ -67,13 +69,11 @@ pub const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16;
 /// The threshold for updating `min_ttl` on a connected peer.
 const DURATION_DIFFERENCE: Duration = Duration::from_millis(1);
 
-/// The events emitted by polling discovery.
-pub enum DiscoveryEvent {
-    /// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl`
-    /// of the peer if it is specified.
-    QueryResult(HashMap<PeerId, Option<Instant>>),
-    /// This indicates that our local UDP socketaddr has been updated and we should inform libp2p.
-    SocketUpdated(SocketAddr),
+/// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl`
+/// of the peer if it is specified.
+#[derive(Debug)]
+pub struct DiscoveredPeers {
+    pub peers: HashMap<PeerId, Option<Instant>>,
 }
 
 #[derive(Clone, PartialEq)]
@@ -362,7 +362,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
     }
 
     /// Returns an iterator over all enr entries in the DHT.
-    pub fn table_entries_enr(&mut self) -> Vec<Enr> {
+    pub fn table_entries_enr(&self) -> Vec<Enr> {
         self.discv5.table_entries_enr()
     }
 
@@ -909,7 +909,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
 impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
     // Discovery is not a real NetworkBehaviour...
     type ConnectionHandler = libp2p::swarm::handler::DummyConnectionHandler;
-    type OutEvent = DiscoveryEvent;
+    type OutEvent = DiscoveredPeers;
 
     fn new_handler(&mut self) -> Self::ConnectionHandler {
         libp2p::swarm::handler::DummyConnectionHandler::default()
@ -976,11 +976,9 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
|
|||||||
self.process_queue();
|
self.process_queue();
|
||||||
|
|
||||||
// Drive the queries and return any results from completed queries
|
// Drive the queries and return any results from completed queries
|
||||||
if let Some(results) = self.poll_queries(cx) {
|
if let Some(peers) = self.poll_queries(cx) {
|
||||||
// return the result to the peer manager
|
// return the result to the peer manager
|
||||||
return Poll::Ready(NBAction::GenerateEvent(DiscoveryEvent::QueryResult(
|
return Poll::Ready(NBAction::GenerateEvent(DiscoveredPeers { peers }));
|
||||||
results,
|
|
||||||
)));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process the server event stream
|
// Process the server event stream
|
||||||
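With the change above, `Discovery` now emits a single `DiscoveredPeers` struct as its `OutEvent` instead of a `DiscoveryEvent` enum. A self-contained sketch (stand-in `PeerId`, hypothetical handler, not the real behaviour wiring) of how a wrapping behaviour might consume it:

```rust
use std::collections::HashMap;
use std::time::Instant;

// Stand-in for `libp2p::PeerId` so the sketch compiles on its own.
type PeerId = u64;

// Shape of the new output event, mirroring the struct introduced earlier.
struct DiscoveredPeers {
    peers: HashMap<PeerId, Option<Instant>>,
}

// Hypothetical consumer: hand each discovered peer (and its optional min TTL)
// to whatever component decides on dialing, e.g. the peer manager.
fn handle_discovered(event: DiscoveredPeers) {
    for (peer_id, min_ttl) in event.peers {
        println!("discovered peer {peer_id} (min_ttl: {min_ttl:?})");
    }
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert(42, None);
    handle_discovered(DiscoveredPeers { peers });
}
```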
@ -1019,8 +1017,8 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
|
|||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
}
|
}
|
||||||
Discv5Event::SocketUpdated(socket) => {
|
Discv5Event::SocketUpdated(socket_addr) => {
|
||||||
info!(self.log, "Address updated"; "ip" => %socket.ip(), "udp_port" => %socket.port());
|
info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port());
|
||||||
metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT);
|
metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT);
|
||||||
metrics::check_nat();
|
metrics::check_nat();
|
||||||
// Discv5 will have updated our local ENR. We save the updated version
|
// Discv5 will have updated our local ENR. We save the updated version
|
||||||
@ -1029,9 +1027,16 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
|
|||||||
enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log);
|
enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log);
|
||||||
// update network globals
|
// update network globals
|
||||||
*self.network_globals.local_enr.write() = enr;
|
*self.network_globals.local_enr.write() = enr;
|
||||||
return Poll::Ready(NBAction::GenerateEvent(
|
// A new UDP socket has been detected.
|
||||||
DiscoveryEvent::SocketUpdated(socket),
|
// Build a multiaddr to report to libp2p
|
||||||
));
|
let mut address = Multiaddr::from(socket_addr.ip());
|
||||||
|
// NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling
|
||||||
|
// should handle this.
|
||||||
|
address.push(Protocol::Tcp(self.network_globals.listen_port_tcp()));
|
||||||
|
return Poll::Ready(NBAction::ReportObservedAddr {
|
||||||
|
address,
|
||||||
|
score: AddressScore::Finite(1),
|
||||||
|
});
|
||||||
}
|
}
|
||||||
Discv5Event::EnrAdded { .. }
|
Discv5Event::EnrAdded { .. }
|
||||||
| Discv5Event::TalkRequest(_)
|
| Discv5Event::TalkRequest(_)
|
||||||
|
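Instead of forwarding the socket update as its own event, discovery now builds a multiaddr from the detected IP plus the locally known TCP port and reports it to libp2p via `ReportObservedAddr`. A small standalone illustration of just the multiaddr construction (assumes the `libp2p`/`multiaddr` crate as a dependency; the address and port here are arbitrary):

```rust
use libp2p::multiaddr::{Multiaddr, Protocol};
use std::net::{IpAddr, Ipv4Addr};

/// Turn a detected IP and a TCP port into `/ip4/<ip>/tcp/<port>`.
fn observed_address(ip: IpAddr, tcp_port: u16) -> Multiaddr {
    let mut addr = Multiaddr::from(ip);
    addr.push(Protocol::Tcp(tcp_port));
    addr
}

fn main() {
    let addr = observed_address(IpAddr::V4(Ipv4Addr::new(192, 0, 2, 1)), 9000);
    assert_eq!(addr.to_string(), "/ip4/192.0.2.1/tcp/9000");
}
```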
@@ -5,15 +5,14 @@
 #[macro_use]
 extern crate lazy_static;
 
-pub mod behaviour;
 mod config;
+pub mod service;
 
 #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
 pub mod discovery;
 pub mod metrics;
 pub mod peer_manager;
 pub mod rpc;
-mod service;
 pub mod types;
 
 pub use config::gossip_max_size;
@@ -69,7 +68,6 @@ pub use crate::types::{
 
 pub use prometheus_client;
 
-pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response};
 pub use config::Config as NetworkConfig;
 pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr};
 pub use discv5;
@@ -85,4 +83,7 @@ pub use peer_manager::{
     peerdb::PeerDB,
     ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus,
 };
-pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME};
+// pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME};
+pub use service::api_types::{PeerRequestId, Request, Response};
+pub use service::utils::*;
+pub use service::{Gossipsub, NetworkEvent};
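The re-export shuffle above means downstream crates pick up the event and request/response types from the crate root (where they are re-exported out of the new `service` module) rather than from the removed `behaviour` module. An assumed consumer-side snippet, illustration only; it needs the workspace `lighthouse_network` crate to compile:

```rust
// Before: use lighthouse_network::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response};
// After:
use lighthouse_network::{Gossipsub, NetworkEvent, PeerRequestId, Request, Response};

fn main() {}
```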
@@ -1,7 +1,7 @@
 //! Implementation of Lighthouse's peer management system.
 
-use crate::behaviour::TARGET_SUBNET_PEERS;
 use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode};
+use crate::service::TARGET_SUBNET_PEERS;
 use crate::{error, metrics, Gossipsub};
 use crate::{NetworkGlobals, PeerId};
 use crate::{Subnet, SubnetDiscovery};
@@ -12,6 +12,7 @@ use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult};
 use rand::seq::SliceRandom;
 use slog::{debug, error, trace, warn};
 use smallvec::SmallVec;
+use std::collections::VecDeque;
 use std::{
     sync::Arc,
     time::{Duration, Instant},
@@ -71,6 +72,8 @@ pub struct PeerManager<TSpec: EthSpec> {
     status_peers: HashSetDelay<PeerId>,
     /// The target number of peers we would like to connect to.
     target_peers: usize,
+    /// Peers queued to be dialed.
+    peers_to_dial: VecDeque<(PeerId, Option<Enr>)>,
     /// A collection of sync committee subnets that we need to stay subscribed to.
     /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run
     /// discovery queries for subnet peers if we disconnect from existing sync
@@ -115,7 +118,7 @@ pub enum PeerManagerEvent {
 
 impl<TSpec: EthSpec> PeerManager<TSpec> {
     // NOTE: Must be run inside a tokio executor.
-    pub async fn new(
+    pub fn new(
         cfg: config::Config,
         network_globals: Arc<NetworkGlobals<TSpec>>,
         log: &slog::Logger,
@@ -135,6 +138,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
         Ok(PeerManager {
             network_globals,
             events: SmallVec::new(),
+            peers_to_dial: Default::default(),
             inbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_inbound)),
             outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)),
             status_peers: HashSetDelay::new(Duration::from_secs(status_interval)),
@@ -360,8 +364,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
     /* Notifications from the Swarm */
 
     // A peer is being dialed.
-    pub fn inject_dialing(&mut self, peer_id: &PeerId, enr: Option<Enr>) {
-        self.inject_peer_connection(peer_id, ConnectingType::Dialing, enr);
+    pub fn dial_peer(&mut self, peer_id: &PeerId, enr: Option<Enr>) {
+        self.peers_to_dial.push_back((*peer_id, enr));
     }
 
     /// Reports if a peer is banned or not.
@@ -1247,9 +1251,7 @@ mod tests {
         };
         let log = build_log(slog::Level::Debug, false);
         let globals = NetworkGlobals::new_test_globals(&log);
-        PeerManager::new(config, Arc::new(globals), &log)
-            .await
-            .unwrap()
+        PeerManager::new(config, Arc::new(globals), &log).unwrap()
     }
 
     #[tokio::test]
@@ -3,6 +3,7 @@ use std::task::{Context, Poll};
 use futures::StreamExt;
 use libp2p::core::connection::ConnectionId;
 use libp2p::core::ConnectedPoint;
+use libp2p::swarm::dial_opts::{DialOpts, PeerCondition};
 use libp2p::swarm::handler::DummyConnectionHandler;
 use libp2p::swarm::{
     ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters,
@@ -16,7 +17,7 @@ use crate::rpc::GoodbyeReason;
 use crate::types::SyncState;
 
 use super::peerdb::BanResult;
-use super::{PeerManager, PeerManagerEvent, ReportSource};
+use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource};
 
 impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
     type ConnectionHandler = DummyConnectionHandler;
@@ -99,6 +100,17 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
             self.events.shrink_to_fit();
         }
 
+        if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_front() {
+            self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr);
+            let handler = self.new_handler();
+            return Poll::Ready(NetworkBehaviourAction::Dial {
+                opts: DialOpts::peer_id(peer_id)
+                    .condition(PeerCondition::Disconnected)
+                    .build(),
+                handler,
+            });
+        }
+
         Poll::Pending
     }
 
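Taken together with the peer-manager hunks, the block above implements a simple dial queue: `dial_peer` only records the intent, and the behaviour's `poll` drains the queue and emits `NetworkBehaviourAction::Dial` with `DialOpts`. A self-contained sketch of the queue half of that pattern, using simplified stand-in types:

```rust
use std::collections::VecDeque;

type PeerId = u64;

#[derive(Default)]
struct DialQueue {
    peers_to_dial: VecDeque<PeerId>,
}

impl DialQueue {
    /// Called from the peer manager's API: just record the intent to dial.
    fn dial_peer(&mut self, peer_id: PeerId) {
        self.peers_to_dial.push_back(peer_id);
    }

    /// Called from `poll`: hand back the next peer to dial, if any.
    fn next_dial(&mut self) -> Option<PeerId> {
        self.peers_to_dial.pop_front()
    }
}

fn main() {
    let mut queue = DialQueue::default();
    queue.dial_peer(1);
    queue.dial_peer(2);
    assert_eq!(queue.next_dial(), Some(1));
    assert_eq!(queue.next_dial(), Some(2));
    assert_eq!(queue.next_dial(), None);
}
```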
@@ -5,7 +5,7 @@
 //! As the logic develops this documentation will advance.
 //!
 //! The scoring algorithms are currently experimental.
-use crate::behaviour::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD;
+use crate::service::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD;
 use serde::Serialize;
 use std::time::Instant;
 use strum::AsRefStr;

@@ -90,6 +90,7 @@ impl<T: EthSpec, Id: std::fmt::Debug> std::fmt::Display for RPCSend<Id, T> {
 }
 
 /// Messages sent to the user from the RPC protocol.
+#[derive(Debug)]
 pub struct RPCMessage<Id, TSpec: EthSpec> {
     /// The peer that sent the message.
     pub peer_id: PeerId,
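A small illustration of what the added `#[derive(Debug)]` buys: the RPC event can now be logged or asserted on with `{:?}`. The struct below is a simplified stand-in, not the real `RPCMessage`:

```rust
#[derive(Debug)]
struct RpcMessage {
    peer_id: String,
}

fn main() {
    let msg = RpcMessage {
        peer_id: "example-peer".into(),
    };
    // Now possible because of the derive:
    println!("{:?}", msg);
}
```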
@ -1,573 +0,0 @@
|
|||||||
use crate::behaviour::{
|
|
||||||
save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response,
|
|
||||||
};
|
|
||||||
use crate::config::NetworkLoad;
|
|
||||||
use crate::discovery::enr;
|
|
||||||
use crate::multiaddr::Protocol;
|
|
||||||
use crate::rpc::{GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, ReqId};
|
|
||||||
use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipKind};
|
|
||||||
use crate::EnrExt;
|
|
||||||
use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource};
|
|
||||||
use futures::prelude::*;
|
|
||||||
use libp2p::core::{
|
|
||||||
identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed,
|
|
||||||
};
|
|
||||||
use libp2p::{
|
|
||||||
bandwidth::{BandwidthLogging, BandwidthSinks},
|
|
||||||
core, noise,
|
|
||||||
swarm::{ConnectionLimits, SwarmBuilder, SwarmEvent},
|
|
||||||
PeerId, Swarm, Transport,
|
|
||||||
};
|
|
||||||
use prometheus_client::registry::Registry;
|
|
||||||
use slog::{crit, debug, info, o, trace, warn, Logger};
|
|
||||||
use ssz::Decode;
|
|
||||||
use std::fs::File;
|
|
||||||
use std::io::prelude::*;
|
|
||||||
use std::pin::Pin;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
use types::{ChainSpec, EnrForkId, EthSpec, ForkContext};
|
|
||||||
|
|
||||||
use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS};
|
|
||||||
|
|
||||||
pub const NETWORK_KEY_FILENAME: &str = "key";
|
|
||||||
/// The maximum simultaneous libp2p connections per peer.
|
|
||||||
const MAX_CONNECTIONS_PER_PEER: u32 = 1;
|
|
||||||
/// The filename to store our local metadata.
|
|
||||||
pub const METADATA_FILENAME: &str = "metadata";
|
|
||||||
|
|
||||||
/// The types of events than can be obtained from polling the libp2p service.
|
|
||||||
///
|
|
||||||
/// This is a subset of the events that a libp2p swarm emits.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum Libp2pEvent<AppReqId: ReqId, TSpec: EthSpec> {
|
|
||||||
/// A behaviour event
|
|
||||||
Behaviour(BehaviourEvent<AppReqId, TSpec>),
|
|
||||||
/// A new listening address has been established.
|
|
||||||
NewListenAddr(Multiaddr),
|
|
||||||
/// We reached zero listening addresses.
|
|
||||||
ZeroListeners,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The configuration and state of the libp2p components for the beacon node.
|
|
||||||
pub struct Service<AppReqId: ReqId, TSpec: EthSpec> {
|
|
||||||
/// The libp2p Swarm handler.
|
|
||||||
pub swarm: Swarm<Behaviour<AppReqId, TSpec>>,
|
|
||||||
/// The bandwidth logger for the underlying libp2p transport.
|
|
||||||
pub bandwidth: Arc<BandwidthSinks>,
|
|
||||||
/// This node's PeerId.
|
|
||||||
pub local_peer_id: PeerId,
|
|
||||||
/// The libp2p logger handle.
|
|
||||||
pub log: Logger,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct Context<'a> {
|
|
||||||
pub config: &'a NetworkConfig,
|
|
||||||
pub enr_fork_id: EnrForkId,
|
|
||||||
pub fork_context: Arc<ForkContext>,
|
|
||||||
pub chain_spec: &'a ChainSpec,
|
|
||||||
pub gossipsub_registry: Option<&'a mut Registry>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<AppReqId: ReqId, TSpec: EthSpec> Service<AppReqId, TSpec> {
    pub async fn new(
        executor: task_executor::TaskExecutor,
        ctx: Context<'_>,
        log: &Logger,
    ) -> error::Result<(Arc<NetworkGlobals<TSpec>>, Self)> {
        let log = log.new(o!("service"=> "libp2p"));
        trace!(log, "Libp2p Service starting");

        let config = ctx.config;
        // initialise the node's ID
        let local_keypair = load_private_key(config, &log);

        // Create an ENR or load from disk if appropriate
        let enr =
            enr::build_or_load_enr::<TSpec>(local_keypair.clone(), config, &ctx.enr_fork_id, &log)?;

        let local_peer_id = enr.peer_id();

        // Construct the metadata
        let meta_data = load_or_build_metadata(&config.network_dir, &log);

        // set up a collection of variables accessible outside of the network crate
        let network_globals = Arc::new(NetworkGlobals::new(
            enr.clone(),
            config.libp2p_port,
            config.discovery_port,
            meta_data,
            config
                .trusted_peers
                .iter()
                .map(|x| PeerId::from(x.clone()))
                .collect(),
            &log,
        ));

        info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name));
        let discovery_string = if config.disable_discovery {
            "None".into()
        } else {
            config.discovery_port.to_string()
        };
        debug!(log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string);

        let (mut swarm, bandwidth) = {
            // Set up the transport - tcp/ws with noise and mplex
            let (transport, bandwidth) = build_transport(local_keypair.clone())
                .map_err(|e| format!("Failed to build transport: {:?}", e))?;

            // Lighthouse network behaviour
            let behaviour =
                Behaviour::new(&local_keypair, ctx, network_globals.clone(), &log).await?;

            // use the executor for libp2p
            struct Executor(task_executor::TaskExecutor);
            impl libp2p::core::Executor for Executor {
                fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
                    self.0.spawn(f, "libp2p");
                }
            }

            // sets up the libp2p connection limits
            let limits = ConnectionLimits::default()
                .with_max_pending_incoming(Some(5))
                .with_max_pending_outgoing(Some(16))
                .with_max_established_incoming(Some(
                    (config.target_peers as f32
                        * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR))
                        .ceil() as u32,
                ))
                .with_max_established_outgoing(Some(
                    (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32,
                ))
                .with_max_established(Some(
                    (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS))
                        .ceil() as u32,
                ))
                .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));

            (
                SwarmBuilder::new(transport, behaviour, local_peer_id)
                    .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero"))
                    .connection_event_buffer_size(64)
                    .connection_limits(limits)
                    .executor(Box::new(Executor(executor)))
                    .build(),
                bandwidth,
            )
        };

        // listen on the specified address
        let listen_multiaddr = {
            let mut m = Multiaddr::from(config.listen_address);
            m.push(Protocol::Tcp(config.libp2p_port));
            m
        };

        match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) {
            Ok(_) => {
                let mut log_address = listen_multiaddr;
                log_address.push(Protocol::P2p(local_peer_id.into()));
                info!(log, "Listening established"; "address" => %log_address);
            }
            Err(err) => {
                crit!(
                    log,
                    "Unable to listen on libp2p address";
                    "error" => ?err,
                    "listen_multiaddr" => %listen_multiaddr,
                );
                return Err("Libp2p was unable to listen on the given listen address.".into());
            }
        };

        // helper closure for dialing peers
        let mut dial = |mut multiaddr: Multiaddr| {
            // strip the p2p protocol if it exists
            strip_peer_id(&mut multiaddr);
            match Swarm::dial(&mut swarm, multiaddr.clone()) {
                Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => %multiaddr),
                Err(err) => debug!(
                    log,
                    "Could not connect to peer"; "address" => %multiaddr, "error" => ?err
                ),
            };
        };

        // attempt to connect to user-input libp2p nodes
        for multiaddr in &config.libp2p_nodes {
            dial(multiaddr.clone());
        }

        // attempt to connect to any specified boot-nodes
        let mut boot_nodes = config.boot_nodes_enr.clone();
        boot_nodes.dedup();

        for bootnode_enr in boot_nodes {
            for multiaddr in &bootnode_enr.multiaddr() {
                // ignore udp multiaddr if it exists
                let components = multiaddr.iter().collect::<Vec<_>>();
                if let Protocol::Udp(_) = components[1] {
                    continue;
                }

                if !network_globals
                    .peers
                    .read()
                    .is_connected_or_dialing(&bootnode_enr.peer_id())
                {
                    dial(multiaddr.clone());
                }
            }
        }

        for multiaddr in &config.boot_nodes_multiaddr {
            // check TCP support for dialing
            if multiaddr
                .iter()
                .any(|proto| matches!(proto, Protocol::Tcp(_)))
            {
                dial(multiaddr.clone());
            }
        }

        let mut subscribed_topics: Vec<GossipKind> = vec![];

        for topic_kind in &config.topics {
            if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
                subscribed_topics.push(topic_kind.clone());
            } else {
                warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind);
            }
        }

        if !subscribed_topics.is_empty() {
            info!(log, "Subscribed to topics"; "topics" => ?subscribed_topics);
        }

        let service = Service {
            swarm,
            bandwidth,
            local_peer_id,
            log,
        };

        Ok((network_globals, service))
    }

    /// Sends a request to a peer, with a given Id.
    pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) {
        self.swarm
            .behaviour_mut()
            .send_request(peer_id, request_id, request);
    }

    /// Informs the peer that their request failed.
    pub fn respond_with_error(
        &mut self,
        peer_id: PeerId,
        id: PeerRequestId,
        error: RPCResponseErrorCode,
        reason: String,
    ) {
        self.swarm
            .behaviour_mut()
            .send_error_reponse(peer_id, id, error, reason);
    }

    /// Report a peer's action.
    pub fn report_peer(
        &mut self,
        peer_id: &PeerId,
        action: PeerAction,
        source: ReportSource,
        msg: &'static str,
    ) {
        self.swarm
            .behaviour_mut()
            .peer_manager_mut()
            .report_peer(peer_id, action, source, None, msg);
    }

    /// Disconnect and ban a peer, providing a reason.
    pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) {
        self.swarm
            .behaviour_mut()
            .goodbye_peer(peer_id, reason, source);
    }

    /// Sends a response to a peer's request.
    pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response<TSpec>) {
        self.swarm
            .behaviour_mut()
            .send_successful_response(peer_id, id, response);
    }

    pub async fn next_event(&mut self) -> Libp2pEvent<AppReqId, TSpec> {
        loop {
            match self.swarm.select_next_some().await {
                SwarmEvent::Behaviour(behaviour) => {
                    // Handle banning here
                    match &behaviour {
                        BehaviourEvent::PeerBanned(peer_id) => {
                            self.swarm.ban_peer_id(*peer_id);
                        }
                        BehaviourEvent::PeerUnbanned(peer_id) => {
                            self.swarm.unban_peer_id(*peer_id);
                        }
                        _ => {}
                    }
                    return Libp2pEvent::Behaviour(behaviour);
                }
                SwarmEvent::ConnectionEstablished {
                    peer_id: _,
                    endpoint: _,
                    num_established: _,
                    concurrent_dial_errors: _,
                } => {}
                SwarmEvent::ConnectionClosed {
                    peer_id: _,
                    cause: _,
                    endpoint: _,
                    num_established: _,
                } => {}
                SwarmEvent::NewListenAddr { address, .. } => {
                    return Libp2pEvent::NewListenAddr(address)
                }
                SwarmEvent::IncomingConnection {
                    local_addr,
                    send_back_addr,
                } => {
                    trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr)
                }
                SwarmEvent::IncomingConnectionError {
                    local_addr,
                    send_back_addr,
                    error,
                } => {
                    debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error);
                }
                SwarmEvent::BannedPeer { peer_id, .. } => {
                    debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id);
                }
                SwarmEvent::OutgoingConnectionError { peer_id, error } => {
                    debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error);
                }
                SwarmEvent::ExpiredListenAddr { address, .. } => {
                    debug!(self.log, "Listen address expired"; "address" => %address)
                }
                SwarmEvent::ListenerClosed {
                    addresses, reason, ..
                } => {
                    crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason);
                    if Swarm::listeners(&self.swarm).count() == 0 {
                        return Libp2pEvent::ZeroListeners;
                    }
                }
                SwarmEvent::ListenerError { error, .. } => {
                    // this is non fatal, but we still check
                    warn!(self.log, "Listener error"; "error" => ?error);
                    if Swarm::listeners(&self.swarm).count() == 0 {
                        return Libp2pEvent::ZeroListeners;
                    }
                }
                SwarmEvent::Dialing(_peer_id) => {}
            }
        }
    }
}

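For illustration only: a hedged sketch of how a caller could drive the event loop above. The routing shown is a placeholder, not Lighthouse's actual dispatch logic; only the `Libp2pEvent` variants and `next_event()` defined above are used.

```rust
// Minimal sketch: poll the service until all listeners are gone.
async fn drive<AppReqId: ReqId, TSpec: EthSpec>(mut service: Service<AppReqId, TSpec>) {
    loop {
        match service.next_event().await {
            Libp2pEvent::Behaviour(event) => {
                // Application-level requests/responses arrive here and would be
                // dispatched to the router in the real client.
                let _ = event;
            }
            Libp2pEvent::NewListenAddr(addr) => {
                info!(service.log, "New listen address"; "addr" => %addr);
            }
            Libp2pEvent::ZeroListeners => {
                // No listeners remain; the caller would normally shut down or restart.
                break;
            }
        }
    }
}
```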
type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;

/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and
/// mplex as the multiplexing layer.
fn build_transport(
    local_private_key: Keypair,
) -> std::io::Result<(BoxedTransport, Arc<BandwidthSinks>)> {
    let tcp = libp2p::tcp::TokioTcpConfig::new().nodelay(true);
    let transport = libp2p::dns::TokioDnsConfig::system(tcp)?;
    #[cfg(feature = "libp2p-websocket")]
    let transport = {
        let trans_clone = transport.clone();
        transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
    };

    let (transport, bandwidth) = BandwidthLogging::new(transport);

    // mplex config
    let mut mplex_config = libp2p::mplex::MplexConfig::new();
    mplex_config.set_max_buffer_size(256);
    mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);

    // yamux config
    let mut yamux_config = libp2p::yamux::YamuxConfig::default();
    yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read());

    // Authentication
    Ok((
        transport
            .upgrade(core::upgrade::Version::V1)
            .authenticate(generate_noise_config(&local_private_key))
            .multiplex(core::upgrade::SelectUpgrade::new(
                yamux_config,
                mplex_config,
            ))
            .timeout(Duration::from_secs(10))
            .boxed(),
        bandwidth,
    ))
}

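A small usage sketch (not part of this commit): the sinks returned alongside the boxed transport can be sampled at any time, e.g. for metrics. This assumes libp2p's `BandwidthSinks::total_inbound`/`total_outbound` accessors, which are not shown in this file.

```rust
// Hedged sketch: build the transport and log the byte counters.
fn log_bandwidth(keypair: Keypair, log: &slog::Logger) -> std::io::Result<()> {
    let (_transport, bandwidth) = build_transport(keypair)?;
    debug!(
        log,
        "Transport bandwidth so far";
        // Accessor names assumed from the libp2p bandwidth module.
        "inbound_bytes" => bandwidth.total_inbound(),
        "outbound_bytes" => bandwidth.total_outbound(),
    );
    Ok(())
}
```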
// Useful helper functions for debugging. Currently not used in the client.
#[allow(dead_code)]
fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
    let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") {
        stripped.to_string()
    } else {
        hex_bytes.to_string()
    };

    hex::decode(&hex_bytes)
        .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
        .and_then(keypair_from_bytes)
}

#[allow(dead_code)]
fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes)
        .map(|secret| {
            let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into();
            Keypair::Secp256k1(keypair)
        })
        .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into())
}

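For illustration, a hedged use of the debug helpers above (the hard-coded hex key is a placeholder for shape only; do not use a fixed key in practice):

```rust
// Sketch: derive a PeerId from a hex-encoded secp256k1 secret key.
fn peer_id_from_hex_key() -> error::Result<PeerId> {
    let keypair =
        keypair_from_hex("0x0101010101010101010101010101010101010101010101010101010101010101")?;
    Ok(PeerId::from(keypair.public()))
}
```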
/// Loads a private key from disk. If this fails, a new key is
/// generated and is then saved to disk.
///
/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5.
pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
    // check for key from disk
    let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME);
    if let Ok(mut network_key_file) = File::open(network_key_f.clone()) {
        let mut key_bytes: Vec<u8> = Vec::with_capacity(36);
        match network_key_file.read_to_end(&mut key_bytes) {
            Err(_) => debug!(log, "Could not read network key file"),
            Ok(_) => {
                // only accept secp256k1 keys for now
                if let Ok(secret_key) =
                    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes)
                {
                    let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into();
                    debug!(log, "Loaded network key from disk.");
                    return Keypair::Secp256k1(kp);
                } else {
                    debug!(log, "Network key file is not a valid secp256k1 key");
                }
            }
        }
    }

    // if a key could not be loaded from disk, generate a new one and save it
    let local_private_key = Keypair::generate_secp256k1();
    if let Keypair::Secp256k1(key) = local_private_key.clone() {
        let _ = std::fs::create_dir_all(&config.network_dir);
        match File::create(network_key_f.clone())
            .and_then(|mut f| f.write_all(&key.secret().to_bytes()))
        {
            Ok(_) => {
                debug!(log, "New network key generated and written to disk");
            }
            Err(e) => {
                warn!(
                    log,
                    "Could not write node key to file: {:?}. error: {}", network_key_f, e
                );
            }
        }
    }
    local_private_key
}

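A minimal sketch of the intended behaviour of `load_private_key`: the first call generates and persists the secp256k1 secret to the `key` file, and subsequent calls against the same `network_dir` read it back, so the node identity is stable.

```rust
// Sketch: two consecutive loads should yield the same peer identity.
fn key_is_stable(config: &NetworkConfig, log: &slog::Logger) -> bool {
    let first = load_private_key(config, log);
    let second = load_private_key(config, log);
    PeerId::from(first.public()) == PeerId::from(second.public())
}
```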
/// Generate authenticated XX Noise config from identity keys
fn generate_noise_config(
    identity_keypair: &Keypair,
) -> noise::NoiseAuthenticated<noise::XX, noise::X25519Spec, ()> {
    let static_dh_keys = noise::Keypair::<noise::X25519Spec>::new()
        .into_authentic(identity_keypair)
        .expect("signing can fail only once during starting a node");
    noise::NoiseConfig::xx(static_dh_keys).into_authenticated()
}

/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p
/// only supports dialing to an address without providing the peer id.
fn strip_peer_id(addr: &mut Multiaddr) {
    let last = addr.pop();
    match last {
        Some(Protocol::P2p(_)) => {}
        Some(other) => addr.push(other),
        _ => {}
    }
}

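For illustration, a small sketch of the stripping behaviour: a trailing `/p2p/<peer-id>` component is removed and anything else is left untouched.

```rust
// Sketch: append a p2p component to an address, then strip it again.
fn strip_example() {
    let peer_id = PeerId::from(Keypair::generate_secp256k1().public());
    let mut addr: Multiaddr = "/ip4/127.0.0.1/tcp/9000".parse().expect("valid multiaddr");
    addr.push(Protocol::P2p(peer_id.into()));
    strip_peer_id(&mut addr);
    assert_eq!(addr, "/ip4/127.0.0.1/tcp/9000".parse::<Multiaddr>().unwrap());
}
```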
/// Load metadata from persisted file. Return default metadata if loading fails.
fn load_or_build_metadata<E: EthSpec>(
    network_dir: &std::path::Path,
    log: &slog::Logger,
) -> MetaData<E> {
    // We load a V2 metadata version by default (regardless of current fork)
    // since a V2 metadata can be converted to V1. The RPC encoder is responsible
    // for sending the correct metadata version based on the negotiated protocol version.
    let mut meta_data = MetaDataV2 {
        seq_number: 0,
        attnets: EnrAttestationBitfield::<E>::default(),
        syncnets: EnrSyncCommitteeBitfield::<E>::default(),
    };
    // Read metadata from persisted file if available
    let metadata_path = network_dir.join(METADATA_FILENAME);
    if let Ok(mut metadata_file) = File::open(metadata_path) {
        let mut metadata_ssz = Vec::new();
        if metadata_file.read_to_end(&mut metadata_ssz).is_ok() {
            // Attempt to read a MetaDataV2 version from the persisted file,
            // if that fails, read MetaDataV1
            match MetaDataV2::<E>::from_ssz_bytes(&metadata_ssz) {
                Ok(persisted_metadata) => {
                    meta_data.seq_number = persisted_metadata.seq_number;
                    // Increment seq number if persisted attnet is not default
                    if persisted_metadata.attnets != meta_data.attnets
                        || persisted_metadata.syncnets != meta_data.syncnets
                    {
                        meta_data.seq_number += 1;
                    }
                    debug!(log, "Loaded metadata from disk");
                }
                Err(_) => {
                    match MetaDataV1::<E>::from_ssz_bytes(&metadata_ssz) {
                        Ok(persisted_metadata) => {
                            let persisted_metadata = MetaData::V1(persisted_metadata);
                            // Increment seq number as the persisted metadata version is updated
                            meta_data.seq_number = *persisted_metadata.seq_number() + 1;
                            debug!(log, "Loaded metadata from disk");
                        }
                        Err(e) => {
                            debug!(
                                log,
                                "Metadata from file could not be decoded";
                                "error" => ?e,
                            );
                        }
                    }
                }
            }
        }
    };

    // Wrap the MetaData
    let meta_data = MetaData::V2(meta_data);

    debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number());
    save_metadata_to_disk(network_dir, meta_data.clone(), log);
    meta_data
}
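The decode fallback above can be summarised by the following sketch (illustrative only, using the same SSZ decoders): try the V2 layout first and only fall back to V1 if that fails.

```rust
// Hedged sketch of the version fallback: `bytes` is assumed to be a previously
// persisted metadata file.
fn decode_persisted_metadata<E: EthSpec>(bytes: &[u8]) -> Option<MetaData<E>> {
    MetaDataV2::<E>::from_ssz_bytes(bytes)
        .map(MetaData::V2)
        .or_else(|_| MetaDataV1::<E>::from_ssz_bytes(bytes).map(MetaData::V1))
        .ok()
}
```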
beacon_node/lighthouse_network/src/service/api_types.rs (new file, 101 lines)
@@ -0,0 +1,101 @@
use std::sync::Arc;

use libp2p::core::connection::ConnectionId;
use types::{EthSpec, SignedBeaconBlock};

use crate::rpc::{
    methods::{
        BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse,
        RPCResponse, ResponseTermination, StatusMessage,
    },
    OutboundRequest, SubstreamId,
};

/// Identifier of requests sent by a peer.
pub type PeerRequestId = (ConnectionId, SubstreamId);

/// Identifier of a request.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RequestId<AppReqId> {
    Application(AppReqId),
    Internal,
}

/// The type of RPC requests the Behaviour informs it has received and allows for sending.
///
// NOTE: This is an application-level wrapper over the lower network level requests that can be
// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't
// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`.
#[derive(Debug, Clone, PartialEq)]
pub enum Request {
    /// A Status message.
    Status(StatusMessage),
    /// A blocks by range request.
    BlocksByRange(BlocksByRangeRequest),
    /// A blocks by root request.
    BlocksByRoot(BlocksByRootRequest),
}

impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
    fn from(req: Request) -> OutboundRequest<TSpec> {
        match req {
            Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r),
            Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => {
                OutboundRequest::BlocksByRange(OldBlocksByRangeRequest {
                    start_slot,
                    count,
                    step: 1,
                })
            }
            Request::Status(s) => OutboundRequest::Status(s),
        }
    }
}

/// The type of RPC responses the Behaviour informs it has received, and allows for sending.
///
// NOTE: This is an application-level wrapper over the lower network level responses that can be
// sent. The main difference is the absence of Pong and Metadata, which don't leave the
// Behaviour. For all protocol responses managed by RPC see `RPCResponse` and
// `RPCCodedResponse`.
#[derive(Debug, Clone, PartialEq)]
pub enum Response<TSpec: EthSpec> {
    /// A Status message.
    Status(StatusMessage),
    /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch.
    BlocksByRange(Option<Arc<SignedBeaconBlock<TSpec>>>),
    /// A response to a get BLOCKS_BY_ROOT request.
    BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>),
}

impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TSpec> {
    fn from(resp: Response<TSpec>) -> RPCCodedResponse<TSpec> {
        match resp {
            Response::BlocksByRoot(r) => match r {
                Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)),
                None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot),
            },
            Response::BlocksByRange(r) => match r {
                Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)),
                None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange),
            },
            Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)),
        }
    }
}

impl<AppReqId: std::fmt::Debug> slog::Value for RequestId<AppReqId> {
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        match self {
            RequestId::Internal => slog::Value::serialize("Behaviour", record, key, serializer),
            RequestId::Application(ref id) => {
                slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer)
            }
        }
    }
}
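For illustration, a hedged round-trip through the conversions defined above: an application `Request` becomes a lower-level `OutboundRequest`, and a `Response` becomes an `RPCCodedResponse` before hitting the wire.

```rust
// Sketch using only the `From` impls in this file.
fn conversions_example<TSpec: EthSpec>(status: StatusMessage) {
    let outbound: OutboundRequest<TSpec> = Request::Status(status.clone()).into();
    let coded: RPCCodedResponse<TSpec> = Response::<TSpec>::Status(status).into();
    let _ = (outbound, coded);
}
```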
beacon_node/lighthouse_network/src/service/behaviour.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
use crate::discovery::Discovery;
use crate::peer_manager::PeerManager;
use crate::rpc::{ReqId, RPC};
use crate::types::SnappyTransform;

use libp2p::gossipsub::subscription_filter::{
    MaxCountSubscriptionFilter, WhitelistSubscriptionFilter,
};
use libp2p::gossipsub::Gossipsub as BaseGossipsub;
use libp2p::identify::Identify;
use libp2p::swarm::NetworkBehaviour;
use libp2p::NetworkBehaviour;
use types::EthSpec;

use super::api_types::RequestId;

pub type SubscriptionFilter = MaxCountSubscriptionFilter<WhitelistSubscriptionFilter>;
pub type Gossipsub = BaseGossipsub<SnappyTransform, SubscriptionFilter>;

#[derive(NetworkBehaviour)]
pub(crate) struct Behaviour<AppReqId: ReqId, TSpec: EthSpec> {
    /// The routing pub-sub mechanism for eth2.
    pub gossipsub: Gossipsub,
    /// The Eth2 RPC specified in the wire-0 protocol.
    pub eth2_rpc: RPC<RequestId<AppReqId>, TSpec>,
    /// Discv5 Discovery protocol.
    pub discovery: Discovery<TSpec>,
    /// Keep regular connection to peers and disconnect if absent.
    // NOTE: The id protocol is used for initial interop. This will be removed by mainnet.
    /// Provides IP addresses and peer information.
    pub identify: Identify,
    /// The peer manager that keeps track of peer's reputation and status.
    pub peer_manager: PeerManager<TSpec>,
}
(File diff suppressed because it is too large.)
beacon_node/lighthouse_network/src/service/utils.rs (new file, 288 lines)
@@ -0,0 +1,288 @@
use crate::multiaddr::Protocol;
use crate::rpc::{MetaData, MetaDataV1, MetaDataV2};
use crate::types::{
    error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind,
};
use crate::{GossipTopic, NetworkConfig};
use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks};
use libp2p::core::{
    identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed,
};
use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter;
use libp2p::gossipsub::IdentTopic as Topic;
use libp2p::{core, noise, PeerId, Transport};
use prometheus_client::registry::Registry;
use slog::{debug, warn};
use ssz::Decode;
use ssz::Encode;
use std::collections::HashSet;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId};

pub const NETWORK_KEY_FILENAME: &str = "key";
/// The maximum simultaneous libp2p connections per peer.
pub const MAX_CONNECTIONS_PER_PEER: u32 = 1;
/// The filename to store our local metadata.
pub const METADATA_FILENAME: &str = "metadata";

pub struct Context<'a> {
    pub config: &'a NetworkConfig,
    pub enr_fork_id: EnrForkId,
    pub fork_context: Arc<ForkContext>,
    pub chain_spec: &'a ChainSpec,
    pub gossipsub_registry: Option<&'a mut Registry>,
}

type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;

/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and
/// mplex as the multiplexing layer.
pub fn build_transport(
    local_private_key: Keypair,
) -> std::io::Result<(BoxedTransport, Arc<BandwidthSinks>)> {
    let tcp =
        libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true));
    let transport = libp2p::dns::TokioDnsConfig::system(tcp)?;
    #[cfg(feature = "libp2p-websocket")]
    let transport = {
        let trans_clone = transport.clone();
        transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
    };

    let (transport, bandwidth) = BandwidthLogging::new(transport);

    // mplex config
    let mut mplex_config = libp2p::mplex::MplexConfig::new();
    mplex_config.set_max_buffer_size(256);
    mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);

    // yamux config
    let mut yamux_config = libp2p::yamux::YamuxConfig::default();
    yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read());

    // Authentication
    Ok((
        transport
            .upgrade(core::upgrade::Version::V1)
            .authenticate(generate_noise_config(&local_private_key))
            .multiplex(core::upgrade::SelectUpgrade::new(
                yamux_config,
                mplex_config,
            ))
            .timeout(Duration::from_secs(10))
            .boxed(),
        bandwidth,
    ))
}

// Useful helper functions for debugging. Currently not used in the client.
#[allow(dead_code)]
fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
    let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") {
        stripped.to_string()
    } else {
        hex_bytes.to_string()
    };

    hex::decode(&hex_bytes)
        .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
        .and_then(keypair_from_bytes)
}

#[allow(dead_code)]
fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes)
        .map(|secret| {
            let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into();
            Keypair::Secp256k1(keypair)
        })
        .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into())
}

/// Loads a private key from disk. If this fails, a new key is
/// generated and is then saved to disk.
///
/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5.
pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
    // check for key from disk
    let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME);
    if let Ok(mut network_key_file) = File::open(network_key_f.clone()) {
        let mut key_bytes: Vec<u8> = Vec::with_capacity(36);
        match network_key_file.read_to_end(&mut key_bytes) {
            Err(_) => debug!(log, "Could not read network key file"),
            Ok(_) => {
                // only accept secp256k1 keys for now
                if let Ok(secret_key) =
                    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes)
                {
                    let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into();
                    debug!(log, "Loaded network key from disk.");
                    return Keypair::Secp256k1(kp);
                } else {
                    debug!(log, "Network key file is not a valid secp256k1 key");
                }
            }
        }
    }

    // if a key could not be loaded from disk, generate a new one and save it
    let local_private_key = Keypair::generate_secp256k1();
    if let Keypair::Secp256k1(key) = local_private_key.clone() {
        let _ = std::fs::create_dir_all(&config.network_dir);
        match File::create(network_key_f.clone())
            .and_then(|mut f| f.write_all(&key.secret().to_bytes()))
        {
            Ok(_) => {
                debug!(log, "New network key generated and written to disk");
            }
            Err(e) => {
                warn!(
                    log,
                    "Could not write node key to file: {:?}. error: {}", network_key_f, e
                );
            }
        }
    }
    local_private_key
}

/// Generate authenticated XX Noise config from identity keys
fn generate_noise_config(
    identity_keypair: &Keypair,
) -> noise::NoiseAuthenticated<noise::XX, noise::X25519Spec, ()> {
    let static_dh_keys = noise::Keypair::<noise::X25519Spec>::new()
        .into_authentic(identity_keypair)
        .expect("signing can fail only once during starting a node");
    noise::NoiseConfig::xx(static_dh_keys).into_authenticated()
}

/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p
/// only supports dialing to an address without providing the peer id.
pub fn strip_peer_id(addr: &mut Multiaddr) {
    let last = addr.pop();
    match last {
        Some(Protocol::P2p(_)) => {}
        Some(other) => addr.push(other),
        _ => {}
    }
}

/// Load metadata from persisted file. Return default metadata if loading fails.
pub fn load_or_build_metadata<E: EthSpec>(
    network_dir: &std::path::Path,
    log: &slog::Logger,
) -> MetaData<E> {
    // We load a V2 metadata version by default (regardless of current fork)
    // since a V2 metadata can be converted to V1. The RPC encoder is responsible
    // for sending the correct metadata version based on the negotiated protocol version.
    let mut meta_data = MetaDataV2 {
        seq_number: 0,
        attnets: EnrAttestationBitfield::<E>::default(),
        syncnets: EnrSyncCommitteeBitfield::<E>::default(),
    };
    // Read metadata from persisted file if available
    let metadata_path = network_dir.join(METADATA_FILENAME);
    if let Ok(mut metadata_file) = File::open(metadata_path) {
        let mut metadata_ssz = Vec::new();
        if metadata_file.read_to_end(&mut metadata_ssz).is_ok() {
            // Attempt to read a MetaDataV2 version from the persisted file,
            // if that fails, read MetaDataV1
            match MetaDataV2::<E>::from_ssz_bytes(&metadata_ssz) {
                Ok(persisted_metadata) => {
                    meta_data.seq_number = persisted_metadata.seq_number;
                    // Increment seq number if persisted attnet is not default
                    if persisted_metadata.attnets != meta_data.attnets
                        || persisted_metadata.syncnets != meta_data.syncnets
                    {
                        meta_data.seq_number += 1;
                    }
                    debug!(log, "Loaded metadata from disk");
                }
                Err(_) => {
                    match MetaDataV1::<E>::from_ssz_bytes(&metadata_ssz) {
                        Ok(persisted_metadata) => {
                            let persisted_metadata = MetaData::V1(persisted_metadata);
                            // Increment seq number as the persisted metadata version is updated
                            meta_data.seq_number = *persisted_metadata.seq_number() + 1;
                            debug!(log, "Loaded metadata from disk");
                        }
                        Err(e) => {
                            debug!(
                                log,
                                "Metadata from file could not be decoded";
                                "error" => ?e,
                            );
                        }
                    }
                }
            }
        }
    };

    // Wrap the MetaData
    let meta_data = MetaData::V2(meta_data);

    debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number());
    save_metadata_to_disk(network_dir, meta_data.clone(), log);
    meta_data
}

/// Creates a whitelist topic filter that covers all possible topics using the given set of
/// possible fork digests.
pub(crate) fn create_whitelist_filter(
    possible_fork_digests: Vec<[u8; 4]>,
    attestation_subnet_count: u64,
    sync_committee_subnet_count: u64,
) -> WhitelistSubscriptionFilter {
    let mut possible_hashes = HashSet::new();
    for fork_digest in possible_fork_digests {
        let mut add = |kind| {
            let topic: Topic =
                GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into();
            possible_hashes.insert(topic.hash());
        };

        use GossipKind::*;
        add(BeaconBlock);
        add(BeaconAggregateAndProof);
        add(VoluntaryExit);
        add(ProposerSlashing);
        add(AttesterSlashing);
        add(SignedContributionAndProof);
        for id in 0..attestation_subnet_count {
            add(Attestation(SubnetId::new(id)));
        }
        for id in 0..sync_committee_subnet_count {
            add(SyncCommitteeMessage(SyncSubnetId::new(id)));
        }
    }
    WhitelistSubscriptionFilter(possible_hashes)
}

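For illustration, a hedged usage of the filter builder above. The fork digest is a placeholder; in the client the possible digests come from the fork context, and 64 attestation subnets plus 4 sync committee subnets are the mainnet values.

```rust
// Sketch: whitelist every topic for a single (placeholder) fork digest.
fn example_filter() -> WhitelistSubscriptionFilter {
    create_whitelist_filter(vec![[0u8; 4]], 64, 4)
}
```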
/// Persist metadata to disk
pub(crate) fn save_metadata_to_disk<E: EthSpec>(
    dir: &Path,
    metadata: MetaData<E>,
    log: &slog::Logger,
) {
    let _ = std::fs::create_dir_all(&dir);
    match File::create(dir.join(METADATA_FILENAME))
        .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes()))
    {
        Ok(_) => {
            debug!(log, "Metadata written to disk");
        }
        Err(e) => {
            warn!(
                log,
                "Could not write metadata to disk";
                "file" => format!("{:?}{:?}", dir, METADATA_FILENAME),
                "error" => %e
            );
        }
    }
}
@@ -23,7 +23,8 @@
 use std::collections::HashMap;
 use std::task::{Context, Poll};

-use libp2p::core::connection::{ConnectedPoint, ConnectionId, ListenerId};
+use libp2p::core::connection::{ConnectedPoint, ConnectionId};
+use libp2p::core::transport::ListenerId;
 use libp2p::swarm::handler::{ConnectionHandler, DummyConnectionHandler, IntoConnectionHandler};
 use libp2p::swarm::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters};
 use libp2p::{Multiaddr, PeerId};

@@ -1,10 +1,10 @@
 #![cfg(test)]
 use libp2p::gossipsub::GossipsubConfigBuilder;
+use lighthouse_network::service::Network as LibP2PService;
 use lighthouse_network::Enr;
 use lighthouse_network::EnrExt;
 use lighthouse_network::Multiaddr;
-use lighthouse_network::Service as LibP2PService;
-use lighthouse_network::{Libp2pEvent, NetworkConfig};
+use lighthouse_network::{NetworkConfig, NetworkEvent};
 use slog::{debug, error, o, Drain};
 use std::sync::Arc;
 use std::sync::Weak;

@@ -119,18 +119,19 @@ pub async fn build_libp2p_instance(
         LibP2PService::new(executor, libp2p_context, &log)
             .await
             .expect("should build libp2p instance")
-            .1,
+            .0,
         signal,
     )
 }

 #[allow(dead_code)]
 pub fn get_enr(node: &LibP2PService<ReqId, E>) -> Enr {
-    node.swarm.behaviour().local_enr()
+    node.local_enr()
 }

 // Returns `n` libp2p peers in fully connected topology.
 #[allow(dead_code)]
+/*
 pub async fn build_full_mesh(
     rt: Weak<Runtime>,
     log: slog::Logger,

@@ -157,8 +158,7 @@ pub async fn build_full_mesh(
         }
     }
     nodes
-}
+}*/

 // Constructs a pair of nodes with separate loggers. The sender dials the receiver.
 // This returns a (sender, receiver) pair.
 #[allow(dead_code)]

@@ -173,19 +173,19 @@ pub async fn build_node_pair(
     let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name).await;
     let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name).await;

-    let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone();
+    let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone();

     // let the two nodes set up listeners
     let sender_fut = async {
         loop {
-            if let Libp2pEvent::NewListenAddr(_) = sender.next_event().await {
+            if let NetworkEvent::NewListenAddr(_) = sender.next_event().await {
                 return;
             }
         }
     };
     let receiver_fut = async {
         loop {
-            if let Libp2pEvent::NewListenAddr(_) = receiver.next_event().await {
+            if let NetworkEvent::NewListenAddr(_) = receiver.next_event().await {
                 return;
             }
         }

@@ -199,7 +199,8 @@ pub async fn build_node_pair(
         _ = joined => {}
     }

-    match libp2p::Swarm::dial(&mut sender.swarm, receiver_multiaddr.clone()) {
+    // sender.dial_peer(peer_id);
+    match sender.testing_dial(receiver_multiaddr.clone()) {
         Ok(()) => {
             debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr))
         }

@@ -226,7 +227,7 @@ pub async fn build_linear(
         .map(|x| get_enr(x).multiaddr()[1].clone())
         .collect();
     for i in 0..n - 1 {
-        match libp2p::Swarm::dial(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) {
+        match nodes[i].testing_dial(multiaddrs[i + 1].clone()) {
             Ok(()) => debug!(log, "Connected"),
             Err(_) => error!(log, "Failed to connect"),
         };

@@ -98,9 +98,7 @@ async fn banned_peers_consistency() {
         discovery_enabled: false,
         ..Default::default()
     };
-    let pm = PeerManager::new(pm_config, globals.clone(), &pm_log)
-        .await
-        .unwrap();
+    let pm = PeerManager::new(pm_config, globals.clone(), &pm_log).unwrap();
     let mut pm_swarm = swarm::new_test_swarm(Behaviour::new(pm));
     let pm_addr = swarm::bind_listener(&mut pm_swarm).await;
     let service = Service { swarm: pm_swarm };

@@ -1,8 +1,6 @@
 #![cfg(test)]
 use lighthouse_network::rpc::methods::*;
-use lighthouse_network::{
-    rpc::max_rpc_size, BehaviourEvent, Libp2pEvent, ReportSource, Request, Response,
-};
+use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response};
 use slog::{debug, warn, Level};
 use ssz::Encode;
 use ssz_types::VariableList;
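The remaining hunks in this test file all apply the same mechanical migration: nested `Libp2pEvent::Behaviour(BehaviourEvent::…)` matches become flat `NetworkEvent::…` matches, and RPC calls go through the service wrappers instead of `swarm.behaviour_mut()`. A condensed sketch of the new pattern follows; the exact generic parameters of `Network` are assumed here and only shapes visible in this diff are used.

```rust
// Hedged sketch of the post-migration test pattern.
async fn send_status_when_connected<AppReqId: ReqId, TSpec: EthSpec>(
    sender: &mut Network<AppReqId, TSpec>,
    req_id: AppReqId,
    rpc_request: Request,
) {
    loop {
        match sender.next_event().await {
            // Flat `NetworkEvent` variants replace `Libp2pEvent::Behaviour(BehaviourEvent::..)`.
            NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                // The service wrapper replaces `swarm.behaviour_mut().send_request(..)`.
                sender.send_request(peer_id, req_id, rpc_request.clone());
            }
            NetworkEvent::ResponseReceived { .. } => return,
            _ => {} // Ignore other events
        }
    }
}
```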
@@ -86,19 +84,16 @@ fn test_status_rpc() {
     let sender_future = async {
         loop {
             match sender.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
+                NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                     // Send a STATUS message
                     debug!(log, "Sending RPC");
-                    sender
-                        .swarm
-                        .behaviour_mut()
-                        .send_request(peer_id, 10, rpc_request.clone());
+                    sender.send_request(peer_id, 10, rpc_request.clone());
                 }
-                Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
+                NetworkEvent::ResponseReceived {
                     peer_id: _,
                     id: 10,
                     response,
-                }) => {
+                } => {
                     // Should receive the RPC response
                     debug!(log, "Sender Received");
                     assert_eq!(response, rpc_response.clone());

@@ -114,19 +109,15 @@ fn test_status_rpc() {
     let receiver_future = async {
         loop {
             match receiver.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
+                NetworkEvent::RequestReceived {
                     peer_id,
                     id,
                     request,
-                }) => {
+                } => {
                     if request == rpc_request {
                         // send the response
                         debug!(log, "Receiver Received");
-                        receiver.swarm.behaviour_mut().send_successful_response(
-                            peer_id,
-                            id,
-                            rpc_response.clone(),
-                        );
+                        receiver.send_response(peer_id, id, rpc_response.clone());
                     }
                 }
                 _ => {} // Ignore other events

@@ -191,20 +182,16 @@ fn test_blocks_by_range_chunked_rpc() {
     let sender_future = async {
         loop {
             match sender.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
+                NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                     // Send a STATUS message
                     debug!(log, "Sending RPC");
-                    sender.swarm.behaviour_mut().send_request(
-                        peer_id,
-                        request_id,
-                        rpc_request.clone(),
-                    );
+                    sender.send_request(peer_id, request_id, rpc_request.clone());
                 }
-                Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
+                NetworkEvent::ResponseReceived {
                     peer_id: _,
                     id: _,
                     response,
-                }) => {
+                } => {
                     warn!(log, "Sender received a response");
                     match response {
                         Response::BlocksByRange(Some(_)) => {

@@ -236,11 +223,11 @@ fn test_blocks_by_range_chunked_rpc() {
     let receiver_future = async {
         loop {
             match receiver.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
+                NetworkEvent::RequestReceived {
                     peer_id,
                     id,
                     request,
-                }) => {
+                } => {
                     if request == rpc_request {
                         // send the response
                         warn!(log, "Receiver got request");

@@ -254,18 +241,10 @@ fn test_blocks_by_range_chunked_rpc() {
                         } else {
                             rpc_response_merge_small.clone()
                         };
-                        receiver.swarm.behaviour_mut().send_successful_response(
-                            peer_id,
-                            id,
-                            rpc_response.clone(),
-                        );
+                        receiver.send_response(peer_id, id, rpc_response.clone());
                         }
                         // send the stream termination
-                        receiver.swarm.behaviour_mut().send_successful_response(
-                            peer_id,
-                            id,
-                            Response::BlocksByRange(None),
-                        );
+                        receiver.send_response(peer_id, id, Response::BlocksByRange(None));
                     }
                 }
                 _ => {} // Ignore other events

@@ -318,17 +297,13 @@ fn test_blocks_by_range_over_limit() {
     let sender_future = async {
         loop {
             match sender.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
+                NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                     // Send a STATUS message
                     debug!(log, "Sending RPC");
-                    sender.swarm.behaviour_mut().send_request(
-                        peer_id,
-                        request_id,
-                        rpc_request.clone(),
-                    );
+                    sender.send_request(peer_id, request_id, rpc_request.clone());
                 }
                 // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE
-                Libp2pEvent::Behaviour(BehaviourEvent::RPCFailed { id, .. }) => {
+                NetworkEvent::RPCFailed { id, .. } => {
                     assert_eq!(id, request_id);
                     return;
                 }

@@ -341,28 +316,20 @@ fn test_blocks_by_range_over_limit() {
     let receiver_future = async {
         loop {
             match receiver.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
+                NetworkEvent::RequestReceived {
                     peer_id,
                     id,
                     request,
-                }) => {
+                } => {
                     if request == rpc_request {
                         // send the response
                         warn!(log, "Receiver got request");
                         for _ in 0..messages_to_send {
                             let rpc_response = rpc_response_merge_large.clone();
-                            receiver.swarm.behaviour_mut().send_successful_response(
-                                peer_id,
-                                id,
-                                rpc_response.clone(),
-                            );
+                            receiver.send_response(peer_id, id, rpc_response.clone());
                         }
                         // send the stream termination
-                        receiver.swarm.behaviour_mut().send_successful_response(
-                            peer_id,
-                            id,
-                            Response::BlocksByRange(None),
-                        );
+                        receiver.send_response(peer_id, id, Response::BlocksByRange(None));
                     }
                 }
                 _ => {} // Ignore other events

@@ -418,20 +385,16 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
     let sender_future = async {
         loop {
             match sender.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
+                NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                     // Send a STATUS message
                     debug!(log, "Sending RPC");
-                    sender.swarm.behaviour_mut().send_request(
-                        peer_id,
-                        request_id,
-                        rpc_request.clone(),
-                    );
+                    sender.send_request(peer_id, request_id, rpc_request.clone());
                 }
-                Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
+                NetworkEvent::ResponseReceived {
                     peer_id: _,
                     id: _,
                     response,
-                }) =>
+                } =>
                 // Should receive the RPC response
                 {
                     debug!(log, "Sender received a response");

@@ -469,11 +432,11 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
         .await
         {
             futures::future::Either::Left((
-                Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
+                NetworkEvent::RequestReceived {
                     peer_id,
                     id,
                     request,
-                }),
+                },
                 _,
             )) => {
                 if request == rpc_request {

@@ -490,11 +453,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
                 if message_info.is_some() {
                     messages_sent += 1;
                     let (peer_id, stream_id) = message_info.as_ref().unwrap();
-                    receiver.swarm.behaviour_mut().send_successful_response(
-                        *peer_id,
-                        *stream_id,
-                        rpc_response.clone(),
-                    );
+                    receiver.send_response(*peer_id, *stream_id, rpc_response.clone());
                     debug!(log, "Sending message {}", messages_sent);
                     if messages_sent == messages_to_send + extra_messages_to_send {
                         // stop sending messages

@@ -550,19 +509,16 @@ fn test_blocks_by_range_single_empty_rpc() {
     let sender_future = async {
         loop {
             match sender.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
+                NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                     // Send a STATUS message
                     debug!(log, "Sending RPC");
-                    sender
-                        .swarm
-                        .behaviour_mut()
-                        .send_request(peer_id, 10, rpc_request.clone());
+                    sender.send_request(peer_id, 10, rpc_request.clone());
                 }
-                Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
+                NetworkEvent::ResponseReceived {
                     peer_id: _,
                     id: 10,
                     response,
-                }) => match response {
+                } => match response {
                     Response::BlocksByRange(Some(_)) => {
                         assert_eq!(response, rpc_response.clone());
                         messages_received += 1;

@@ -585,28 +541,20 @@ fn test_blocks_by_range_single_empty_rpc() {
     let receiver_future = async {
         loop {
             match receiver.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
+                NetworkEvent::RequestReceived {
                     peer_id,
                     id,
                     request,
-                }) => {
+                } => {
                     if request == rpc_request {
                         // send the response
                         warn!(log, "Receiver got request");

                         for _ in 1..=messages_to_send {
-                            receiver.swarm.behaviour_mut().send_successful_response(
-                                peer_id,
-                                id,
-                                rpc_response.clone(),
-                            );
+                            receiver.send_response(peer_id, id, rpc_response.clone());
                         }
                         // send the stream termination
-                        receiver.swarm.behaviour_mut().send_successful_response(
-                            peer_id,
-                            id,
-                            Response::BlocksByRange(None),
-                        );
+                        receiver.send_response(peer_id, id, Response::BlocksByRange(None));
                     }
                 }
                 _ => {} // Ignore other events

@@ -676,19 +624,16 @@ fn test_blocks_by_root_chunked_rpc() {
     let sender_future = async {
         loop {
             match sender.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
+                NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                     // Send a STATUS message
                     debug!(log, "Sending RPC");
-                    sender
-                        .swarm
-                        .behaviour_mut()
-                        .send_request(peer_id, 6, rpc_request.clone());
+                    sender.send_request(peer_id, 6, rpc_request.clone());
                 }
-                Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
+                NetworkEvent::ResponseReceived {
                     peer_id: _,
                     id: 6,
                     response,
-                }) => match response {
+                } => match response {
                     Response::BlocksByRoot(Some(_)) => {
                         if messages_received < 2 {
                             assert_eq!(response, rpc_response_base.clone());

@@ -717,11 +662,11 @@ fn test_blocks_by_root_chunked_rpc() {
     let receiver_future = async {
         loop {
             match receiver.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
+                NetworkEvent::RequestReceived {
                     peer_id,
                     id,
                     request,
-                }) => {
+                } => {
                     if request == rpc_request {
                         // send the response
                         debug!(log, "Receiver got request");

@@ -735,19 +680,11 @@ fn test_blocks_by_root_chunked_rpc() {
                         } else {
                             rpc_response_merge_small.clone()
                         };
-                        receiver.swarm.behaviour_mut().send_successful_response(
-                            peer_id,
-                            id,
-                            rpc_response,
-                        );
+                        receiver.send_response(peer_id, id, rpc_response);
                         debug!(log, "Sending message");
                         }
                         // send the stream termination
-                        receiver.swarm.behaviour_mut().send_successful_response(
-                            peer_id,
-                            id,
-                            Response::BlocksByRange(None),
-                        );
+                        receiver.send_response(peer_id, id, Response::BlocksByRange(None));
                         debug!(log, "Send stream term");
                     }
                 }

@@ -811,19 +748,16 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
     let sender_future = async {
         loop {
             match sender.next_event().await {
-                Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
+                NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                     // Send a STATUS message
                     debug!(log, "Sending RPC");
-                    sender
-                        .swarm
-                        .behaviour_mut()
-                        .send_request(peer_id, 10, rpc_request.clone());
+                    sender.send_request(peer_id, 10, rpc_request.clone());
                 }
-                Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
NetworkEvent::ResponseReceived {
|
||||||
peer_id: _,
|
peer_id: _,
|
||||||
id: 10,
|
id: 10,
|
||||||
response,
|
response,
|
||||||
}) => {
|
} => {
|
||||||
debug!(log, "Sender received a response");
|
debug!(log, "Sender received a response");
|
||||||
match response {
|
match response {
|
||||||
Response::BlocksByRoot(Some(_)) => {
|
Response::BlocksByRoot(Some(_)) => {
|
||||||
@ -861,11 +795,11 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
|||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
futures::future::Either::Left((
|
futures::future::Either::Left((
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
|
NetworkEvent::RequestReceived {
|
||||||
peer_id,
|
peer_id,
|
||||||
id,
|
id,
|
||||||
request,
|
request,
|
||||||
}),
|
},
|
||||||
_,
|
_,
|
||||||
)) => {
|
)) => {
|
||||||
if request == rpc_request {
|
if request == rpc_request {
|
||||||
@ -882,11 +816,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
|||||||
if message_info.is_some() {
|
if message_info.is_some() {
|
||||||
messages_sent += 1;
|
messages_sent += 1;
|
||||||
let (peer_id, stream_id) = message_info.as_ref().unwrap();
|
let (peer_id, stream_id) = message_info.as_ref().unwrap();
|
||||||
receiver.swarm.behaviour_mut().send_successful_response(
|
receiver.send_response(*peer_id, *stream_id, rpc_response.clone());
|
||||||
*peer_id,
|
|
||||||
*stream_id,
|
|
||||||
rpc_response.clone(),
|
|
||||||
);
|
|
||||||
debug!(log, "Sending message {}", messages_sent);
|
debug!(log, "Sending message {}", messages_sent);
|
||||||
if messages_sent == messages_to_send + extra_messages_to_send {
|
if messages_sent == messages_to_send + extra_messages_to_send {
|
||||||
// stop sending messages
|
// stop sending messages
|
||||||
@ -926,16 +856,16 @@ fn test_goodbye_rpc() {
|
|||||||
let sender_future = async {
|
let sender_future = async {
|
||||||
loop {
|
loop {
|
||||||
match sender.next_event().await {
|
match sender.next_event().await {
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
|
NetworkEvent::PeerConnectedOutgoing(peer_id) => {
|
||||||
// Send a goodbye and disconnect
|
// Send a goodbye and disconnect
|
||||||
debug!(log, "Sending RPC");
|
debug!(log, "Sending RPC");
|
||||||
sender.swarm.behaviour_mut().goodbye_peer(
|
sender.goodbye_peer(
|
||||||
&peer_id,
|
&peer_id,
|
||||||
GoodbyeReason::IrrelevantNetwork,
|
GoodbyeReason::IrrelevantNetwork,
|
||||||
ReportSource::SyncService,
|
ReportSource::SyncService,
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => {
|
NetworkEvent::PeerDisconnected(_) => {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
_ => {} // Ignore other RPC messages
|
_ => {} // Ignore other RPC messages
|
||||||
@ -947,7 +877,7 @@ fn test_goodbye_rpc() {
|
|||||||
let receiver_future = async {
|
let receiver_future = async {
|
||||||
loop {
|
loop {
|
||||||
match receiver.next_event().await {
|
match receiver.next_event().await {
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => {
|
NetworkEvent::PeerDisconnected(_) => {
|
||||||
// Should receive sent RPC request
|
// Should receive sent RPC request
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
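The shape of every test change above is the same: the new `Network` type exposes `send_request`, `send_response` and `goodbye_peer` itself, so callers no longer reach through `swarm.behaviour_mut()`. A minimal, self-contained sketch of that facade idea (stand-in types only, not the actual lighthouse_network API):

```rust
// Hypothetical facade sketch: the service owns the swarm/behaviour internals
// and re-exports the one call the tests need. Only the shape mirrors the diff.
#[derive(Debug, Clone, PartialEq)]
struct Request(String);

// Stand-in for the libp2p behaviour that call sites used to reach directly.
#[derive(Default)]
struct Behaviour {
    outbox: Vec<(u64, Request)>,
}

impl Behaviour {
    fn send_request(&mut self, id: u64, req: Request) {
        self.outbox.push((id, req));
    }
}

// The facade: callers write `net.send_request(...)` instead of
// `net.swarm.behaviour_mut().send_request(...)`.
#[derive(Default)]
struct Network {
    behaviour: Behaviour,
}

impl Network {
    fn send_request(&mut self, id: u64, req: Request) {
        self.behaviour.send_request(id, req);
    }
}

fn main() {
    let mut net = Network::default();
    net.send_request(10, Request("BlocksByRange".into()));
    assert_eq!(net.behaviour.outbox.len(), 1);
}
```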

@@ -11,17 +11,16 @@ use beacon_chain::{BeaconChain, BeaconChainTypes};
 use futures::channel::mpsc::Sender;
 use futures::future::OptionFuture;
 use futures::prelude::*;
-use lighthouse_network::{
-    prometheus_client::registry::Registry, MessageAcceptance, Service as LibP2PService,
-};
+use futures::StreamExt;
+use lighthouse_network::service::Network;
+use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance};
 use lighthouse_network::{
     rpc::{GoodbyeReason, RPCResponseErrorCode},
-    Context, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request,
-    Response, Subnet,
+    Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet,
 };
 use lighthouse_network::{
     types::{GossipEncoding, GossipTopic},
-    BehaviourEvent, MessageId, NetworkGlobals, PeerId,
+    MessageId, NetworkEvent, NetworkGlobals, PeerId,
 };
 use slog::{crit, debug, error, info, o, trace, warn};
 use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration};
@@ -171,7 +170,7 @@ pub struct NetworkService<T: BeaconChainTypes> {
     /// A reference to the underlying beacon chain.
     beacon_chain: Arc<BeaconChain<T>>,
     /// The underlying libp2p service that drives all the network interactions.
-    libp2p: LibP2PService<RequestId, T::EthSpec>,
+    libp2p: Network<RequestId, T::EthSpec>,
     /// An attestation and subnet manager service.
     attestation_service: AttestationService<T>,
     /// A sync committeee subnet manager service.
@@ -273,8 +272,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
         };

         // launch libp2p service
-        let (network_globals, mut libp2p) =
-            LibP2PService::new(executor.clone(), service_context, &network_log).await?;
+        let (mut libp2p, network_globals) =
+            Network::new(executor.clone(), service_context, &network_log).await?;

         // Repopulate the DHT with stored ENR's if discovery is not disabled.
         if !config.disable_discovery {
@@ -284,7 +283,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 "Loading peers into the routing table"; "peers" => enrs_to_load.len()
             );
             for enr in enrs_to_load {
-                libp2p.swarm.behaviour_mut().add_enr(enr.clone());
+                libp2p.add_enr(enr.clone());
             }
         }

@@ -402,7 +401,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 _ = self.metrics_update.tick(), if self.metrics_enabled => {
                     // update various network metrics
                     metrics::update_gossip_metrics::<T::EthSpec>(
-                        self.libp2p.swarm.behaviour().gs(),
+                        self.libp2p.gossipsub(),
                         &self.network_globals,
                     );
                     // update sync metrics
@@ -429,7 +428,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {

                 Some(_) = &mut self.next_unsubscribe => {
                     let new_enr_fork_id = self.beacon_chain.enr_fork_id();
-                    self.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest);
+                    self.libp2p.unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest);
                     info!(self.log, "Unsubscribed from old fork topics");
                     self.next_unsubscribe = Box::pin(None.into());
                 }
@@ -439,7 +438,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                     let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name);
                     let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root);
                     info!(self.log, "Subscribing to new fork topics");
-                    self.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest);
+                    self.libp2p.subscribe_new_fork_topics(fork_digest);
                     self.next_fork_subscriptions = Box::pin(None.into());
                 }
                 else {
@@ -456,92 +455,90 @@ impl<T: BeaconChainTypes> NetworkService<T> {
     /// Handle an event received from the network.
     async fn on_libp2p_event(
         &mut self,
-        ev: Libp2pEvent<RequestId, T::EthSpec>,
+        ev: NetworkEvent<RequestId, T::EthSpec>,
         shutdown_sender: &mut Sender<ShutdownReason>,
     ) {
         match ev {
-            Libp2pEvent::Behaviour(event) => match event {
-                BehaviourEvent::PeerConnectedOutgoing(peer_id) => {
-                    self.send_to_router(RouterMessage::PeerDialed(peer_id));
-                }
-                BehaviourEvent::PeerConnectedIncoming(_)
-                | BehaviourEvent::PeerBanned(_)
-                | BehaviourEvent::PeerUnbanned(_) => {
-                    // No action required for these events.
-                }
-                BehaviourEvent::PeerDisconnected(peer_id) => {
-                    self.send_to_router(RouterMessage::PeerDisconnected(peer_id));
-                }
-                BehaviourEvent::RequestReceived {
-                    peer_id,
-                    id,
-                    request,
-                } => {
-                    self.send_to_router(RouterMessage::RPCRequestReceived {
-                        peer_id,
-                        id,
-                        request,
-                    });
-                }
-                BehaviourEvent::ResponseReceived {
-                    peer_id,
-                    id,
-                    response,
-                } => {
-                    self.send_to_router(RouterMessage::RPCResponseReceived {
-                        peer_id,
-                        request_id: id,
-                        response,
-                    });
-                }
-                BehaviourEvent::RPCFailed { id, peer_id } => {
-                    self.send_to_router(RouterMessage::RPCFailed {
-                        peer_id,
-                        request_id: id,
-                    });
-                }
-                BehaviourEvent::StatusPeer(peer_id) => {
-                    self.send_to_router(RouterMessage::StatusPeer(peer_id));
-                }
-                BehaviourEvent::PubsubMessage {
-                    id,
-                    source,
-                    message,
-                    ..
-                } => {
-                    match message {
-                        // attestation information gets processed in the attestation service
-                        PubsubMessage::Attestation(ref subnet_and_attestation) => {
-                            let subnet = subnet_and_attestation.0;
-                            let attestation = &subnet_and_attestation.1;
-                            // checks if we have an aggregator for the slot. If so, we should process
-                            // the attestation, else we just just propagate the Attestation.
-                            let should_process = self
-                                .attestation_service
-                                .should_process_attestation(subnet, attestation);
-                            self.send_to_router(RouterMessage::PubsubMessage(
-                                id,
-                                source,
-                                message,
-                                should_process,
-                            ));
-                        }
-                        _ => {
-                            // all else is sent to the router
-                            self.send_to_router(RouterMessage::PubsubMessage(
-                                id, source, message, true,
-                            ));
-                        }
-                    }
-                }
-            },
-            Libp2pEvent::NewListenAddr(multiaddr) => {
+            NetworkEvent::PeerConnectedOutgoing(peer_id) => {
+                self.send_to_router(RouterMessage::PeerDialed(peer_id));
+            }
+            NetworkEvent::PeerConnectedIncoming(_)
+            | NetworkEvent::PeerBanned(_)
+            | NetworkEvent::PeerUnbanned(_) => {
+                // No action required for these events.
+            }
+            NetworkEvent::PeerDisconnected(peer_id) => {
+                self.send_to_router(RouterMessage::PeerDisconnected(peer_id));
+            }
+            NetworkEvent::RequestReceived {
+                peer_id,
+                id,
+                request,
+            } => {
+                self.send_to_router(RouterMessage::RPCRequestReceived {
+                    peer_id,
+                    id,
+                    request,
+                });
+            }
+            NetworkEvent::ResponseReceived {
+                peer_id,
+                id,
+                response,
+            } => {
+                self.send_to_router(RouterMessage::RPCResponseReceived {
+                    peer_id,
+                    request_id: id,
+                    response,
+                });
+            }
+            NetworkEvent::RPCFailed { id, peer_id } => {
+                self.send_to_router(RouterMessage::RPCFailed {
+                    peer_id,
+                    request_id: id,
+                });
+            }
+            NetworkEvent::StatusPeer(peer_id) => {
+                self.send_to_router(RouterMessage::StatusPeer(peer_id));
+            }
+            NetworkEvent::PubsubMessage {
+                id,
+                source,
+                message,
+                ..
+            } => {
+                match message {
+                    // attestation information gets processed in the attestation service
+                    PubsubMessage::Attestation(ref subnet_and_attestation) => {
+                        let subnet = subnet_and_attestation.0;
+                        let attestation = &subnet_and_attestation.1;
+                        // checks if we have an aggregator for the slot. If so, we should process
+                        // the attestation, else we just just propagate the Attestation.
+                        let should_process = self
+                            .attestation_service
+                            .should_process_attestation(subnet, attestation);
+                        self.send_to_router(RouterMessage::PubsubMessage(
+                            id,
+                            source,
+                            message,
+                            should_process,
+                        ));
+                    }
+                    _ => {
+                        // all else is sent to the router
+                        self.send_to_router(RouterMessage::PubsubMessage(
+                            id, source, message, true,
+                        ));
+                    }
+                }
+            }
+            NetworkEvent::NewListenAddr(multiaddr) => {
                 self.network_globals
                     .listen_multiaddrs
                     .write()
                     .push(multiaddr);
             }
-            Libp2pEvent::ZeroListeners => {
+            NetworkEvent::ZeroListeners => {
                 let _ = shutdown_sender
                     .send(ShutdownReason::Failure(
                         "All listeners are closed. Unable to listen",
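The hunk above removes one level of nesting: events used to arrive as `Libp2pEvent::Behaviour(BehaviourEvent::…)` and now arrive as a single flat `NetworkEvent`, so every arm loses the wrapper and one indent level. A minimal sketch of the before/after matching, using stand-in enums rather than the real lighthouse_network types:

```rust
// Stand-in types; only the variant names echo the diff above.
struct PeerId;
struct Multiaddr;

// Old shape (roughly): behaviour events were wrapped in a service-level enum.
enum BehaviourEvent {
    PeerConnectedOutgoing(PeerId),
}
enum Libp2pEvent {
    Behaviour(BehaviourEvent),
    NewListenAddr(Multiaddr),
}

// New shape (roughly): one flat enum carries everything the service handles.
enum NetworkEvent {
    PeerConnectedOutgoing(PeerId),
    NewListenAddr(Multiaddr),
}

fn handle_old(ev: Libp2pEvent) -> &'static str {
    // Two enums to unwrap per arm.
    match ev {
        Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(_)) => "peer connected",
        Libp2pEvent::NewListenAddr(_) => "listening",
    }
}

fn handle_new(ev: NetworkEvent) -> &'static str {
    // One level of matching instead of two.
    match ev {
        NetworkEvent::PeerConnectedOutgoing(_) => "peer connected",
        NetworkEvent::NewListenAddr(_) => "listening",
    }
}

fn main() {
    assert_eq!(
        handle_old(Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(PeerId))),
        handle_new(NetworkEvent::PeerConnectedOutgoing(PeerId)),
    );
    assert_eq!(handle_new(NetworkEvent::NewListenAddr(Multiaddr)), "listening");
}
```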
@@ -588,7 +585,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 id,
                 reason,
             } => {
-                self.libp2p.respond_with_error(peer_id, id, error, reason);
+                self.libp2p.send_error_reponse(peer_id, id, error, reason);
             }
             NetworkMessage::UPnPMappingEstablished {
                 tcp_socket,
@@ -599,8 +596,6 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 if let Some(tcp_socket) = tcp_socket {
                     if let Err(e) = self
                         .libp2p
-                        .swarm
-                        .behaviour_mut()
                         .discovery_mut()
                         .update_enr_tcp_port(tcp_socket.port())
                     {
@@ -613,8 +608,6 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 if let Some(udp_socket) = udp_socket {
                     if let Err(e) = self
                         .libp2p
-                        .swarm
-                        .behaviour_mut()
                         .discovery_mut()
                         .update_enr_udp_socket(udp_socket)
                     {
@@ -633,14 +626,11 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                     "message_id" => %message_id,
                     "validation_result" => ?validation_result
                 );
-                self.libp2p
-                    .swarm
-                    .behaviour_mut()
-                    .report_message_validation_result(
-                        &propagation_source,
-                        message_id,
-                        validation_result,
-                    );
+                self.libp2p.report_message_validation_result(
+                    &propagation_source,
+                    message_id,
+                    validation_result,
+                );
             }
             NetworkMessage::Publish { messages } => {
                 let mut topic_kinds = Vec::new();
@@ -655,7 +645,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                     "count" => messages.len(),
                     "topics" => ?topic_kinds
                 );
-                self.libp2p.swarm.behaviour_mut().publish(messages);
+                self.libp2p.publish(messages);
             }
             NetworkMessage::ReportPeer {
                 peer_id,
@@ -693,7 +683,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                         GossipEncoding::default(),
                         fork_digest,
                     );
-                    if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) {
+                    if self.libp2p.subscribe(topic.clone()) {
                         subscribed_topics.push(topic);
                     } else {
                         warn!(self.log, "Could not subscribe to topic"; "topic" => %topic);
@@ -706,10 +696,10 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 for subnet_id in 0..<<T as BeaconChainTypes>::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() {
                     let subnet = Subnet::Attestation(SubnetId::new(subnet_id));
                     // Update the ENR bitfield
-                    self.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true);
+                    self.libp2p.update_enr_subnet(subnet, true);
                     for fork_digest in self.required_gossip_fork_digests() {
                         let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest);
-                        if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) {
+                        if self.libp2p.subscribe(topic.clone()) {
                             subscribed_topics.push(topic);
                         } else {
                             warn!(self.log, "Could not subscribe to topic"; "topic" => %topic);
@@ -720,17 +710,14 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 for subnet_id in 0..subnet_max {
                     let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id));
                     // Update the ENR bitfield
-                    self.libp2p
-                        .swarm
-                        .behaviour_mut()
-                        .update_enr_subnet(subnet, true);
+                    self.libp2p.update_enr_subnet(subnet, true);
                     for fork_digest in self.required_gossip_fork_digests() {
                         let topic = GossipTopic::new(
                             subnet.into(),
                             GossipEncoding::default(),
                             fork_digest,
                         );
-                        if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) {
+                        if self.libp2p.subscribe(topic.clone()) {
                             subscribed_topics.push(topic);
                         } else {
                             warn!(self.log, "Could not subscribe to topic"; "topic" => %topic);
@@ -782,8 +769,6 @@ impl<T: BeaconChainTypes> NetworkService<T> {
         if let Some(active_validators) = active_validators_opt {
             if self
                 .libp2p
-                .swarm
-                .behaviour_mut()
                 .update_gossipsub_parameters(active_validators, slot)
                 .is_err()
             {
@@ -811,33 +796,24 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 for fork_digest in self.required_gossip_fork_digests() {
                     let topic =
                         GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest);
-                    self.libp2p.swarm.behaviour_mut().subscribe(topic);
+                    self.libp2p.subscribe(topic);
                 }
             }
             SubnetServiceMessage::Unsubscribe(subnet) => {
                 for fork_digest in self.required_gossip_fork_digests() {
                     let topic =
                         GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest);
-                    self.libp2p.swarm.behaviour_mut().unsubscribe(topic);
+                    self.libp2p.unsubscribe(topic);
                 }
             }
             SubnetServiceMessage::EnrAdd(subnet) => {
-                self.libp2p
-                    .swarm
-                    .behaviour_mut()
-                    .update_enr_subnet(subnet, true);
+                self.libp2p.update_enr_subnet(subnet, true);
             }
             SubnetServiceMessage::EnrRemove(subnet) => {
-                self.libp2p
-                    .swarm
-                    .behaviour_mut()
-                    .update_enr_subnet(subnet, false);
+                self.libp2p.update_enr_subnet(subnet, false);
             }
             SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => {
-                self.libp2p
-                    .swarm
-                    .behaviour_mut()
-                    .discover_subnet_peers(subnets_to_discover);
+                self.libp2p.discover_subnet_peers(subnets_to_discover);
             }
         }
     }
@@ -848,33 +824,24 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 for fork_digest in self.required_gossip_fork_digests() {
                     let topic =
                         GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest);
-                    self.libp2p.swarm.behaviour_mut().subscribe(topic);
+                    self.libp2p.subscribe(topic);
                 }
             }
             SubnetServiceMessage::Unsubscribe(subnet) => {
                 for fork_digest in self.required_gossip_fork_digests() {
                     let topic =
                         GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest);
-                    self.libp2p.swarm.behaviour_mut().unsubscribe(topic);
+                    self.libp2p.unsubscribe(topic);
                 }
             }
             SubnetServiceMessage::EnrAdd(subnet) => {
-                self.libp2p
-                    .swarm
-                    .behaviour_mut()
-                    .update_enr_subnet(subnet, true);
+                self.libp2p.update_enr_subnet(subnet, true);
            }
             SubnetServiceMessage::EnrRemove(subnet) => {
-                self.libp2p
-                    .swarm
-                    .behaviour_mut()
-                    .update_enr_subnet(subnet, false);
+                self.libp2p.update_enr_subnet(subnet, false);
             }
             SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => {
-                self.libp2p
-                    .swarm
-                    .behaviour_mut()
-                    .discover_subnet_peers(subnets_to_discover);
+                self.libp2p.discover_subnet_peers(subnets_to_discover);
             }
         }
     }
@@ -892,10 +859,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
             );
             fork_context.update_current_fork(*new_fork_name);

-            self.libp2p
-                .swarm
-                .behaviour_mut()
-                .update_fork_version(new_enr_fork_id);
+            self.libp2p.update_fork_version(new_enr_fork_id);
             // Reinitialize the next_fork_update
             self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into());

@@ -944,7 +908,7 @@ fn next_fork_subscriptions_delay<T: BeaconChainTypes>(
 impl<T: BeaconChainTypes> Drop for NetworkService<T> {
     fn drop(&mut self) {
         // network thread is terminating
-        let enrs = self.libp2p.swarm.behaviour_mut().enr_entries();
+        let enrs = self.libp2p.enr_entries();
         debug!(
             self.log,
             "Persisting DHT to store";

@@ -16,7 +16,7 @@ operating system.
 Install the following packages:

 ```bash
-sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
+sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
 ```

 > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories
@@ -32,13 +32,18 @@ sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clan
 brew install cmake
 ```

+1. Install protoc using Homebrew:
+```
+brew install protobuf
+```
+
 [Homebrew]: https://brew.sh/

 #### Windows

 1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).
 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows.
-1. Install Make, CMake and LLVM using Chocolatey:
+1. Install Make, CMake, LLVM and protoc using Chocolatey:

 ```
 choco install make
@@ -52,10 +57,13 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System'
 choco install llvm
 ```

+```
+choco install protoc
+```
+
 These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run
 successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you
 should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section.

 [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about

 ## Build Lighthouse

@@ -14,6 +14,8 @@ The additional requirements for developers are:
   don't have `ganache` available on your `PATH` or if ganache is older than v7.
 - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by
   some dependencies. See [`Installation Guide`](./installation.md) for more info.
+- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for
+  the networking stack.
 - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum,
   used by web3signer_tests.

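`protoc` becomes a build-time requirement because the upgraded libp2p stack generates its protobuf codecs with prost, and recent prost-build releases no longer bundle a compiler: they shell out to a system `protoc`, found on `PATH` or through the `PROTOC` environment variable (which the cross images below set explicitly). An illustrative build script showing that mechanism; `src/example.proto` is a made-up path and this file is not part of the commit:

```rust
// build.rs -- illustrative only; not a file added by this commit.
fn main() -> std::io::Result<()> {
    // prost-build invokes `protoc` under the hood, locating it on PATH or via
    // the PROTOC environment variable, then writes generated Rust into OUT_DIR.
    prost_build::compile_protos(&["src/example.proto"], &["src/"])?;
    Ok(())
}
```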

scripts/cross/aarch64-unknown-linux-gnu.dockerfile (new file, 14 lines)
@@ -0,0 +1,14 @@
+ARG CROSS_BASE_IMAGE
+FROM $CROSS_BASE_IMAGE
+
+RUN apt-get update -y && apt-get upgrade -y
+
+RUN apt-get install -y unzip && \
+    PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \
+    curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-aarch_64.zip -o protoc.zip && \
+    unzip protoc.zip -d /usr && \
+    chmod +x /usr/bin/protoc
+
+RUN apt-get install -y cmake clang-3.9
+
+ENV PROTOC=/usr/bin/protoc

scripts/cross/x86_64-unknown-linux-gnu.dockerfile (new file, 14 lines)
@@ -0,0 +1,14 @@
+ARG CROSS_BASE_IMAGE
+FROM $CROSS_BASE_IMAGE
+
+RUN apt-get update -y && apt-get upgrade -y
+
+RUN apt-get install -y unzip && \
+    PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \
+    curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip -o protoc.zip && \
+    unzip protoc.zip -d /usr && \
+    chmod +x /usr/bin/protoc
+
+RUN apt-get install -y cmake clang-3.9
+
+ENV PROTOC=/usr/bin/protoc